Merge "wcnss: Remove enabling of CXO in suspend/resume"
diff --git a/Documentation/devicetree/bindings/arm/msm/smem.txt b/Documentation/devicetree/bindings/arm/msm/smem.txt
new file mode 100644
index 0000000..a38984c
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/msm/smem.txt
@@ -0,0 +1,107 @@
+Qualcomm Shared Memory
+
+[Root level node]
+Required properties:
+-compatible : should be "qcom,smem"
+-reg : the location and size of smem, the irq register base memory, and
+	optionally any auxiliary smem areas
+-reg-names : "smem" - string to identify the shared memory region
+	     "irq-reg-base" - string to identify the irq register region
+	     "aux-mem1", "aux-mem2", "aux-mem3", ... - optional strings to
+			identify any auxiliary shared memory regions
+
+[Second level nodes]
+
+qcom,smd
+Required properties:
+-compatible : should be "qcom,smd"
+-qcom,smd-edge : the smd edge
+-qcom,smd-irq-offset : the offset into the irq register base memory for sending
+	interrupts
+-qcom,smd-irq-bitmask : the sending irq bitmask
+-interrupts : the receiving interrupt line
+
+Optional properties:
+-qcom,pil-string : the name to use when loading this edge
+-qcom,irq-no-suspend: configure the incoming irq line as active during suspend
+
+qcom,smsm
+Required properties:
+-compatible : should be "qcom,smsm"
+-qcom,smsm-edge : the smsm edge
+-qcom,smsm-irq-offset : the offset into the irq register base memory for sending
+	interrupts
+-qcom,smsm-irq-bitmask : the sending irq bitmask
+-interrupts : the receiving interrupt line
+
+
+Example:
+
+	qcom,smem@fa00000 {
+		compatible = "qcom,smem";
+		reg = <0xfa00000 0x200000>,
+			<0xfa006000 0x1000>,
+			<0xfc428000 0x4000>;
+		reg-names = "smem", "irq-reg-base", "aux-mem1";
+
+		qcom,smd-modem {
+			compatible = "qcom,smd";
+			qcom,smd-edge = <0>;
+			qcom,smd-irq-offset = <0x8>;
+			qcom,smd-irq-bitmask = <0x1000>;
+			qcom,pil-string = "modem";
+			interrupts = <0 25 1>;
+		};
+
+		qcom,smsm-modem {
+			compatible = "qcom,smsm";
+			qcom,smsm-edge = <0>;
+			qcom,smsm-irq-offset = <0x8>;
+			qcom,smsm-irq-bitmask = <0x2000>;
+			interrupts = <0 26 1>;
+		};
+
+		qcom,smd-adsp {
+			compatible = "qcom,smd";
+			qcom,smd-edge = <1>;
+			qcom,smd-irq-offset = <0x8>;
+			qcom,smd-irq-bitmask = <0x100>;
+			qcom,pil-string = "adsp";
+			interrupts = <0 156 1>;
+		};
+
+		qcom,smsm-adsp {
+			compatible = "qcom,smsm";
+			qcom,smsm-edge = <1>;
+			qcom,smsm-irq-offset = <0x8>;
+			qcom,smsm-irq-bitmask = <0x200>;
+			interrupts = <0 157 1>;
+		};
+
+		qcom,smd-wcnss {
+			compatible = "qcom,smd";
+			qcom,smd-edge = <6>;
+			qcom,smd-irq-offset = <0x8>;
+			qcom,smd-irq-bitmask = <0x20000>;
+			qcom,pil-string = "wcnss";
+			interrupts = <0 142 1>;
+		};
+
+		qcom,smsm-wcnss {
+			compatible = "qcom,smsm";
+			qcom,smsm-edge = <6>;
+			qcom,smsm-irq-offset = <0x8>;
+			qcom,smsm-irq-bitmask = <0x80000>;
+			interrupts = <0 144 1>;
+		};
+
+		qcom,smd-rpm {
+			compatible = "qcom,smd";
+			qcom,smd-edge = <15>;
+			qcom,smd-irq-offset = <0x8>;
+			qcom,smd-irq-bitmask = <0x1>;
+			interrupts = <0 168 1>;
+			qcom,irq-no-suspend;
+		};
+	};
+
diff --git a/Documentation/devicetree/bindings/fb/msm-hdmi-tx.txt b/Documentation/devicetree/bindings/fb/msm-hdmi-tx.txt
index 70fea73..e777094 100644
--- a/Documentation/devicetree/bindings/fb/msm-hdmi-tx.txt
+++ b/Documentation/devicetree/bindings/fb/msm-hdmi-tx.txt
@@ -6,22 +6,32 @@
 - reg: offset and length of the register regions(s) for the device.
 - reg-names: a list of strings that map in order to the list of regs.
 
-- <supply-name>-supply: phandle to the regulator device tree node.
-- <compatible-name>-supply-names: a list of strings that map in order
+- hpd-5v-supply: phandle to the 5V regulator device tree node.
+- core-vdda-supply: phandle to the HDMI vdda regulator device tree node.
+- core-vcc-supply: phandle to the HDMI vcc regulator device tree node.
+- qcom,hdmi-tx-supply-names: a list of strings that map in order
   to the list of supplies.
-- <<compatible-name>-supply-type: a type of supply(ies) mentioned above.
+- qcom,hdmi-tx-supply-type: a type of supply(ies) mentioned above.
     0 = supply with controlled output
     1 = supply without controlled output. i.e. voltage switch
-- <compatible-name>-min-voltage-level: specifies minimum voltage level
+- qcom,hdmi-tx-min-voltage-level: specifies minimum voltage level
   of supply(ies) mentioned above.
-- <compatible-name>-max-voltage-level: specifies maximum voltage level
+- qcom,hdmi-tx-max-voltage-level: specifies maximum voltage level
   of supply(ies) mentioned above.
-- <compatible-name>-op-mode: specifies optimum operating mode of
+- qcom,hdmi-tx-op-mode: specifies optimum operating mode of
   supply(ies) mentioned above.
 
-- gpios: specifies gpios assigned for the device.
-- <compatible-name>-gpio-names: a list of strings that map in order to
-  the list of gpios
+- qcom,hdmi-tx-cec: gpio for Consumer Electronics Control (cec) line.
+- qcom,hdmi-tx-ddc-clk: gpio for Display Data Channel (ddc) clock line.
+- qcom,hdmi-tx-ddc-data: gpio for ddc data line.
+- qcom,hdmi-tx-hpd: gpio required for HDMI hot-plug detect.
+
+Optional properties:
+- qcom,hdmi-tx-mux-sel: gpio required to toggle HDMI output between
+  the docking station (type A) and liquid device (type D) ports. Required
+  property for liquid devices.
+- qcom,hdmi-tx-mux-en: gpio required to enable the mux for HDMI output
+  on liquid devices. Required property for liquid devices.
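+
+  For example, on a liquid device these optional gpios might be specified as
+  follows (the gpio controller and pin numbers are illustrative only):
+	qcom,hdmi-tx-mux-sel = <&msmgpio 27 0>;
+	qcom,hdmi-tx-mux-en = <&msmgpio 83 0>;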
 
 Example:
 	qcom,hdmi_tx@fd922100 {
@@ -41,10 +51,8 @@
 		qcom,hdmi-tx-max-voltage-level = <0 1800000 1800000>;
 		qcom,hdmi-tx-op-mode = <0 1800000 0>;
 
-		gpios = <&msmgpio 31 0>,
-			<&msmgpio 32 0>,
-			<&msmgpio 33 0>,
-			<&msmgpio 34 0>;
-		qcom,hdmi-tx-gpio-names =
-			"cec-pin", "hpd-ddc-clk", "hpd-ddc-data", "hpd-pin";
+		qcom,hdmi-tx-cec = <&msmgpio 31 0>;
+		qcom,hdmi-tx-ddc-clk = <&msmgpio 32 0>;
+		qcom,hdmi-tx-ddc-data = <&msmgpio 33 0>;
+		qcom,hdmi-tx-hpd = <&msmgpio 34 0>;
 	};
diff --git a/Documentation/devicetree/bindings/i2c/sii8334-i2c.txt b/Documentation/devicetree/bindings/i2c/sii8334-i2c.txt
new file mode 100644
index 0000000..ed45192
--- /dev/null
+++ b/Documentation/devicetree/bindings/i2c/sii8334-i2c.txt
@@ -0,0 +1,26 @@
+* Silicon Image-8334 MHL Tx
+
+Required properties:
+- compatible: must be "qcom,mhl-sii8334"
+- reg: i2c slave address
+- mhl-intr-gpio: MHL interrupt gpio coming out of sii8334
+- mhl-pwr-gpio: MHL power gpio required for power rails
+- mhl-rst-gpio: MHL reset gpio going into sii8334 for toggling reset pin
+- <supply-name>-supply: phandle to the regulator device tree node.
+
+Example:
+	i2c@f9967000 {
+		sii8334@72 {
+			compatible = "qcom,mhl-sii8334";
+			reg = <0x72>;
+			interrupt-parent = <&msmgpio>;
+			interrupts = <82 0x8>;
+			mhl-intr-gpio = <&msmgpio 82 0>;
+			mhl-pwr-gpio = <&msmgpio 12 0>;
+			mhl-rst-gpio = <&pm8941_mpps 8 0>;
+			avcc_18-supply = <&pm8941_l24>;
+			avcc_12-supply = <&pm8941_l2>;
+			smps3a-supply = <&pm8941_s3>;
+			vdda-supply = <&pm8941_l12>;
+		};
+	};
diff --git a/Documentation/devicetree/bindings/leds/leds-qpnp.txt b/Documentation/devicetree/bindings/leds/leds-qpnp.txt
index d7290e0..10732cf 100644
--- a/Documentation/devicetree/bindings/leds/leds-qpnp.txt
+++ b/Documentation/devicetree/bindings/leds/leds-qpnp.txt
@@ -24,14 +24,14 @@
 
 Optional properties for WLED:
 - qcom,num-strings: number of wled strings supported
-- qcom,ovp_val: over voltage protection threshold,
+- qcom,ovp-val: over voltage protection threshold,
 		follows enum wled_ovp_threshold
-- qcom,boost_curr_lim: boot currnet limit, follows enum wled_current_bost_limit
-- qcom,ctrl_delay_us: delay in activation of led
-- qcom,dig_mod_gen_en: digital module generator
-- qcom,cs_out_en: current sink output enable
-- qcom,op_fdbck: selection of output as feedback for the boost
-- qcom,cp_select: high pole capacitance
+- qcom,boost-curr-lim: boost current limit, follows enum wled_current_bost_limit
+- qcom,ctrl-delay-us: delay in activation of led
+- qcom,dig-mod-gen-en: digital module generator
+- qcom,cs-out-en: current sink output enable
+- qcom,op-fdbck: selection of output as feedback for the boost
+- qcom,cp-select: high pole capacitance
 - linux,default-trigger: trigger the led from external modules such as display
 - qcom,default-state:  default state of the led, should be "on" or "off"
 
diff --git a/Documentation/devicetree/bindings/misc/ti_drv2667.txt b/Documentation/devicetree/bindings/misc/ti_drv2667.txt
new file mode 100644
index 0000000..3a8f4b3
--- /dev/null
+++ b/Documentation/devicetree/bindings/misc/ti_drv2667.txt
@@ -0,0 +1,44 @@
+TI DRV2667 is a haptic controller chip. It can drive piezo haptics
+and can operate in two modes - analog and digital.
+
+Required properties:
+
+-compatible	: should be "ti,drv2667".
+-reg		: i2c address to be used.
+-vdd-supply	: regulator to power the chip.
+-vdd-i2c-supply	: regulator to power i2c bus.
+
+Optional properties:
+
+-ti,label		: Name to be registered with the timed output class.
+-ti,mode		: Mode to be supported, 0 to 3 - FIFO, RAM, WAVE and ANALOG.
+-ti,wav-seq		: Wave Sequence composed of 11 bytes - wave form id,
+				Header size, start upper byte, start lower byte,
+				stop upper byte, stop lower byte, repeat count,
+				amplitude, frequency, duration and envelope
+-ti,gain		: Gain to be programmed for the chip.
+-ti,idle-timeout-ms	: Idle timeout in ms to be programmed for the chip to go into
+				low power mode after finishing its operation.
+-ti,max-runtime-ms	: Maximum time in ms for which chip can drive haptics.
+
+Example:
+	i2c@f9967000 {
+		ti-drv2667@59 {
+			compatible = "ti,drv2667";
+			reg = <0x59>;
+			vdd-supply = <&drv2667_vreg>;
+			vdd-i2c-supply = <&pm8941_s3>;
+			ti,label = "vibrator";
+			ti,gain = <3>;
+			ti,idle-timeout-ms = <20>;
+			ti,max-runtime-ms = <15000>;
+			ti,mode = <2>;
+			ti,wav-seq = [
+				/* wave form id */
+				01
+				/* header size, start and stop bytes */
+				05 80 06 00 09
+				/* repeat, amp, freq, duration, envelope */
+				01 ff 19 02 00];
+		};
+	};
diff --git a/Documentation/devicetree/bindings/net/wireless/ath/ath6kl.txt b/Documentation/devicetree/bindings/net/wireless/ath/ath6kl.txt
new file mode 100644
index 0000000..7b9feae
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/wireless/ath/ath6kl.txt
@@ -0,0 +1,24 @@
+* Qualcomm Atheros mobile chipsets
+
+Required properties:
+    - compatible: Can be "qca,ar6004-sdio" for SDIO device and
+    "qca,ar6004-hsic" for HSIC device.
+    - qca,chip-pwd-l-gpios: specify GPIO for CHIP_PWD_L.
+
+Optional Properties:
+    - cell-index: WLAN Hardware index.
+    - qca,pm-enable-gpios: Specify this GPIO if internal PMU needs to be used.
+    - qca,ar6004-vbatt-supply: Specify this if VBATT is provided through a
+    regulator.
+    - qca,ar6004-vdd-io-supply: Specify this if VDD-IO is provided through a
+    regulator.
+
+Example:
+
+	wlan0: qca,wlan {
+		cell-index = <0>;
+		compatible = "qca,ar6004-sdio";
+		qca,chip-pwd-l-gpios = <&msmgpio 62 0>;
+		qca,pm-enable-gpios = <&pm8019_gpios 3 0x0>;
+		qca,ar6004-vdd-io-supply = <&pm8019_l11>;
+	};
diff --git a/Documentation/devicetree/bindings/pil/pil-q6v5-mss.txt b/Documentation/devicetree/bindings/pil/pil-q6v5-mss.txt
index 556300d..802716c 100644
--- a/Documentation/devicetree/bindings/pil/pil-q6v5-mss.txt
+++ b/Documentation/devicetree/bindings/pil/pil-q6v5-mss.txt
@@ -18,8 +18,7 @@
 - qcom,pil-self-auth: <0> if the hardware does not require self-authenticating
 		      images and self-authentication is not desired;
 		      <1> if the hardware requires self-authenticating images.
-- qcom,is_loadable:   <0> if PIL should not load the modem image
-                      <1> if PIL is required to load the modem image
+- qcom,is-loadable:   if PIL is required to load the modem image
 
 Example:
 	qcom,mss@fc880000 {
@@ -34,7 +33,7 @@
 		interrupts = <0 24 1>;
 		vdd_mss-supply = <&pm8841_s3>;
 
-		qcom,is_loadable = <1>;
+		qcom,is-loadable;
 		qcom,firmware-name = "mba";
 		qcom,pil-self-auth = <1>;
 	};
diff --git a/Documentation/devicetree/bindings/platform/msm/ipa.txt b/Documentation/devicetree/bindings/platform/msm/ipa.txt
new file mode 100644
index 0000000..86c60e8
--- /dev/null
+++ b/Documentation/devicetree/bindings/platform/msm/ipa.txt
@@ -0,0 +1,81 @@
+Qualcomm Internet Packet Accelerator
+
+Internet Packet Accelerator (IPA) is a programmable protocol
+processor HW block. It is designed to support generic HW processing
+of UL/DL IP packets for various use cases independent of radio technology.
+
+Required properties:
+
+IPA node:
+
+- compatible : "qcom,ipa"
+- reg: Specifies the base physical addresses and the sizes of the IPA
+       registers.
+- reg-names: "ipa-base" - string to identify the IPA CORE base registers.
+	     "bam-base" - string to identify the IPA BAM base registers.
+- interrupts: Specifies the interrupt associated with IPA.
+- interrupt-names: "ipa-irq" - string to identify the IPA core interrupt.
+                   "bam-irq" - string to identify the IPA BAM interrupt.
+
+IPA pipe sub nodes (A2 static pipes configurations):
+
+-label: two labels are supported, a2-to-ipa and ipa-to-a2, which
+supply static configuration for the A2-IPA connection.
+-qcom,src-bam-physical-address: The physical address of the source BAM
+-qcom,ipa-bam-mem-type: The memory type:
+                       0(Pipe memory), 1(Private memory), 2(System memory)
+-qcom,src-bam-pipe-index: Source pipe index
+-qcom,dst-bam-physical-address: The physical address of the
+                                destination BAM
+-qcom,dst-bam-pipe-index: Destination pipe index
+-qcom,data-fifo-offset: Data fifo base offset
+-qcom,data-fifo-size:  Data fifo size (bytes)
+-qcom,descriptor-fifo-offset: Descriptor fifo base offset
+-qcom,descriptor-fifo-size: Descriptor fifo size (bytes)
+
+Optional properties:
+-qcom,ipa-pipe-mem: Specifies the base physical address and the
+                    size of the IPA pipe memory region.
+                    Pipe memory is a feature which may be supported by the
+                    target (HW platform). The driver supports using pipe
+                    memory instead of system memory. If this property does
+                    not appear in the IPA DTS entry, the driver will use
+                    system memory.
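+                    For example, a target providing pipe memory might specify
+                    (the address and size here are illustrative only):
+                        qcom,ipa-pipe-mem = <0xfd4c8000 0x8000>;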
+
+Example:
+
+qcom,ipa@fd4c0000 {
+	compatible = "qcom,ipa";
+	reg = <0xfd4c0000 0x26000>,
+	      <0xfd4c4000 0x14818>;
+	reg-names = "ipa-base", "bam-base";
+	interrupts = <0 252 0>,
+	             <0 253 0>;
+	interrupt-names = "ipa-irq", "bam-irq";
+
+	qcom,pipe1 {
+		label = "a2-to-ipa";
+		qcom,src-bam-physical-address = <0xfc834000>;
+		qcom,ipa-bam-mem-type = <0>;
+		qcom,src-bam-pipe-index = <1>;
+		qcom,dst-bam-physical-address = <0xfd4c0000>;
+		qcom,dst-bam-pipe-index = <6>;
+		qcom,data-fifo-offset = <0x1000>;
+		qcom,data-fifo-size = <0xd00>;
+		qcom,descriptor-fifo-offset = <0x1d00>;
+		qcom,descriptor-fifo-size = <0x300>;
+	};
+
+	qcom,pipe2 {
+		label = "ipa-to-a2";
+		qcom,src-bam-physical-address = <0xfd4c0000>;
+		qcom,ipa-bam-mem-type = <0>;
+		qcom,src-bam-pipe-index = <7>;
+		qcom,dst-bam-physical-address = <0xfc834000>;
+		qcom,dst-bam-pipe-index = <0>;
+		qcom,data-fifo-offset = <0x00>;
+		qcom,data-fifo-size = <0xd00>;
+		qcom,descriptor-fifo-offset = <0xd00>;
+		qcom,descriptor-fifo-size = <0x300>;
+	};
+};
diff --git a/Documentation/devicetree/bindings/power/qpnp-bms.txt b/Documentation/devicetree/bindings/power/qpnp-bms.txt
index 1d3c2db..4d571eb 100644
--- a/Documentation/devicetree/bindings/power/qpnp-bms.txt
+++ b/Documentation/devicetree/bindings/power/qpnp-bms.txt
@@ -36,12 +36,34 @@
 - qcom,bms-adjust-soc-low-threshold : The low threshold for the "flat portion"
 			of the charging curve. The BMS will not adjust SoC
 			based on voltage during this time.
-- qcom,bms-adjust-soc-high-threshold :  The high threshold for the "flat
+- qcom,bms-adjust-soc-high-threshold : The high threshold for the "flat
 			portion" of the charging curve. The BMS will not
 			adjust SoC based on voltage during this time.
+- qcom,bms-low-soc-calculate-soc-threshold : The SoC threshold below which
+			the periodic calculate_soc work speeds up. This ensures
+			SoC is updated in userspace frequently when we are near
+			shutdown.
+- qcom,bms-low-soc-calculate-soc-ms : The time period between subsequent
+			SoC recalculations when the current SoC is below
+			qcom,bms-low-soc-calculate-soc-threshold.
+- qcom,bms-calculate-soc-ms : The time period between subsequent SoC
+			recalculations when the current SoC is at or above
+			qcom,bms-low-soc-calculate-soc-threshold.
 - qcom,bms-chg-term-ua : current in micro-amps when charging is considered done.
 			As soon as current passes this point, charging is
 			stopped.
+- qcom,bms-batt-type: Type of battery used. This is an integer that corresponds
+			to the enum defined in
+			include/linux/mfd/pm8xxx/batterydata-lib.h
+
+Optional properties:
+- qcom,bms-ignore-shutdown-soc: A boolean that controls whether BMS will
+			try to force the startup SoC to be the same as the
+			shutdown SoC. Defining it will make BMS ignore the
+			shutdown SoC.
+- qcom,bms-use-voltage-soc : A boolean that controls whether BMS will use
+			voltage-based SoC instead of a coulomb counter based
+			one. Voltage-based SoC will not guarantee linearity.
 
 Example:
 	bms@4000 {
@@ -76,5 +98,10 @@
 		qcom,bms-shutdown-soc-valid-limit = <20>;
 		qcom,bms-adjust-soc-low-threshold = <25>;
 		qcom,bms-adjust-soc-high-threshold = <45>;
+		qcom,bms-low-soc-calculate-soc-threshold = <15>;
+		qcom,bms-low-soc-calculate-soc-ms = <5000>;
+		qcom,bms-calculate-soc-ms = <20000>;
 		qcom,bms-chg-term-ua = <100000>;
+		qcom,bms-batt-type = <0>;
+		qcom,bms-ignore-shutdown-soc;
 	};
diff --git a/Documentation/devicetree/bindings/prng/msm-rng.txt b/Documentation/devicetree/bindings/prng/msm-rng.txt
index 3d55808..28dfe50 100644
--- a/Documentation/devicetree/bindings/prng/msm-rng.txt
+++ b/Documentation/devicetree/bindings/prng/msm-rng.txt
@@ -4,9 +4,13 @@
 - compatible : Should be "qcom,msm-rng"
 - reg        : Offset and length of the register set for the device
 
+Optional property:
+- qcom,msm-rng-iface-clk : If the device uses iface-clk.
+
 Example:
 
-        qcom,msm-rng@f9bff000 {
-                              compatible = "qcom,msm-rng";
-                              reg = <0xf9bff000 0x200>;
-        };
+	qcom,msm-rng@f9bff000 {
+		compatible = "qcom,msm-rng";
+		reg = <0xf9bff000 0x200>;
+		qcom,msm-rng-iface-clk;
+	};
diff --git a/Documentation/devicetree/bindings/qseecom/qseecom.txt b/Documentation/devicetree/bindings/qseecom/qseecom.txt
index 5e7c42a..43033a8 100644
--- a/Documentation/devicetree/bindings/qseecom/qseecom.txt
+++ b/Documentation/devicetree/bindings/qseecom/qseecom.txt
@@ -2,6 +2,7 @@
 
 Required properties:
 - compatible : Should be "qcom,qseecom"
+- reg : should contain the memory region address reserved for loading secure apps.
+- reg-names : should be "secapp-region", naming the memory region given in reg.
 - qcom, msm_bus,name: Should be "qseecom-noc"
 - qcom, msm_bus,num_cases: Depends on the use cases for bus scaling
 - qcom, msm_bus,num_paths: The paths for source and destination ports
@@ -10,6 +11,8 @@
 Example:
 	qcom,qseecom@fe806000 {
 		compatible = "qcom,qseecom";
+		reg = <0x7f00000 0x500000>;
+		reg-names = "secapp-region";
 		qcom,msm_bus,name = "qseecom-noc";
 		qcom,msm_bus,num_cases = <4>;
 		qcom,msm_bus,active_only = <0>;
diff --git a/Documentation/devicetree/bindings/slimbus/slim-msm-ctrl.txt b/Documentation/devicetree/bindings/slimbus/slim-msm-ctrl.txt
index ecac09d..6b090fa 100644
--- a/Documentation/devicetree/bindings/slimbus/slim-msm-ctrl.txt
+++ b/Documentation/devicetree/bindings/slimbus/slim-msm-ctrl.txt
@@ -1,4 +1,19 @@
 Qualcomm SLIMBUS controller
+Qualcomm implements 2 types of slimbus controllers:
+1. "qcom,slim-msm": This controller is used if the applications processor
+	driver controls the slimbus master component. The driver is
+	responsible for communicating with slave HW directly using the
+	messaging interface and for doing data channel management. The driver
+	also communicates with the satellite component (a driver implemented
+	by another execution environment, such as the ADSP) to get its
+	data channel and bandwidth requirements.
+2. "qcom,slim-ngd": This controller is used if the applications processor
+	driver controls the slimbus satellite component (also known as a
+	Non-ported Generic Device, or NGD). This is a light-weight slimbus
+	controller responsible for communicating with slave HW directly
+	over the bus messaging interface, and for communicating with the
+	master component (a driver residing on another execution environment,
+	such as the ADSP) for bandwidth and data channel management.
 
 Required properties:
 
@@ -8,7 +23,8 @@
 	 "slimbus_physical": Physical adderss of controller register blocks
 	 "slimbus_bam_physical": Physical address of Bus Access Module (BAM)
 				 for this controller
- - compatible : should be "qcom,slim-msm"
+ - compatible : should be "qcom,slim-msm" if this is the master component driver
+ - compatible : should be "qcom,slim-ngd" if this is the satellite component driver
  - cell-index : SLIMBUS number used for this controller
  - interrupts : Interrupt numbers used by this controller
  - interrupt-names : Required interrupt resource entries are:
diff --git a/Documentation/devicetree/bindings/sound/qcom-audio-dev.txt b/Documentation/devicetree/bindings/sound/qcom-audio-dev.txt
index bf605ec..213da90 100644
--- a/Documentation/devicetree/bindings/sound/qcom-audio-dev.txt
+++ b/Documentation/devicetree/bindings/sound/qcom-audio-dev.txt
@@ -381,3 +381,115 @@
 	taiko-mclk-clk = <&pm8941_clkdiv1>;
 	qcom,taiko-mclk-clk-freq = <9600000>;
 };
+
+* msm-dai-mi2s
+
+[First Level Nodes]
+
+Required properties:
+
+ - compatible : "qcom,msm-dai-mi2s"
+
+ [Second Level Nodes]
+
+Required properties:
+
+ - compatible : "qcom,msm-dai-q6-mi2s"
+ - qcom,msm-dai-q6-mi2s-dev-id: MSM or MDM can use the Slimbus or I2S interface to transfer
+								data to the (WCD9XXX) codec. If the Slimbus interface is used,
+								"msm-dai-q6" needs to be filled with the correct data for the
+								Slimbus interface. The "msm-dai-mi2s" section is used by MDM or
+								MSM to use the I2S interface with the codec, and is used by the
+								CPU driver in MSM ASoC to configure the MI2S interface. MSM
+								internally has multiple MI2S blocks, namely Primary, Secondary,
+								Tertiary and Quaternary MI2S, represented by ids 0, 1, 2 and 3
+								respectively. The field "qcom,msm-dai-q6-mi2s-dev-id" indicates
+								which of the MI2S blocks is used. These MI2S blocks are connected
+								to the I2S interface.
+
+ - qcom,msm-mi2s-rx-lines:		Each MI2S interface in MSM has one or more SD lines. These lines
+								are used for data transfer between the codec and MSM. This element
+								indicates which output RX lines are used by the MI2S interface.
+
+ - qcom,msm-mi2s-tx-lines:		Each MI2S interface in MSM has one or more SD lines. These lines
+								are used for data transfer between the codec and MSM. This element
+								indicates which input TX lines are used by the MI2S interface.
+
+Example:
+
+qcom,msm-dai-mi2s {
+		compatible = "qcom,msm-dai-mi2s";
+		qcom,msm-dai-q6-mi2s-prim {
+			compatible = "qcom,msm-dai-q6-mi2s";
+			qcom,msm-dai-q6-mi2s-dev-id = <0>;
+			qcom,msm-mi2s-rx-lines = <2>;
+			qcom,msm-mi2s-tx-lines = <1>;
+		};
+};
+
+* MDM9625 ASoC Machine driver
+
+Required properties:
+- compatible : "qcom,mdm9625-audio-taiko"
+- qcom,model : The user-visible name of this sound card.
+- qcom,audio-routing : A list of the connections between audio components.
+  Each entry is a pair of strings, the first being the connection's sink,
+  the second being the connection's source.
+- qcom,taiko-mclk-clk-freq : Master clock value given to the codec. Some WCD9XXX
+  codecs can run at different MCLK values; the MCLK value can be 9.6 MHz or
+  12.288 MHz. This element represents the MCLK value provided to the codec.
+
+Example:
+
+sound {
+		compatible = "qcom,mdm9625-audio-taiko";
+		qcom,model = "mdm9625-taiko-i2s-snd-card";
+
+		qcom,audio-routing =
+			"RX_BIAS", "MCLK",
+			"LDO_H", "MCLK",
+			"Ext Spk Bottom Pos", "LINEOUT1",
+			"Ext Spk Bottom Neg", "LINEOUT3",
+			"Ext Spk Top Pos", "LINEOUT2",
+			"Ext Spk Top Neg", "LINEOUT4",
+			"AMIC1", "MIC BIAS1 External",
+			"MIC BIAS1 External", "Handset Mic",
+			"AMIC2", "MIC BIAS2 External",
+			"MIC BIAS2 External", "Headset Mic",
+			"AMIC3", "MIC BIAS3 Internal1",
+			"MIC BIAS3 Internal1", "ANCRight Headset Mic",
+			"AMIC4", "MIC BIAS1 Internal2",
+			"MIC BIAS1 Internal2", "ANCLeft Headset Mic",
+			"DMIC1", "MIC BIAS1 External",
+			"MIC BIAS1 External", "Digital Mic1",
+			"DMIC2", "MIC BIAS1 External",
+			"MIC BIAS1 External", "Digital Mic2",
+			"DMIC3", "MIC BIAS3 External",
+			"MIC BIAS3 External", "Digital Mic3",
+			"DMIC4", "MIC BIAS3 External",
+			"MIC BIAS3 External", "Digital Mic4",
+			"DMIC5", "MIC BIAS4 External",
+			"MIC BIAS4 External", "Digital Mic5",
+			"DMIC6", "MIC BIAS4 External",
+			"MIC BIAS4 External", "Digital Mic6";
+		qcom,taiko-mclk-clk-freq = <12288000>;
+};
+
+* msm-adsp-loader
+
+Required properties:
+ - compatible : "qcom,adsp-loader"
+ - qcom,adsp-state:
+	It is possible that some MSMs use PIL to load the ADSP image, while
+	other MSMs may use SBL to load the ADSP image at boot. Audio APR needs
+	the state of the ADSP to register and enable APR for sending commands
+	to the ADSP, so adsp-state reports the state of the ADSP to the ADSP
+	loader. A value of 0 indicates the ADSP loader needs to use PIL and a
+	value of 2 means the ADSP image is already loaded by SBL.
+
+Example:
+
+qcom,msm-adsp-loader {
+	compatible = "qcom,adsp-loader";
+	qcom,adsp-state = <2>;
+};
diff --git a/Documentation/devicetree/bindings/sound/taiko_codec.txt b/Documentation/devicetree/bindings/sound/taiko_codec.txt
index 96e3a61..090d8db 100644
--- a/Documentation/devicetree/bindings/sound/taiko_codec.txt
+++ b/Documentation/devicetree/bindings/sound/taiko_codec.txt
@@ -22,12 +22,14 @@
  - qcom,cdc-micbias-cfilt1-mv - cfilt1 output voltage in milli volts.
  - qcom,cdc-micbias-cfilt2-mv - cfilt2 output voltage in milli volts.
  - qcom,cdc-micbias-cfilt3-mv - cfilt3 output voltage in milli volts.
-   cfilt volatge can be set to max of qcom,cdc-micbias-ldoh-v - 0.15V.
+   cfilt voltage can be set to max of qcom,cdc-micbias-ldoh-v - 0.15V.
 
  - qcom,cdc-micbias1-cfilt-sel = cfilt to use for micbias1 (should be from 1 to 3).
  - qcom,cdc-micbias2-cfilt-sel = cfilt to use for micbias2 (should be from 1 to 3).
  - qcom,cdc-micbias3-cfilt-sel = cfilt to use for micbias3 (should be from 1 to 3).
  - qcom,cdc-micbias4-cfilt-sel = cfilt to use for micbias4 (should be from 1 to 3).
+   This value represents the CFILT connected to the MIC bias.
+
  - qcom,cdc-micbias1-ext-cap: Boolean. Enable micbias 1 external capacitor mode.
  - qcom,cdc-micbias2-ext-cap: Boolean. Enable micbias 2 external capacitor mode.
  - qcom,cdc-micbias3-ext-cap: Boolean. Enable micbias 3 external capacitor mode.
@@ -88,3 +90,109 @@
 	qcom,cdc-slim-ifd = "taiko-slim-ifd";
 	qcom,cdc-slim-ifd-elemental-addr = [00 00 A0 00 17 02];
 };
+
+Wcd9xxx audio CODEC in I2C mode
+
+  - compatible = "qcom,wcd9xxx-i2c-device";
+  - reg: represents the slave address provided to the I2C driver.
+  - qcom,cdc-reset-gpio: gpio used for codec SOC reset.
+  - <supply-name>-supply: phandle to the regulator device tree node.
+  - qcom,<supply-name>-voltage - specifies voltage levels for the supply. Should be
+       specified in pairs (min, max), units uV.
+  - qcom,<supply-name>-current - specifies the max current in uA that can be drawn
+       from the <supply-name>.
+
+    The above three properties, with "supply-name" set to "qcom,cdc-vdd-buck",
+     "qcom,cdc-vdd-tx-h", "qcom,cdc-vdd-rx-h", "qcom,cdc-vddpx-1", "qcom,cdc-vdd-a-1p2v",
+     "qcom,cdc-vddcx-1" and "qcom,cdc-vddcx-2", should be present.
+
+ - qcom,cdc-micbias-ldoh-v - LDOH output in volts (should be between 1.95 V and 3.00 V).
+
+ - qcom,cdc-micbias-cfilt1-mv - cfilt1 output voltage in milli volts.
+ - qcom,cdc-micbias-cfilt2-mv - cfilt2 output voltage in milli volts.
+ - qcom,cdc-micbias-cfilt3-mv - cfilt3 output voltage in milli volts.
+   cfilt voltage can be set to max of qcom,cdc-micbias-ldoh-v - 0.15V.
+
+ - qcom,cdc-micbias1-cfilt-sel = cfilt to use for micbias1 (should be from 1 to 3).
+ - qcom,cdc-micbias2-cfilt-sel = cfilt to use for micbias2 (should be from 1 to 3).
+ - qcom,cdc-micbias3-cfilt-sel = cfilt to use for micbias3 (should be from 1 to 3).
+ - qcom,cdc-micbias4-cfilt-sel = cfilt to use for micbias4 (should be from 1 to 3).
+   This value represents the CFILT connected to the MIC bias.
+
+ - qcom,cdc-micbias1-ext-cap: Boolean. Enable micbias 1 external capacitor mode.
+ - qcom,cdc-micbias2-ext-cap: Boolean. Enable micbias 2 external capacitor mode.
+ - qcom,cdc-micbias3-ext-cap: Boolean. Enable micbias 3 external capacitor mode.
+ - qcom,cdc-micbias4-ext-cap: Boolean. Enable micbias 4 external capacitor mode.
+
+Example:
+i2c@f9925000 {
+	cell-index = <3>;
+	compatible = "qcom,i2c-qup";
+	reg = <0xf9925000 0x1000>;
+	#address-cells = <1>;
+	#size-cells = <0>;
+	reg-names = "qup_phys_addr";
+	interrupts = <0 97 0>;
+	interrupt-names = "qup_err_intr";
+	qcom,i2c-bus-freq = <100000>;
+	qcom,i2c-src-freq = <24000000>;
+
+	wcd9xxx_codec@0d{
+		compatible = "qcom,wcd9xxx-i2c";
+		reg = <0x0d>;
+		qcom,cdc-reset-gpio = <&msmgpio 22 0>;
+		interrupt-parent = <&wcd9xxx_intc>;
+		interrupts = <0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28>;
+
+		cdc-vdd-buck-supply = <&pm8019_l11>;
+		qcom,cdc-vdd-buck-voltage = <1800000 1800000>;
+		qcom,cdc-vdd-buck-current = <25000>;
+
+		cdc-vdd-tx-h-supply = <&pm8019_l11>;
+		qcom,cdc-vdd-tx-h-voltage = <1800000 1800000>;
+		qcom,cdc-vdd-tx-h-current = <25000>;
+
+		cdc-vdd-rx-h-supply = <&pm8019_l11>;
+		qcom,cdc-vdd-rx-h-voltage = <1800000 1800000>;
+		qcom,cdc-vdd-rx-h-current = <25000>;
+
+		cdc-vddpx-1-supply = <&pm8019_l11>;
+		qcom,cdc-vddpx-1-voltage = <1800000 1800000>;
+		qcom,cdc-vddpx-1-current = <10000>;
+
+		cdc-vdd-a-1p2v-supply = <&pm8019_l9>;
+		qcom,cdc-vdd-a-1p2v-voltage = <1200000 1200000>;
+		qcom,cdc-vdd-a-1p2v-current = <10000>;
+
+		cdc-vddcx-1-supply = <&pm8019_l9>;
+		qcom,cdc-vddcx-1-voltage = <1200000 1200000>;
+		qcom,cdc-vddcx-1-current = <10000>;
+
+		cdc-vddcx-2-supply = <&pm8019_l9>;
+		qcom,cdc-vddcx-2-voltage = <1200000 1200000>;
+		qcom,cdc-vddcx-2-current = <10000>;
+
+		qcom,cdc-micbias-ldoh-v = <0x3>;
+		qcom,cdc-micbias-cfilt1-mv = <1800>;
+		qcom,cdc-micbias-cfilt2-mv = <2700>;
+		qcom,cdc-micbias-cfilt3-mv = <1800>;
+		qcom,cdc-micbias1-cfilt-sel = <0x0>;
+		qcom,cdc-micbias2-cfilt-sel = <0x1>;
+		qcom,cdc-micbias3-cfilt-sel = <0x2>;
+		qcom,cdc-micbias4-cfilt-sel = <0x2>;
+	};
+
+	wcd9xxx_codec@77{
+		compatible = "qcom,wcd9xxx-i2c";
+		reg = <0x77>;
+	};
+
+	wcd9xxx_codec@66{
+		compatible = "qcom,wcd9xxx-i2c";
+		reg = <0x66>;
+	};
+	wcd9xxx_codec@55{
+		compatible = "qcom,wcd9xxx-i2c";
+		reg = <0x55>;
+	};
+};
diff --git a/Documentation/devicetree/bindings/tty/serial/msm_serial.txt b/Documentation/devicetree/bindings/tty/serial/msm_serial.txt
index e784bfa..ae7d736 100644
--- a/Documentation/devicetree/bindings/tty/serial/msm_serial.txt
+++ b/Documentation/devicetree/bindings/tty/serial/msm_serial.txt
@@ -34,15 +34,20 @@
 - reg : offset and length of the register set for the device.
 - interrupts : should contain the uart interrupt.
 
-Optional properties:
-- cell-index: An integer specifying the line number of the UART device that
-  represents this HSL hardware instance.
+Aliases:
+An alias may optionally be used to bind the serial device to a tty device
+(ttyHSLx) with a given line number. Aliases are of the form serial<n> where <n>
+is an integer representing the line number to use. On systems with multiple
+serial devices present it is recommended that an alias be defined for each such
+device.
 
 Example:
+	aliases {
+		serial0 = &uart0;       // This device will be called ttyHSL0
+	};
 
-	serial@19c400000 {
+	uart0: serial@19c40000 {
 		compatible = "qcom,msm-lsuart-v14"
 		reg = <0x19c40000 0x1000">;
 		interrupts = <195>;
-		cell-index = <0>;	// this device will be named ttyHSL0
 	};
diff --git a/Documentation/devicetree/bindings/usb/msm-ehci-hsic.txt b/Documentation/devicetree/bindings/usb/msm-ehci-hsic.txt
new file mode 100644
index 0000000..0e59f69
--- /dev/null
+++ b/Documentation/devicetree/bindings/usb/msm-ehci-hsic.txt
@@ -0,0 +1,20 @@
+MSM HSIC EHCI controller
+
+Required properties :
+- compatible : should be "qcom,hsic-host"
+- reg : offset and length of the register set in the memory map
+- interrupts: IRQ lines used by this controller
+- interrupt-names : Required interrupt resource entries are:
+            HSIC EHCI expects "core_irq" and optionally "async_irq".
+- <supply-name>-supply: handle to the regulator device tree node
+  Required "supply-name" is "HSIC_VDDCX" and optionally - "HSIC_GDSC".
+
+Example MSM HSIC EHCI controller device node :
+	hsic@f9a15000 {
+		compatible = "qcom,hsic-host";
+		reg = <0xf9a15000 0x400>;
+		interrupts = <0 136 0>;
+		interrupt-names = "core_irq";
+		HSIC_VDDCX-supply = <&pm8019_l12>;
+		HSIC_GDSC-supply = <&gdsc_usb_hsic>;
+	};
diff --git a/Documentation/devicetree/bindings/usb/msm-hsusb.txt b/Documentation/devicetree/bindings/usb/msm-hsusb.txt
index 7bff0f2..015822f 100644
--- a/Documentation/devicetree/bindings/usb/msm-hsusb.txt
+++ b/Documentation/devicetree/bindings/usb/msm-hsusb.txt
@@ -100,13 +100,24 @@
 - compatible: should be "qcom,usb-bam-msm"
 - reg  : pairs of physical base addresses and region sizes
             of all the memory mapped BAM devices present
-- reg-names : Register region name(s) referenced in reg above
-            SSUSB BAM expects "ssusb" and "hsusb" for HSSUB BAM.
-            Specify "qscratch_ram1_reg" to provide QSCRATCH's RAM1
-            register to control USB3 private memory for uses as BAM FIFOs.
+- reg-names : Register region name(s), in 1-1 correspondence with the
+	    registers in 'reg'. This list should contain at least as many names
+            as the number of unique values given in both 'usb-active-bam' and
+            all the subnodes' 'usb-bam-type' properties.
+
+            If SSUSB_BAM is used, "ssusb" should be present.
+            If HSUSB_BAM is used, "hsusb" should be present.
+            If HSIC_BAM is used, "hsic" should be present.
+
+            If a QSCRATCH RAM1 register is designated for providing USB3
+            private memory to use as a BAM FIFO, specify "qscratch_ram1_reg".
 - interrupts: IRQ lines for BAM devices
-- interrupt-names: BAM interrupt name(s) referenced in interrupts above
-            SSUSB BAM expects "ssusb" and "hsusb" for HSSUB BAM
+- interrupt-names: BAM interrupt name(s), in 1-1 correspondence with
+            'interrupts' above.
+
+            If SSUSB_BAM is used, "ssusb" should be present.
+            If HSUSB_BAM is used, "hsusb" should be present.
+            If HSIC_BAM is used, "hsic" should be present.
 - qcom,usb-active-bam: active BAM type. Can be one of
             0 - SSUSB_BAM
             1 - HSUSB_BAM
diff --git a/Documentation/devicetree/bindings/usb/msm-ssusb.txt b/Documentation/devicetree/bindings/usb/msm-ssusb.txt
index 57d776f..d686523 100644
--- a/Documentation/devicetree/bindings/usb/msm-ssusb.txt
+++ b/Documentation/devicetree/bindings/usb/msm-ssusb.txt
@@ -10,12 +10,20 @@
 	"irq" : Interrupt for DWC3 core
 	"otg_irq" : Interrupt for DWC3 core's OTG Events
 - <supply-name>-supply: phandle to the regulator device tree node
-  Required "supply-name" examples are "SSUSB_VDDCX", "SSUSB_1p8",
-  "HSUSB_VDDCX", "HSUSB_1p8", "HSUSB_3p3" and "vbus_dwc3".
+  Required "supply-name" examples are:
+	"SSUSB_1p8" : 1.8v supply for SSPHY
+	"HSUSB_1p8" : 1.8v supply for HSPHY
+	"HSUSB_3p3" : 3.3v supply for HSPHY
+	"vbus_dwc3" : vbus supply for host mode
+	"ssusb_vdd_dig" : vdd supply for SSPHY digital circuit operation
+	"hsusb_vdd_dig" : vdd supply for HSPHY digital circuit operation
 - qcom,dwc-usb3-msm-dbm-eps: Number of endpoints avaliable for
   the DBM (Device Bus Manager). The DBM is HW unit which is part of
   the MSM USB3.0 core (which also includes the Synopsys DesignWare
   USB3.0 controller)
+- qcom,vdd-voltage-level: This property must be a list of three integer
+  values (none, min, max) where each value represents either a voltage in
+  microvolts or a value corresponding to a voltage corner.
 
 Optional properties :
 - Refer to "Documentation/devicetree/bindings/arm/msm/msm_bus.txt" for
@@ -40,13 +48,14 @@
 		      <0xFD4AB000 0x4>;
 		interrupts = <0 131 0>, <0 179 0>, <0 133 0>;
 		interrupt-names = "irq", "otg_irq", "hs_phy_irq";
-		SSUSB_VDDCX-supply = <&pm8841_s2>;
+		ssusb_vdd_dig-supply = <&pm8841_s2_corner>;
 		SSUSB_1p8-supply = <&pm8941_l6>;
-		HSUSB_VDDCX-supply = <&pm8841_s2>;
+		hsusb_vdd_dig-supply = <&pm8841_s2_corner>;
 		HSUSB_1p8-supply = <&pm8941_l6>;
 		HSUSB_3p3-supply = <&pm8941_l24>;
 		vbus_dwc3-supply = <&pm8941_mvs1>;
 		qcom,dwc-usb3-msm-dbm-eps = <4>
+		qcom,vdd-voltage-level = <1 5 7>;
 
 		qcom,msm_bus,name = "usb3";
 		qcom,msm_bus,num_cases = <2>;
diff --git a/Documentation/trace/events-rpm-smd.txt b/Documentation/trace/events-rpm-smd.txt
new file mode 100644
index 0000000..f6c6bef
--- /dev/null
+++ b/Documentation/trace/events-rpm-smd.txt
@@ -0,0 +1,43 @@
+Subsystem Trace Points: rpm_smd
+
+The rpm-smd tracing system captures the events related to sending/receiving
+messages to/from RPM hardware. The tracing system adds the following events to
+capture the transactions with the RPM driver.
+
+1) RPM send message
+===================
+rpm_send_message: ctx:%s set:%s rsc_type:0x%x(%s), rsc_id:0x%x, id:%u
+
+The event captures the parameters of the message that was sent to the RPM.
+
+The 'ctx' parameter takes one of the following string constants to indicate
+whether the request was made in atomic or non-atomic context:
+ . "sleep" - non-atomic context
+ . "noslp" - atomic context
+
+The 'set' parameter takes one of the following string values to indicate
+whether the message affects the active or sleep set value of the resource:
+ . "act" - active set configuration
+ . "slp" - sleep set configuration
+
+The 'rsc_type' and 'rsc_id' identify the resource whose parameters are being
+modified. The 4-byte string equivalent of the resource type is also displayed
+for easier identification of resources.
+
+The 'id' parameter is the id that the RPM uses to acknowledge the receipt of
+the message in its ACK message.
+
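+For example, a trace entry for an atomic, active-set request to an RPM
+regulator resource might look like this (all values are illustrative):
+
+  rpm_send_message: ctx:noslp set:act rsc_type:0x616f646c(ldoa), rsc_id:0x3, id:10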
+
+2) RPM ack message
+=================
+rpm_ack_recd: ctx:%s id:%u
+
+The event captures the acknowledgement messages received from the RPM after
+successfully sending a message request.
+
+The 'ctx' parameter takes the same string constants described for the "RPM
+send message" event.
+
+The 'id' parameter is the id that the RPM uses to acknowledge the receipt of
+the message in its ACK message.
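+
+The matching acknowledgement for the request shown above would appear as
+(values illustrative):
+
+  rpm_ack_recd: ctx:noslp id:10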
+
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 5d5f9de..89c7417 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -264,6 +264,29 @@
 	def_bool y
 	depends on BUG
 
+config GENERIC_TIME_VSYSCALL
+	bool "Enable gettimeofday updates"
+	depends on CPU_V7
+	help
+	  Enables updating the kernel user helper area with the xtime struct
+	  data for gettimeofday via kernel user helpers.
+
+config ARM_USE_USER_ACCESSIBLE_TIMERS
+	bool "Enables mapping a timer counter page to user space"
+	depends on USE_USER_ACCESSIBLE_TIMERS && GENERIC_TIME_VSYSCALL
+	help
+	 Enables ARM-specific user-accessible timers via a shared
+	 memory page containing the cycle counter.
+
+config ARM_USER_ACCESSIBLE_TIMER_BASE
+	hex "Base address of user-accessible timer counter page"
+	default 0xfffef000
+	depends on ARM_USE_USER_ACCESSIBLE_TIMERS
+	help
+	 Specify the base user-space virtual address where the user-accessible
+	 timer counter page should be mapped by the kernel.  User-space apps
+	 will read directly from the page at this address.
+
 source "init/Kconfig"
 
 source "kernel/Kconfig.freezer"
diff --git a/arch/arm/boot/dts/mpq8092.dtsi b/arch/arm/boot/dts/mpq8092.dtsi
index 7961b78..502d34a 100644
--- a/arch/arm/boot/dts/mpq8092.dtsi
+++ b/arch/arm/boot/dts/mpq8092.dtsi
@@ -272,5 +272,33 @@
 	};
 };
 
+&gdsc_venus {
+	status = "ok";
+};
+
+&gdsc_mdss {
+	status = "ok";
+};
+
+&gdsc_jpeg {
+	status = "ok";
+};
+
+&gdsc_vfe {
+	status = "ok";
+};
+
+&gdsc_oxili_gx {
+	status = "ok";
+};
+
+&gdsc_oxili_cx {
+	status = "ok";
+};
+
+&gdsc_usb_hsic {
+	status = "ok";
+};
+
 /include/ "msm-pm8644.dtsi"
 /include/ "mpq8092-regulator.dtsi"
diff --git a/arch/arm/boot/dts/msm-gdsc.dtsi b/arch/arm/boot/dts/msm-gdsc.dtsi
index f83fe76..f0570ba 100644
--- a/arch/arm/boot/dts/msm-gdsc.dtsi
+++ b/arch/arm/boot/dts/msm-gdsc.dtsi
@@ -18,41 +18,48 @@
 		compatible = "qcom,gdsc";
 		regulator-name = "gdsc_venus";
 		reg = <0xfd8c1024 0x4>;
+		status = "disabled";
 	};
 
 	gdsc_mdss: qcom,gdsc@fd8c2304 {
 		compatible = "qcom,gdsc";
 		regulator-name = "gdsc_mdss";
 		reg = <0xfd8c2304 0x4>;
+		status = "disabled";
 	};
 
 	gdsc_jpeg: qcom,gdsc@fd8c35a4 {
 		compatible = "qcom,gdsc";
 		regulator-name = "gdsc_jpeg";
 		reg = <0xfd8c35a4 0x4>;
+		status = "disabled";
 	};
 
 	gdsc_vfe: qcom,gdsc@fd8c36a4 {
 		compatible = "qcom,gdsc";
 		regulator-name = "gdsc_vfe";
 		reg = <0xfd8c36a4 0x4>;
+		status = "disabled";
 	};
 
 	gdsc_oxili_gx: qcom,gdsc@fd8c4024 {
 		compatible = "qcom,gdsc";
 		regulator-name = "gdsc_oxili_gx";
 		reg = <0xfd8c4024 0x4>;
+		status = "disabled";
 	};
 
 	gdsc_oxili_cx: qcom,gdsc@fd8c4034 {
 		compatible = "qcom,gdsc";
 		regulator-name = "gdsc_oxili_cx";
 		reg = <0xfd8c4034 0x4>;
+		status = "disabled";
 	};
 
 	gdsc_usb_hsic: qcom,gdsc@fc400404 {
 		compatible = "qcom,gdsc";
 		regulator-name = "gdsc_usb_hsic";
 		reg = <0xfc400404 0x4>;
+		status = "disabled";
 	};
 };
diff --git a/arch/arm/boot/dts/msm-pm8941.dtsi b/arch/arm/boot/dts/msm-pm8941.dtsi
index 4ec5684..89d4df8 100644
--- a/arch/arm/boot/dts/msm-pm8941.dtsi
+++ b/arch/arm/boot/dts/msm-pm8941.dtsi
@@ -58,10 +58,10 @@
 			};
 		};
 
-		bms@4000 {
+		pm8941_bms: bms@4000 {
 			#address-cells = <1>;
 			#size-cells = <1>;
-
+			status = "disabled";
 			compatible = "qcom,qpnp-bms";
 			reg = <0x4000 0x100>;
 
@@ -90,7 +90,12 @@
 			qcom,bms-shutdown-soc-valid-limit = <20>;
 			qcom,bms-adjust-soc-low-threshold = <25>;
 			qcom,bms-adjust-soc-high-threshold = <45>;
+			qcom,bms-low-soc-calculate-soc-threshold = <15>;
+			qcom,bms-low-soc-calculate-soc-ms = <5000>;
+			qcom,bms-calculate-soc-ms = <20000>;
 			qcom,bms-chg-term-ua = <100000>;
+			qcom,bms-batt-type = <0>;
+			qcom,bms-use-voltage-soc;
 		};
 
 		clkdiv@5b00 {
diff --git a/arch/arm/boot/dts/msm8226.dtsi b/arch/arm/boot/dts/msm8226.dtsi
index f0e950a..b900c3f 100644
--- a/arch/arm/boot/dts/msm8226.dtsi
+++ b/arch/arm/boot/dts/msm8226.dtsi
@@ -12,6 +12,7 @@
 
 /include/ "skeleton.dtsi"
 /include/ "msm8226-ion.dtsi"
+/include/ "msm-gdsc.dtsi"
 
 / {
 	model = "Qualcomm MSM 8226";
@@ -73,6 +74,39 @@
 		compatible = "qcom,android-usb";
 	};
 
+	qcom,wdt@f9017000 {
+		compatible = "qcom,msm-watchdog";
+		reg = <0xf9017000 0x1000>;
+		interrupts = <0 3 0>, <0 4 0>;
+		qcom,bark-time = <11000>;
+		qcom,pet-time = <10000>;
+		qcom,ipi-ping = <1>;
+	};
+
+};
+
+&gdsc_venus {
+	status = "ok";
+};
+
+&gdsc_mdss {
+	status = "ok";
+};
+
+&gdsc_jpeg {
+	status = "ok";
+};
+
+&gdsc_vfe {
+	status = "ok";
+};
+
+&gdsc_oxili_cx {
+	status = "ok";
+};
+
+&gdsc_usb_hsic {
+	status = "ok";
 };
 
 /include/ "msm8226-regulator.dtsi"
diff --git a/arch/arm/boot/dts/msm8910-ion.dtsi b/arch/arm/boot/dts/msm8910-ion.dtsi
new file mode 100644
index 0000000..88bb1ab
--- /dev/null
+++ b/arch/arm/boot/dts/msm8910-ion.dtsi
@@ -0,0 +1,62 @@
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/ {
+	qcom,ion {
+		compatible = "qcom,msm-ion";
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		qcom,ion-heap@30 { /* SYSTEM HEAP */
+			reg = <30>;
+		};
+
+		qcom,ion-heap@8 { /* CP_MM HEAP */
+			compatible = "qcom,msm-ion-reserve";
+			reg = <8>;
+			qcom,heap-align = <0x1000>;
+			qcom,memory-reservation-type = "EBI1"; /* reserve EBI memory */
+			qcom,memory-reservation-size = <0x3800000>;
+		};
+
+		qcom,ion-heap@25 { /* IOMMU HEAP */
+			reg = <25>;
+		};
+
+		qcom,ion-heap@27 { /* QSECOM HEAP */
+			compatible = "qcom,msm-ion-reserve";
+			reg = <27>;
+			qcom,heap-align = <0x1000>;
+			qcom,memory-reservation-type = "EBI1"; /* reserve EBI memory */
+			qcom,memory-reservation-size = <0x780000>;
+		};
+
+		qcom,ion-heap@28 { /* AUDIO HEAP */
+			compatible = "qcom,msm-ion-reserve";
+			reg = <28>;
+			qcom,heap-align = <0x1000>;
+			qcom,memory-reservation-type = "EBI1"; /* reserve EBI memory */
+			qcom,memory-reservation-size = <0x314000>;
+		};
+
+		qcom,ion-heap@29 { /* FIRMWARE HEAP */
+			compatible = "qcom,msm-ion-reserve";
+			reg = <29>;
+			qcom,heap-align = <0x20000>;
+			qcom,heap-adjacent = <8>;
+			qcom,memory-reservation-type = "EBI1"; /* reserve EBI memory */
+			qcom,memory-reservation-size = <0xA00000>;
+		};
+
+	};
+};
+
diff --git a/arch/arm/boot/dts/msm8910-rumi.dts b/arch/arm/boot/dts/msm8910-rumi.dts
new file mode 100644
index 0000000..0d944aa
--- /dev/null
+++ b/arch/arm/boot/dts/msm8910-rumi.dts
@@ -0,0 +1,25 @@
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/dts-v1/;
+
+/include/ "msm8910.dtsi"
+
+/ {
+	model = "Qualcomm MSM 8910 Rumi";
+	compatible = "qcom,msm8910-rumi", "qcom,msm8910";
+	qcom,msm-id = <147 1 0>;
+
+	serial@f991f000 {
+		status = "ok";
+	};
+};
diff --git a/arch/arm/boot/dts/msm8910.dtsi b/arch/arm/boot/dts/msm8910.dtsi
index 1f3c1d8..9514e5a 100644
--- a/arch/arm/boot/dts/msm8910.dtsi
+++ b/arch/arm/boot/dts/msm8910.dtsi
@@ -11,6 +11,7 @@
  */
 
 /include/ "skeleton.dtsi"
+/include/ "msm8910-ion.dtsi"
 
 / {
 	model = "Qualcomm MSM 8910";
@@ -73,13 +74,25 @@
 		interrupts = <0 123 0>;
 		interrupt-names = "core_irq";
 
+		vdd-supply = <&pm8110_l17>;
+		qcom,vdd-always-on;
+		qcom,vdd-lpm-sup;
+		qcom,vdd-voltage-level = <2900000 2900000>;
+		qcom,vdd-current-level = <9000 400000>;
+
+		vdd-io-supply = <&pm8110_l6>;
+		qcom,vdd-io-always-on;
+		qcom,vdd-io-lpm-sup;
+		qcom,vdd-io-voltage-level = <1800000 1800000>;
+		qcom,vdd-io-current-level = <9000 60000>;
+
 		qcom,pad-pull-on = <0x0 0x3 0x3>; /* no-pull, pull-up, pull-up */
 		qcom,pad-pull-off = <0x0 0x3 0x3>; /* no-pull, pull-up, pull-up */
 		qcom,pad-drv-on = <0x7 0x4 0x4>; /* 16mA, 10mA, 10mA */
 		qcom,pad-drv-off = <0x0 0x0 0x0>; /* 2mA, 2mA, 2mA */
 
 		qcom,clk-rates = <400000 25000000 50000000 100000000 200000000>;
-		qcom,sup-voltages = <2950 2950>;
+		qcom,sup-voltages = <2900 2900>;
 		qcom,bus-width = <8>;
 		qcom,nonremovable;
 		qcom,bus-speed-mode = "HS200_1p8v", "DDR_1p8v";
@@ -93,6 +106,14 @@
 		interrupts = <0 125 0>;
 		interrupt-names = "core_irq";
 
+		vdd-supply = <&pm8110_l18>;
+		qcom,vdd-voltage-level = <2950000 2950000>;
+		qcom,vdd-current-level = <9000 400000>;
+
+		vdd-io-supply = <&pm8110_l21>;
+		qcom,vdd-io-voltage-level = <1800000 2950000>;
+		qcom,vdd-io-current-level = <9000 50000>;
+
 		qcom,pad-pull-on = <0x0 0x3 0x3>; /* no-pull, pull-up, pull-up */
 		qcom,pad-pull-off = <0x0 0x3 0x3>; /* no-pull, pull-up, pull-up */
 		qcom,pad-drv-on = <0x7 0x4 0x4>; /* 16mA, 10mA, 10mA */
@@ -106,6 +127,73 @@
 		qcom,current-limit = <800>;
 	};
 
+	qcom,smem@fa00000 {
+		compatible = "qcom,smem";
+		reg = <0xfa00000 0x200000>,
+			<0xfa006000 0x1000>,
+			<0xfc428000 0x4000>;
+		reg-names = "smem", "irq-reg-base", "aux-mem1";
+
+		qcom,smd-modem {
+			compatible = "qcom,smd";
+			qcom,smd-edge = <0>;
+			qcom,smd-irq-offset = <0x8>;
+			qcom,smd-irq-bitmask = <0x1000>;
+			qcom,pil-string = "modem";
+			interrupts = <0 25 1>;
+		};
+
+		qcom,smsm-modem {
+			compatible = "qcom,smsm";
+			qcom,smsm-edge = <0>;
+			qcom,smsm-irq-offset = <0x8>;
+			qcom,smsm-irq-bitmask = <0x2000>;
+			interrupts = <0 26 1>;
+		};
+
+		qcom,smd-adsp {
+			compatible = "qcom,smd";
+			qcom,smd-edge = <1>;
+			qcom,smd-irq-offset = <0x8>;
+			qcom,smd-irq-bitmask = <0x100>;
+			qcom,pil-string = "adsp";
+			interrupts = <0 156 1>;
+		};
+
+		qcom,smsm-adsp {
+			compatible = "qcom,smsm";
+			qcom,smsm-edge = <1>;
+			qcom,smsm-irq-offset = <0x8>;
+			qcom,smsm-irq-bitmask = <0x200>;
+			interrupts = <0 157 1>;
+		};
+
+		qcom,smd-wcnss {
+			compatible = "qcom,smd";
+			qcom,smd-edge = <6>;
+			qcom,smd-irq-offset = <0x8>;
+			qcom,smd-irq-bitmask = <0x20000>;
+			qcom,pil-string = "wcnss";
+			interrupts = <0 142 1>;
+		};
+
+		qcom,smsm-wcnss {
+			compatible = "qcom,smsm";
+			qcom,smsm-edge = <6>;
+			qcom,smsm-irq-offset = <0x8>;
+			qcom,smsm-irq-bitmask = <0x80000>;
+			interrupts = <0 144 1>;
+		};
+
+		qcom,smd-rpm {
+			compatible = "qcom,smd";
+			qcom,smd-edge = <15>;
+			qcom,smd-irq-offset = <0x8>;
+			qcom,smd-irq-bitmask = <0x1>;
+			interrupts = <0 168 1>;
+			qcom,irq-no-suspend;
+		};
+	};
 };
 
 /include/ "msm8910-regulator.dtsi"
diff --git a/arch/arm/boot/dts/msm8974-camera-sensor-liquid.dtsi b/arch/arm/boot/dts/msm8974-camera-sensor-liquid.dtsi
new file mode 100644
index 0000000..25f79f8
--- /dev/null
+++ b/arch/arm/boot/dts/msm8974-camera-sensor-liquid.dtsi
@@ -0,0 +1,98 @@
+/*
+ * Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+&cci {
+
+	qcom,camera@6e {
+		compatible = "qcom,s5k3l1yx";
+		reg = <0x6e 0x0>;
+		qcom,csi-if = <1>;
+		qcom,csid-core = <0>;
+		qcom,flash-type = <0>;
+		qcom,mount-angle = <0>;
+		qcom,sensor-name = "s5k3l1yx";
+		cam_vdig-supply = <&pm8941_l3>;
+		cam_vana-supply = <&pm8941_l17>;
+		cam_vio-supply = <&pm8941_lvs3>;
+		cam_vaf-supply = <&pm8941_l23>;
+		qcom,cam-vreg-name = "cam_vdig", "cam_vana", "cam_vio",
+				     "cam_vaf";
+		qcom,cam-vreg-type = <0 0 1 0>;
+		qcom,cam-vreg-min-voltage = <1225000 2850000 0 3000000>;
+		qcom,cam-vreg-max-voltage = <1225000 2850000 0 3000000>;
+		qcom,cam-vreg-op-mode = <105000 80000 0 100000>;
+		qcom,gpio-no-mux = <0>;
+		gpios = <&msmgpio 15 0>,
+			<&msmgpio 19 0>,
+			<&msmgpio 20 0>,
+			<&msmgpio 90 0>;
+		qcom,gpio-common-tbl-num = <0 1 2>;
+		qcom,gpio-common-tbl-flags = <1 1 1>;
+		qcom,gpio-common-tbl-label = "CAMIF_MCLK",
+					     "CAMIF_I2C_DATA",
+					     "CAMIF_I2C_CLK";
+		qcom,gpio-req-tbl-num = <3>;
+		qcom,gpio-req-tbl-flags = <0>;
+		qcom,gpio-req-tbl-label = "CAM_RESET1";
+		qcom,gpio-set-tbl-num = <3 3>;
+		qcom,gpio-set-tbl-flags = <0 2>;
+		qcom,gpio-set-tbl-delay = <1000 30000>;
+		qcom,csi-lane-assign = <0x4320>;
+		qcom,csi-lane-mask = <0x1F>;
+		qcom,csi-phy-sel = <0>;
+		qcom,camera-type = <0>;
+		qcom,sensor-type = <0>;
+		status = "ok";
+	};
+
+	qcom,camera@6c {
+		compatible = "qcom,ov2720";
+		reg = <0x6c 0x0>;
+		qcom,csi-if = <1>;
+		qcom,csid-core = <0>;
+		qcom,flash-type = <0>;
+		qcom,mount-angle = <180>;
+		qcom,sensor-name = "ov2720";
+		cam_vdig-supply = <&pm8941_l3>;
+		cam_vana-supply = <&pm8941_l17>;
+		cam_vio-supply = <&pm8941_lvs2>;
+		qcom,cam-vreg-name = "cam_vdig", "cam_vana", "cam_vio";
+		qcom,cam-vreg-type = <0 0 1>;
+		qcom,cam-vreg-min-voltage = <1225000 2850000 0>;
+		qcom,cam-vreg-max-voltage = <1225000 2850000 0>;
+		qcom,cam-vreg-op-mode = <105000 80000 0>;
+		qcom,gpio-no-mux = <0>;
+		gpios = <&msmgpio 17 0>,
+			<&msmgpio 19 0>,
+			<&msmgpio 20 0>,
+			<&msmgpio 18 0>;
+		qcom,gpio-common-tbl-num = <0 1 2>;
+		qcom,gpio-common-tbl-flags = <1 1 1>;
+		qcom,gpio-common-tbl-label = "CAMIF_MCLK",
+					     "CAMIF_I2C_DATA",
+					     "CAMIF_I2C_CLK";
+		qcom,gpio-req-tbl-num = <3>;
+		qcom,gpio-req-tbl-flags = <0>;
+		qcom,gpio-req-tbl-label = "CAM_RESET1";
+		qcom,gpio-set-tbl-num = <3 3>;
+		qcom,gpio-set-tbl-flags = <0 2>;
+		qcom,gpio-set-tbl-delay = <1000 4000>;
+		qcom,csi-lane-assign = <0x4320>;
+		qcom,csi-lane-mask = <0x7>;
+		qcom,csi-phy-sel = <2>;
+		qcom,camera-type = <1>;
+		qcom,sensor-type = <0>;
+		status = "ok";
+	};
+};
diff --git a/arch/arm/boot/dts/msm8974-camera-sensor.dtsi b/arch/arm/boot/dts/msm8974-camera-sensor.dtsi
new file mode 100644
index 0000000..d804355
--- /dev/null
+++ b/arch/arm/boot/dts/msm8974-camera-sensor.dtsi
@@ -0,0 +1,97 @@
+/*
+ * Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+&cci {
+
+	qcom,camera@6e {
+		compatible = "qcom,s5k3l1yx";
+		reg = <0x6e 0x0>;
+		qcom,csi-if = <1>;
+		qcom,csid-core = <0>;
+		qcom,flash-type = <0>;
+		qcom,mount-angle = <90>;
+		qcom,sensor-name = "s5k3l1yx";
+		cam_vdig-supply = <&pm8941_l3>;
+		cam_vana-supply = <&pm8941_l17>;
+		cam_vio-supply = <&pm8941_lvs3>;
+		cam_vaf-supply = <&pm8941_l23>;
+		qcom,cam-vreg-name = "cam_vdig", "cam_vana", "cam_vio",
+				     "cam_vaf";
+		qcom,cam-vreg-type = <0 0 1 0>;
+		qcom,cam-vreg-min-voltage = <1225000 2850000 0 3000000>;
+		qcom,cam-vreg-max-voltage = <1225000 2850000 0 3000000>;
+		qcom,cam-vreg-op-mode = <105000 80000 0 100000>;
+		qcom,gpio-no-mux = <0>;
+		gpios = <&msmgpio 15 0>,
+			<&msmgpio 19 0>,
+			<&msmgpio 20 0>,
+			<&msmgpio 90 0>;
+		qcom,gpio-common-tbl-num = <0 1 2>;
+		qcom,gpio-common-tbl-flags = <1 1 1>;
+		qcom,gpio-common-tbl-label = "CAMIF_MCLK",
+					     "CAMIF_I2C_DATA",
+					     "CAMIF_I2C_CLK";
+		qcom,gpio-req-tbl-num = <3>;
+		qcom,gpio-req-tbl-flags = <0>;
+		qcom,gpio-req-tbl-label = "CAM_RESET1";
+		qcom,gpio-set-tbl-num = <3 3>;
+		qcom,gpio-set-tbl-flags = <0 2>;
+		qcom,gpio-set-tbl-delay = <1000 30000>;
+		qcom,csi-lane-assign = <0x4320>;
+		qcom,csi-lane-mask = <0x1F>;
+		qcom,csi-phy-sel = <0>;
+		qcom,camera-type = <0>;
+		qcom,sensor-type = <0>;
+		status = "ok";
+	};
+
+	qcom,camera@6c {
+		compatible = "qcom,ov2720";
+		reg = <0x6c 0x0>;
+		qcom,csi-if = <1>;
+		qcom,csid-core = <0>;
+		qcom,flash-type = <0>;
+		qcom,mount-angle = <180>;
+		qcom,sensor-name = "ov2720";
+		cam_vdig-supply = <&pm8941_l3>;
+		cam_vana-supply = <&pm8941_l17>;
+		cam_vio-supply = <&pm8941_lvs3>;
+		qcom,cam-vreg-name = "cam_vdig", "cam_vana", "cam_vio";
+		qcom,cam-vreg-type = <0 0 1>;
+		qcom,cam-vreg-min-voltage = <1225000 2850000 0>;
+		qcom,cam-vreg-max-voltage = <1225000 2850000 0>;
+		qcom,cam-vreg-op-mode = <105000 80000 0>;
+		qcom,gpio-no-mux = <0>;
+		gpios = <&msmgpio 17 0>,
+			<&msmgpio 19 0>,
+			<&msmgpio 20 0>,
+			<&msmgpio 18 0>;
+		qcom,gpio-common-tbl-num = <0 1 2>;
+		qcom,gpio-common-tbl-flags = <1 1 1>;
+		qcom,gpio-common-tbl-label = "CAMIF_MCLK",
+					     "CAMIF_I2C_DATA",
+					     "CAMIF_I2C_CLK";
+		qcom,gpio-req-tbl-num = <3>;
+		qcom,gpio-req-tbl-flags = <0>;
+		qcom,gpio-req-tbl-label = "CAM_RESET1";
+		qcom,gpio-set-tbl-num = <3 3>;
+		qcom,gpio-set-tbl-flags = <0 2>;
+		qcom,gpio-set-tbl-delay = <1000 4000>;
+		qcom,csi-lane-assign = <0x4320>;
+		qcom,csi-lane-mask = <0x7>;
+		qcom,csi-phy-sel = <2>;
+		qcom,camera-type = <1>;
+		qcom,sensor-type = <0>;
+		status = "ok";
+	};
+};
diff --git a/arch/arm/boot/dts/msm8974-camera.dtsi b/arch/arm/boot/dts/msm8974-camera.dtsi
index fd652a0..4b96811 100644
--- a/arch/arm/boot/dts/msm8974-camera.dtsi
+++ b/arch/arm/boot/dts/msm8974-camera.dtsi
@@ -165,7 +165,7 @@
 		vdd-supply = <&gdsc_vfe>;
 	};
 
-	qcom,cci@fda0C000 {
+	cci: qcom,cci@fda0C000 {
 		cell-index = <0>;
 		compatible = "qcom,cci";
 		reg = <0xfda0C000 0x1000>;
@@ -177,83 +177,11 @@
 		interrupt-names = "cci";
 
 		qcom,camera@6e {
-			compatible = "qcom,s5k3l1yx";
-			reg = <0x6e 0x0>;
-			qcom,csi-if = <1>;
-			qcom,csid-core = <0>;
-			qcom,flash-type = <0>;
-			qcom,mount-angle = <90>;
-			qcom,sensor-name = "s5k3l1yx";
-			cam_vdig-supply = <&pm8941_l3>;
-			cam_vana-supply = <&pm8941_l17>;
-			cam_vio-supply = <&pm8941_lvs3>;
-			cam_vaf-supply = <&pm8941_l23>;
-			qcom,cam-vreg-name = "cam_vdig", "cam_vana", "cam_vio",
-					     "cam_vaf";
-			qcom,cam-vreg-type = <0 0 1 0>;
-			qcom,cam-vreg-min-voltage = <1225000 2850000 0 3000000>;
-			qcom,cam-vreg-max-voltage = <1225000 2850000 0 3000000>;
-			qcom,cam-vreg-op-mode = <105000 80000 0 100000>;
-			qcom,gpio-no-mux = <0>;
-			gpios = <&msmgpio 15 0>,
-				<&msmgpio 19 0>,
-				<&msmgpio 20 0>,
-				<&msmgpio 90 0>;
-			qcom,gpio-common-tbl-num = <0 1 2>;
-			qcom,gpio-common-tbl-flags = <1 1 1>;
-			qcom,gpio-common-tbl-label = "CAMIF_MCLK",
-						     "CAMIF_I2C_DATA",
-						     "CAMIF_I2C_CLK";
-			qcom,gpio-req-tbl-num = <3>;
-			qcom,gpio-req-tbl-flags = <0>;
-			qcom,gpio-req-tbl-label = "CAM_RESET1";
-			qcom,gpio-set-tbl-num = <3 3>;
-			qcom,gpio-set-tbl-flags = <0 2>;
-			qcom,gpio-set-tbl-delay = <1000 30000>;
-			qcom,csi-lane-assign = <0x4320>;
-			qcom,csi-lane-mask = <0x1F>;
-			qcom,csi-phy-sel = <0>;
-			qcom,camera-type = <0>;
-			qcom,sensor-type = <0>;
+			status = "disabled";
 		};
 
 		qcom,camera@6c {
-			compatible = "qcom,ov2720";
-			reg = <0x6c 0x0>;
-			qcom,csi-if = <1>;
-			qcom,csid-core = <0>;
-			qcom,flash-type = <0>;
-			qcom,mount-angle = <0>;
-			qcom,sensor-name = "ov2720";
-			cam_vdig-supply = <&pm8941_l3>;
-			cam_vana-supply = <&pm8941_l17>;
-			cam_vio-supply = <&pm8941_lvs3>;
-			qcom,cam-vreg-name = "cam_vdig", "cam_vana", "cam_vio";
-			qcom,cam-vreg-type = <0 0 1>;
-			qcom,cam-vreg-min-voltage = <1225000 2850000 0>;
-			qcom,cam-vreg-max-voltage = <1225000 2850000 0>;
-			qcom,cam-vreg-op-mode = <105000 80000 0>;
-			qcom,gpio-no-mux = <0>;
-			gpios = <&msmgpio 16 0>,
-				<&msmgpio 19 0>,
-				<&msmgpio 20 0>,
-				<&msmgpio 92 0>;
-			qcom,gpio-common-tbl-num = <0 1 2>;
-			qcom,gpio-common-tbl-flags = <1 1 1>;
-			qcom,gpio-common-tbl-label = "CAMIF_MCLK",
-						     "CAMIF_I2C_DATA",
-						     "CAMIF_I2C_CLK";
-			qcom,gpio-req-tbl-num = <3>;
-			qcom,gpio-req-tbl-flags = <0>;
-			qcom,gpio-req-tbl-label = "CAM_RESET1";
-			qcom,gpio-set-tbl-num = <3 3>;
-			qcom,gpio-set-tbl-flags = <0 2>;
-			qcom,gpio-set-tbl-delay = <1000 4000>;
-			qcom,csi-lane-assign = <0x4320>;
-			qcom,csi-lane-mask = <0x7>;
-			qcom,csi-phy-sel = <1>;
-			qcom,camera-type = <1>;
-			qcom,sensor-type = <0>;
+			status = "disabled";
 		};
 
 		qcom,camera@90 {
diff --git a/arch/arm/boot/dts/msm8974-cdp.dtsi b/arch/arm/boot/dts/msm8974-cdp.dtsi
index 6004d15..7557fd1 100644
--- a/arch/arm/boot/dts/msm8974-cdp.dtsi
+++ b/arch/arm/boot/dts/msm8974-cdp.dtsi
@@ -11,6 +11,8 @@
  */
 
 /include/ "dsi-panel-toshiba-720p-video.dtsi"
+/include/ "msm8974-camera-sensor.dtsi"
+/include/ "msm8974-leds.dtsi"
 
 / {
 	serial@f991e000 {
@@ -152,6 +154,103 @@
 			vdd-phy-supply = <&spi_eth_vreg>;
 		};
 	};
+
+	sound {
+		compatible = "qcom,msm8974-audio-taiko";
+		qcom,model = "msm8974-taiko-cdp-snd-card";
+
+		qcom,audio-routing =
+			"RX_BIAS", "MCLK",
+			"LDO_H", "MCLK",
+			"Ext Spk Bottom Pos", "LINEOUT1",
+			"Ext Spk Bottom Neg", "LINEOUT3",
+			"Ext Spk Top Pos", "LINEOUT2",
+			"Ext Spk Top Neg", "LINEOUT4",
+			"AMIC1", "MIC BIAS1 Internal1",
+			"MIC BIAS1 Internal1", "Handset Mic",
+			"AMIC2", "MIC BIAS2 External",
+			"MIC BIAS2 External", "Headset Mic",
+			"AMIC3", "MIC BIAS2 External",
+			"MIC BIAS2 External", "ANCRight Headset Mic",
+			"AMIC4", "MIC BIAS2 External",
+			"MIC BIAS2 External", "ANCLeft Headset Mic",
+			"DMIC1", "MIC BIAS1 External",
+			"MIC BIAS1 External", "Digital Mic1",
+			"DMIC2", "MIC BIAS1 External",
+			"MIC BIAS1 External", "Digital Mic2",
+			"DMIC3", "MIC BIAS3 External",
+			"MIC BIAS3 External", "Digital Mic3",
+			"DMIC4", "MIC BIAS3 External",
+			"MIC BIAS3 External", "Digital Mic4",
+			"DMIC5", "MIC BIAS4 External",
+			"MIC BIAS4 External", "Digital Mic5",
+			"DMIC6", "MIC BIAS4 External",
+			"MIC BIAS4 External", "Digital Mic6";
+
+		qcom,cdc-mclk-gpios = <&pm8941_gpios 15 0>;
+		taiko-mclk-clk = <&pm8941_clkdiv1>;
+		qcom,taiko-mclk-clk-freq = <9600000>;
+	};
+};
+
+&spmi_bus {
+	qcom,pm8941@1 {
+		qcom,leds@d800 {
+			status = "okay";
+			qcom,wled_0 {
+				label = "wled";
+				linux,name = "wled:backlight";
+				linux,default-trigger = "bkl-trigger";
+				qcom,cs-out-en;
+				qcom,op-fdbck;
+				qcom,default-state = "off";
+				qcom,max-current = <25>;
+				qcom,ctrl-delay-us = <0>;
+				qcom,boost-curr-lim = <3>;
+				qcom,cp-sel = <0>;
+				qcom,switch-freq = <2>;
+				qcom,ovp-val = <2>;
+				qcom,num-strings = <1>;
+				qcom,id = <0>;
+			};
+		};
+
+		qcom,leds@d900 {
+			status = "disabled";
+		};
+
+		qcom,leds@da00 {
+			status = "disabled";
+		};
+
+		qcom,leds@db00 {
+			status = "disabled";
+		};
+
+		qcom,leds@dc00 {
+			status = "disabled";
+		};
+
+		qcom,leds@dd00 {
+			status = "disabled";
+		};
+
+		qcom,leds@de00 {
+			status = "disabled";
+		};
+
+		qcom,leds@df00 {
+			status = "disabled";
+		};
+
+		qcom,leds@e000 {
+			status = "disabled";
+		};
+
+		qcom,leds@e100 {
+			status = "disabled";
+		};
+	};
 };
 
 &sdcc2 {
diff --git a/arch/arm/boot/dts/msm8974-fluid.dtsi b/arch/arm/boot/dts/msm8974-fluid.dtsi
index 938bc22..cf45ceb 100644
--- a/arch/arm/boot/dts/msm8974-fluid.dtsi
+++ b/arch/arm/boot/dts/msm8974-fluid.dtsi
@@ -11,6 +11,8 @@
  */
 
 /include/ "dsi-panel-toshiba-720p-video.dtsi"
+/include/ "msm8974-camera-sensor.dtsi"
+/include/ "msm8974-leds.dtsi"
 
 / {
 	serial@f991e000 {
@@ -108,6 +110,22 @@
 		};
 	};
 
+	i2c@f9967000 {
+		sii8334@72 {
+			compatible = "qcom,mhl-sii8334";
+			reg = <0x72>;
+			interrupt-parent = <&msmgpio>;
+			interrupts = <82 0x8>;
+			mhl-intr-gpio = <&msmgpio 82 0>;
+			mhl-pwr-gpio = <&msmgpio 12 0>;
+			mhl-rst-gpio = <&pm8941_mpps 8 0>;
+			avcc_18-supply = <&pm8941_l24>;
+			avcc_12-supply = <&pm8941_l2>;
+			smps3a-supply = <&pm8941_s3>;
+			vdda-supply = <&pm8941_l12>;
+		};
+	};
+
 	gpio_keys {
 		compatible = "gpio-keys";
 		input-name = "gpio-keys";
@@ -152,6 +170,103 @@
 			vdd-phy-supply = <&spi_eth_vreg>;
 		};
 	};
+
+	sound {
+		compatible = "qcom,msm8974-audio-taiko";
+		qcom,model = "msm8974-taiko-fluid-snd-card";
+
+		qcom,audio-routing =
+			"RX_BIAS", "MCLK",
+			"LDO_H", "MCLK",
+			"Ext Spk Bottom Pos", "LINEOUT1",
+			"Ext Spk Bottom Neg", "LINEOUT3",
+			"Ext Spk Top Pos", "LINEOUT2",
+			"Ext Spk Top Neg", "LINEOUT4",
+			"AMIC1", "MIC BIAS1 Internal1",
+			"MIC BIAS1 Internal1", "Handset Mic",
+			"AMIC2", "MIC BIAS2 External",
+			"MIC BIAS2 External", "Headset Mic",
+			"AMIC3", "MIC BIAS2 External",
+			"MIC BIAS2 External", "ANCRight Headset Mic",
+			"AMIC4", "MIC BIAS2 External",
+			"MIC BIAS2 External", "ANCLeft Headset Mic",
+			"DMIC1", "MIC BIAS1 External",
+			"MIC BIAS1 External", "Digital Mic1",
+			"DMIC2", "MIC BIAS1 External",
+			"MIC BIAS1 External", "Digital Mic2",
+			"DMIC3", "MIC BIAS3 External",
+			"MIC BIAS3 External", "Digital Mic3",
+			"DMIC4", "MIC BIAS3 External",
+			"MIC BIAS3 External", "Digital Mic4",
+			"DMIC5", "MIC BIAS4 External",
+			"MIC BIAS4 External", "Digital Mic5",
+			"DMIC6", "MIC BIAS4 External",
+			"MIC BIAS4 External", "Digital Mic6";
+
+		qcom,cdc-mclk-gpios = <&pm8941_gpios 15 0>;
+		taiko-mclk-clk = <&pm8941_clkdiv1>;
+		qcom,taiko-mclk-clk-freq = <9600000>;
+	};
+};
+
+&spmi_bus {
+	qcom,pm8941@1 {
+		qcom,leds@d800 {
+			status = "okay";
+			qcom,wled_0 {
+				label = "wled";
+				linux,name = "wled:backlight";
+				linux,default-trigger = "bkl-trigger";
+				qcom,cs-out-en;
+				qcom,op-fdbck;
+				qcom,default-state = "off";
+				qcom,max-current = <25>;
+				qcom,ctrl-delay-us = <0>;
+				qcom,boost-curr-lim = <3>;
+				qcom,cp-sel = <0>;
+				qcom,switch-freq = <2>;
+				qcom,ovp-val = <2>;
+				qcom,num-strings = <1>;
+				qcom,id = <0>;
+			};
+		};
+
+		qcom,leds@d900 {
+			status = "disabled";
+		};
+
+		qcom,leds@da00 {
+			status = "disabled";
+		};
+
+		qcom,leds@db00 {
+			status = "disabled";
+		};
+
+		qcom,leds@dc00 {
+			status = "disabled";
+		};
+
+		qcom,leds@dd00 {
+			status = "disabled";
+		};
+
+		qcom,leds@de00 {
+			status = "disabled";
+		};
+
+		qcom,leds@df00 {
+			status = "disabled";
+		};
+
+		qcom,leds@e000 {
+			status = "disabled";
+		};
+
+		qcom,leds@e100 {
+			status = "disabled";
+		};
+	};
 };
 
 &sdcc1 {
@@ -218,6 +333,13 @@
 	};
 
 	gpio@cb00 { /* GPIO 12 */
+		qcom,mode = <1>;		/* QPNP_PIN_MODE_DIG_OUT */
+		qcom,output-type = <0>;		/* QPNP_PIN_OUT_BUF_CMOS */
+		qcom,pull = <5>;		/* QPNP_PIN_PULL_NO */
+		qcom,vin-sel = <2>;		/* QPNP_PIN_VIN2 */
+		qcom,out-strength = <2>;	/* QPNP_PIN_OUT_STRENGTH_MED */
+		qcom,src-select = <0>;		/* QPNP_PIN_SEL_FUNC_CONSTANT */
+		qcom,master-en = <1>;
 	};
 
 	gpio@cc00 { /* GPIO 13 */
@@ -346,6 +468,12 @@
 	};
 
 	mpp@a700 { /* MPP 8 */
+		qcom,mode = <1>; /* DIG_OUT */
+		qcom,output-type = <0>; /* CMOS */
+		qcom,pull-up = <0>;
+		qcom,vin-sel = <2>; /* PM8941_S3 1.8V > 1.6V */
+		qcom,src-select = <0>; /* CONSTANT */
+		qcom,master-en = <1>; /* ENABLE MPP */
 	};
 };
 
diff --git a/arch/arm/boot/dts/msm8974-leds.dtsi b/arch/arm/boot/dts/msm8974-leds.dtsi
new file mode 100644
index 0000000..89bb687
--- /dev/null
+++ b/arch/arm/boot/dts/msm8974-leds.dtsi
@@ -0,0 +1,104 @@
+/*
+ * Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+&spmi_bus {
+	qcom,pm8941@1 {
+		qcom,leds@d000 {
+			status = "okay";
+			qcom,rgb_0 {
+				label = "rgb";
+				linux,name = "led:rgb_red";
+				qcom,mode = <0>;
+				qcom,pwm-channel = <6>;
+				qcom,pwm-us = <1000>;
+				qcom,max-current = <12>;
+				qcom,default-state = "off";
+				qcom,id = <3>;
+				linux,default-trigger =
+					"battery-charging";
+			};
+
+			qcom,rgb_1 {
+				label = "rgb";
+				linux,name = "led:rgb_green";
+				qcom,mode = <0>;
+				qcom,pwm-channel = <5>;
+				qcom,pwm-us = <1000>;
+				qcom,max-current = <12>;
+				qcom,default-state = "off";
+				qcom,id = <4>;
+				linux,default-trigger = "battery-full";
+			};
+		};
+
+		qcom,leds@d100 {
+			status = "disabled";
+		};
+
+		qcom,leds@d200 {
+			status = "disabled";
+		};
+
+		qcom,leds@d300 {
+			status = "okay";
+			qcom,flash_0 {
+				qcom,max-current = <1000>;
+				qcom,default-state = "off";
+				qcom,headroom = <0>;
+				qcom,duration = <1280>;
+				qcom,clamp-curr = <200>;
+				qcom,startup-dly = <1>;
+				qcom,safety-timer;
+				label = "flash";
+				linux,default-trigger =
+					"flash0_trigger";
+				qcom,id = <1>;
+				linux,name = "led:flash_0";
+				qcom,current = <625>;
+			};
+
+			qcom,flash_1 {
+				qcom,max-current = <1000>;
+				qcom,default-state = "off";
+				qcom,headroom = <0>;
+				qcom,duration = <1280>;
+				qcom,clamp-curr = <200>;
+				qcom,startup-dly = <1>;
+				qcom,safety-timer;
+				linux,default-trigger =
+					"flash1_trigger";
+				label = "flash";
+				qcom,id = <2>;
+				linux,name = "led:flash_1";
+				qcom,current = <625>;
+			};
+		};
+
+		qcom,leds@d400 {
+			status = "disabled";
+		};
+
+		qcom,leds@d500 {
+			status = "disabled";
+		};
+
+		qcom,leds@d600 {
+			status = "disabled";
+		};
+
+		qcom,leds@d700 {
+			status = "disabled";
+		};
+	};
+};
+
diff --git a/arch/arm/boot/dts/msm8974-liquid.dtsi b/arch/arm/boot/dts/msm8974-liquid.dtsi
index 68207af..0f65dc8 100644
--- a/arch/arm/boot/dts/msm8974-liquid.dtsi
+++ b/arch/arm/boot/dts/msm8974-liquid.dtsi
@@ -10,6 +10,9 @@
  * GNU General Public License for more details.
  */
 
+/include/ "msm8974-camera-sensor-liquid.dtsi"
+/include/ "msm8974-leds.dtsi"
+
 / {
 	serial@f991e000 {
 		status = "ok";
@@ -74,6 +77,35 @@
 
 	qcom,hdmi_tx@fd922100 {
 		status = "ok";
+
+		qcom,hdmi-tx-mux-sel = <&pm8841_mpps 3 0>;
+		qcom,hdmi-tx-mux-en = <&pm8841_mpps 4 0>;
+	};
+
+	drv2667_vreg: drv2667_vdd_vreg {
+		compatible = "regulator-fixed";
+		regulator-name = "vdd_drv2667";
+	};
+
+	i2c@f9967000 {
+		ti-drv2667@59 {
+			compatible = "ti,drv2667";
+			reg = <0x59>;
+			vdd-supply = <&drv2667_vreg>;
+			vdd-i2c-supply = <&pm8941_s3>;
+			ti,label = "vibrator";
+			ti,gain = <2>;
+			ti,idle-timeout-ms = <20>;
+			ti,max-runtime-ms = <15000>;
+			ti,mode = <2>;
+			ti,wav-seq = [
+				/* wave form id */
+				01
+				/* header size, start and stop bytes */
+				05 80 06 00 09
+				/* repeat, amp, freq, duration, envelope */
+				01 ff 19 02 00];
+		};
 	};
 
 	i2c@f9924000 {
@@ -176,8 +208,15 @@
 		compatible = "regulator-fixed";
 		regulator-name = "ext_5v";
 		gpio = <&pm8941_mpps 2 0>;
+		startup-delay-us = <12000>;
 		enable-active-high;
 	};
+
+	sound {
+		qcom,model = "msm8974-taiko-liquid-snd-card";
+		qcom,ext-spk-amp-supply = <&ext_5v>;
+		qcom,ext-spk-amp-gpio = <&pm8841_mpps 1 0>;
+	};
 };
 
 &usb3 {
@@ -341,9 +380,25 @@
 	};
 
 	gpio@e000 { /* GPIO 33 */
+		qcom,mode = <1>;		/* QPNP_PIN_MODE_DIG_OUT */
+		qcom,output-type = <0>;		/* QPNP_PIN_OUT_BUF_CMOS */
+		qcom,pull = <5>;		/* QPNP_PIN_PULL_NO */
+		qcom,vin-sel = <2>;		/* QPNP_PIN_VIN2 */
+		qcom,out-strength = <2>;	/* QPNP_PIN_OUT_STRENGTH_MED */
+		qcom,src-sel = <0>;		/* QPNP_PIN_SEL_FUNC_CONSTANT */
+		qcom,invert = <1>;
+		qcom,master-en = <1>;
 	};
 
 	gpio@e100 { /* GPIO 34 */
+		qcom,mode = <1>;		/* QPNP_PIN_MODE_DIG_OUT */
+		qcom,output-type = <0>;		/* QPNP_PIN_OUT_BUF_CMOS */
+		qcom,pull = <5>;		/* QPNP_PIN_PULL_NO */
+		qcom,vin-sel = <2>;		/* QPNP_PIN_VIN2 */
+		qcom,out-strength = <2>;	/* QPNP_PIN_OUT_STRENGTH_MED */
+		qcom,src-sel = <0>;		/* QPNP_PIN_SEL_FUNC_CONSTANT */
+		qcom,invert = <0>;
+		qcom,master-en = <1>;
 	};
 
 	gpio@e200 { /* GPIO 35 */
@@ -408,6 +463,12 @@
 &pm8841_mpps {
 
 	mpp@a000 { /* MPP 1 */
+		/* CLASS_D_EN speakers PA */
+		qcom,mode = <1>; /* DIG_OUT */
+		qcom,output-type = <0>;  /* PNP_PIN_OUT_BUF_CMOS */
+		qcom,vin-sel = <2>; /* S3A 1.8v */
+		qcom,src-select = <0>; /* CONSTANT */
+		qcom,master-en = <1>; /* ENABLE MPP */
 	};
 
 	mpp@a100 { /* MPP 2 */
diff --git a/arch/arm/boot/dts/msm8974-mdss.dtsi b/arch/arm/boot/dts/msm8974-mdss.dtsi
index a51a38d..b765611 100644
--- a/arch/arm/boot/dts/msm8974-mdss.dtsi
+++ b/arch/arm/boot/dts/msm8974-mdss.dtsi
@@ -48,11 +48,10 @@
 		qcom,hdmi-tx-max-voltage-level = <0 1800000 1800000>;
 		qcom,hdmi-tx-op-mode = <0 1800000 0>;
 
-		gpios = <&msmgpio 31 0>,
-			<&msmgpio 32 0>,
-			<&msmgpio 33 0>,
-			<&msmgpio 34 0>;
-		qcom,hdmi-tx-gpio-names = "cec-pin", "hpd-ddc-clk", "hpd-ddc-data", "hpd-pin";
+		qcom,hdmi-tx-cec = <&msmgpio 31 0>;
+		qcom,hdmi-tx-ddc-clk = <&msmgpio 32 0>;
+		qcom,hdmi-tx-ddc-data = <&msmgpio 33 0>;
+		qcom,hdmi-tx-hpd = <&msmgpio 34 0>;
 	};
 
 	qcom,mdss_wb_panel {
diff --git a/arch/arm/boot/dts/msm8974-mtp.dtsi b/arch/arm/boot/dts/msm8974-mtp.dtsi
index 6553fc0..8563996 100644
--- a/arch/arm/boot/dts/msm8974-mtp.dtsi
+++ b/arch/arm/boot/dts/msm8974-mtp.dtsi
@@ -11,6 +11,8 @@
  */
 
 /include/ "dsi-panel-toshiba-720p-video.dtsi"
+/include/ "msm8974-camera-sensor.dtsi"
+/include/ "msm8974-leds.dtsi"
 
 / {
 	serial@f991e000 {
@@ -152,6 +154,103 @@
 			vdd-phy-supply = <&spi_eth_vreg>;
 		};
 	};
+
+	sound {
+		compatible = "qcom,msm8974-audio-taiko";
+		qcom,model = "msm8974-taiko-mtp-snd-card";
+
+		qcom,audio-routing =
+			"RX_BIAS", "MCLK",
+			"LDO_H", "MCLK",
+			"Ext Spk Bottom Pos", "LINEOUT1",
+			"Ext Spk Bottom Neg", "LINEOUT3",
+			"Ext Spk Top Pos", "LINEOUT2",
+			"Ext Spk Top Neg", "LINEOUT4",
+			"AMIC1", "MIC BIAS1 Internal1",
+			"MIC BIAS1 Internal1", "Handset Mic",
+			"AMIC2", "MIC BIAS2 External",
+			"MIC BIAS2 External", "Headset Mic",
+			"AMIC3", "MIC BIAS2 External",
+			"MIC BIAS2 External", "ANCRight Headset Mic",
+			"AMIC4", "MIC BIAS2 External",
+			"MIC BIAS2 External", "ANCLeft Headset Mic",
+			"DMIC1", "MIC BIAS1 External",
+			"MIC BIAS1 External", "Digital Mic1",
+			"DMIC2", "MIC BIAS1 External",
+			"MIC BIAS1 External", "Digital Mic2",
+			"DMIC3", "MIC BIAS3 External",
+			"MIC BIAS3 External", "Digital Mic3",
+			"DMIC4", "MIC BIAS3 External",
+			"MIC BIAS3 External", "Digital Mic4",
+			"DMIC5", "MIC BIAS4 External",
+			"MIC BIAS4 External", "Digital Mic5",
+			"DMIC6", "MIC BIAS4 External",
+			"MIC BIAS4 External", "Digital Mic6";
+
+		qcom,cdc-mclk-gpios = <&pm8941_gpios 15 0>;
+		taiko-mclk-clk = <&pm8941_clkdiv1>;
+		qcom,taiko-mclk-clk-freq = <9600000>;
+	};
+};
+
+&spmi_bus {
+	qcom,pm8941@1 {
+		qcom,leds@d800 {
+			status = "okay";
+			qcom,wled_0 {
+				label = "wled";
+				linux,name = "wled:backlight";
+				linux,default-trigger = "bkl-trigger";
+				qcom,cs-out-en;
+				qcom,op-fdbck;
+				qcom,default-state = "off";
+				qcom,max-current = <25>;
+				qcom,ctrl-delay-us = <0>;
+				qcom,boost-curr-lim = <3>;
+				qcom,cp-sel = <0>;
+				qcom,switch-freq = <2>;
+				qcom,ovp-val = <2>;
+				qcom,num-strings = <1>;
+				qcom,id = <0>;
+			};
+		};
+
+		qcom,leds@d900 {
+			status = "disabled";
+		};
+
+		qcom,leds@da00 {
+			status = "disabled";
+		};
+
+		qcom,leds@db00 {
+			status = "disabled";
+		};
+
+		qcom,leds@dc00 {
+			status = "disabled";
+		};
+
+		qcom,leds@dd00 {
+			status = "disabled";
+		};
+
+		qcom,leds@de00 {
+			status = "disabled";
+		};
+
+		qcom,leds@df00 {
+			status = "disabled";
+		};
+
+		qcom,leds@e000 {
+			status = "disabled";
+		};
+
+		qcom,leds@e100 {
+			status = "disabled";
+		};
+	};
 };
 
 &sdcc2 {
@@ -175,6 +274,10 @@
 	qcom,otg-capability;
 };
 
+&pm8941_bms {
+	status = "ok";
+};
+
 &pm8941_chg {
 	status = "ok";
 
diff --git a/arch/arm/boot/dts/msm8974-regulator.dtsi b/arch/arm/boot/dts/msm8974-regulator.dtsi
index 495d3fb..3f7e9de 100644
--- a/arch/arm/boot/dts/msm8974-regulator.dtsi
+++ b/arch/arm/boot/dts/msm8974-regulator.dtsi
@@ -71,7 +71,6 @@
 
 	rpm-regulator-smpb2 {
 		status = "okay";
-		qcom,allow-atomic = <1>;
 		pm8841_s2: regulator-s2 {
 			regulator-min-microvolt = <500000>;
 			regulator-max-microvolt = <1050000>;
@@ -131,7 +130,6 @@
 
 	rpm-regulator-smpa2 {
 		status = "okay";
-		qcom,allow-atomic = <1>;
 		pm8941_s2: regulator-s2 {
 			regulator-min-microvolt = <2150000>;
 			regulator-max-microvolt = <2150000>;
@@ -284,7 +282,6 @@
 
 	rpm-regulator-ldoa12 {
 		status = "okay";
-		qcom,allow-atomic = <1>;
 		pm8941_l12: regulator-l12 {
 			parent-supply = <&pm8941_s2>;
 			regulator-min-microvolt = <1800000>;
diff --git a/arch/arm/boot/dts/msm8974-rumi.dtsi b/arch/arm/boot/dts/msm8974-rumi.dtsi
index 5a16be7..33656cd 100644
--- a/arch/arm/boot/dts/msm8974-rumi.dtsi
+++ b/arch/arm/boot/dts/msm8974-rumi.dtsi
@@ -10,6 +10,8 @@
  * GNU General Public License for more details.
  */
 
+/include/ "msm8974-camera-sensor.dtsi"
+
 / {
 	timer {
 		clock-frequency = <5000000>;
diff --git a/arch/arm/boot/dts/msm8974-sim.dtsi b/arch/arm/boot/dts/msm8974-sim.dtsi
index 41e37de..8e8b3c3 100644
--- a/arch/arm/boot/dts/msm8974-sim.dtsi
+++ b/arch/arm/boot/dts/msm8974-sim.dtsi
@@ -11,6 +11,7 @@
  */
 
 /include/ "dsi-panel-sim-video.dtsi"
+/include/ "msm8974-camera-sensor.dtsi"
 
 / {
 	qcom,mdss_dsi@fd922800 {
diff --git a/arch/arm/boot/dts/msm8974.dtsi b/arch/arm/boot/dts/msm8974.dtsi
index 4a041df..19b8828 100644
--- a/arch/arm/boot/dts/msm8974.dtsi
+++ b/arch/arm/boot/dts/msm8974.dtsi
@@ -351,14 +351,12 @@
 
 	slim_msm: slim@fe12f000 {
 		cell-index = <1>;
-		compatible = "qcom,slim-msm";
+		compatible = "qcom,slim-ngd";
 		reg = <0xfe12f000 0x35000>,
 		      <0xfe104000 0x20000>;
 		reg-names = "slimbus_physical", "slimbus_bam_physical";
 		interrupts = <0 163 0 0 164 0>;
 		interrupt-names = "slimbus_irq", "slimbus_bam_irq";
-		qcom,min-clk-gear = <10>;
-		qcom,rxreg-access;
 
 		taiko_codec {
 			compatible = "qcom,taiko-slim-pgd";
@@ -594,116 +592,6 @@
 					 <0x1e50008a>, /* LPG_CHAN10 */
 					 <0x1e60008b>, /* LPG_CHAN11 */
 					 <0x1e70008c>; /* LPG_CHAN12 */
-
-		qcom,pm8941@1 {
-			qcom,leds@d300 {
-				status = "okay";
-				qcom,flash_0 {
-					qcom,max-current = <1000>;
-					qcom,default-state = "off";
-					qcom,headroom = <0>;
-					qcom,duration = <1280>;
-					qcom,clamp-curr = <200>;
-					qcom,startup-dly = <1>;
-					qcom,safety-timer;
-					label = "flash";
-					linux,default-trigger =
-						"flash0_trigger";
-					qcom,id = <1>;
-					linux,name = "led:flash_0";
-					qcom,current = <625>;
-				};
-
-				qcom,flash_1 {
-					qcom,max-current = <1000>;
-					qcom,default-state = "off";
-					qcom,headroom = <0>;
-					qcom,duration = <1280>;
-					qcom,clamp-curr = <200>;
-					qcom,startup-dly = <1>;
-					qcom,safety-timer;
-					linux,default-trigger =
-						"flash1_trigger";
-					label = "flash";
-					qcom,id = <2>;
-					linux,name = "led:flash_1";
-					qcom,current = <625>;
-				};
-			};
-
-			qcom,leds@d400 {
-				status = "disabled";
-			};
-
-			qcom,leds@d500 {
-				status = "disabled";
-			};
-
-			qcom,leds@d600 {
-				status = "disabled";
-			};
-
-			qcom,leds@d700 {
-				status = "disabled";
-			};
-
-			qcom,leds@d800 {
-				status = "okay";
-				qcom,wled_0 {
-					label = "wled";
-					linux,name = "wled:backlight";
-					linux,default-trigger = "bkl-trigger";
-					qcom,cs-out-en;
-					qcom,op-fdbck;
-					qcom,default-state = "off";
-					qcom,max-current = <25>;
-					qcom,ctrl-delay-us = <0>;
-					qcom,boost-curr-lim = <3>;
-					qcom,cp-sel = <0>;
-					qcom,switch-freq = <2>;
-					qcom,ovp-val = <2>;
-					qcom,num-strings = <1>;
-					qcom,id = <0>;
-				};
-			};
-
-			qcom,leds@d900 {
-				status = "disabled";
-			};
-
-			qcom,leds@da00 {
-				status = "disabled";
-			};
-
-			qcom,leds@db00 {
-				status = "disabled";
-			};
-
-			qcom,leds@dc00 {
-				status = "disabled";
-			};
-
-			qcom,leds@dd00 {
-				status = "disabled";
-			};
-
-			qcom,leds@de00 {
-				status = "disabled";
-			};
-
-			qcom,leds@df00 {
-				status = "disabled";
-			};
-
-			qcom,leds@e000 {
-				status = "disabled";
-			};
-
-			qcom,leds@e100 {
-				status = "disabled";
-			};
-
-		};
 	};
 
 	i2c@f9967000 { /* BLSP#11 */
@@ -778,13 +666,14 @@
 			  <0xfd4ab000 0x4>;
 		interrupts = <0 131 0>, <0 179 0>, <0 133 0>;
 		interrupt-names = "irq", "otg_irq", "hs_phy_irq";
-		SSUSB_VDDCX-supply = <&pm8841_s2>;
+		ssusb_vdd_dig-supply = <&pm8841_s2_corner>;
 		SSUSB_1p8-supply = <&pm8941_l6>;
-		HSUSB_VDDCX-supply = <&pm8841_s2>;
+		hsusb_vdd_dig-supply = <&pm8841_s2_corner>;
 		HSUSB_1p8-supply = <&pm8941_l6>;
 		HSUSB_3p3-supply = <&pm8941_l24>;
 		vbus_dwc3-supply = <&pm8941_mvs1>;
 		qcom,dwc-usb3-msm-dbm-eps = <4>;
+		qcom,vdd-voltage-level = <1 5 7>;
 
 		qcom,msm-bus,name = "usb3";
 		qcom,msm-bus,num-cases = <2>;
@@ -811,6 +700,7 @@
 
 	qcom,msm-adsp-loader {
 		compatible = "qcom,adsp-loader";
+		qcom,adsp-state = <0>;
 	};
 
 	qcom,msm-pcm {
@@ -987,7 +877,7 @@
 		interrupts = <0 24 1>;
 		vdd_mss-supply = <&pm8841_s3>;
 
-		qcom,is_loadable = <1>;
+		qcom,is-loadable;
 		qcom,firmware-name = "mba";
 		qcom,pil-self-auth = <1>;
 	};
@@ -1083,6 +973,8 @@
 
 	qcom,qseecom@fe806000 {
 		compatible = "qcom,qseecom";
+		reg = <0x7f00000 0x500000>;
+		reg-names = "secapp-region";
 		qcom,msm-bus,name = "qseecom-noc";
 		qcom,msm-bus,num-cases = <4>;
 		qcom,msm-bus,active-only = <0>;
@@ -1266,8 +1158,104 @@
         };
         qcom,msm-mem-hole {
                 compatible = "qcom,msm-mem-hole";
-                qcom,memblock-remove = <0x8400000 0x7b00000>; /* Address and Size of Hole */
+                qcom,memblock-remove = <0x7f00000 0x8000000>; /* Address and Size of Hole */
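+                /* hole spans 0x7f00000 - 0xff00000 (128 MB) and includes the
+                 * 0x500000 qseecom secapp-region at 0x7f00000 defined above */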
         };
+
+	qcom,smem@fa00000 {
+		compatible = "qcom,smem";
+		reg = <0xfa00000 0x200000>,
+			<0xfa006000 0x1000>,
+			<0xfc428000 0x4000>;
+		reg-names = "smem", "irq-reg-base", "aux-mem1";
+
+		qcom,smd-modem {
+			compatible = "qcom,smd";
+			qcom,smd-edge = <0>;
+			qcom,smd-irq-offset = <0x8>;
+			qcom,smd-irq-bitmask = <0x1000>;
+			qcom,pil-string = "modem";
+			interrupts = <0 25 1>;
+		};
+
+		qcom,smsm-modem {
+			compatible = "qcom,smsm";
+			qcom,smsm-edge = <0>;
+			qcom,smsm-irq-offset = <0x8>;
+			qcom,smsm-irq-bitmask = <0x2000>;
+			interrupts = <0 26 1>;
+		};
+
+		qcom,smd-adsp {
+			compatible = "qcom,smd";
+			qcom,smd-edge = <1>;
+			qcom,smd-irq-offset = <0x8>;
+			qcom,smd-irq-bitmask = <0x100>;
+			qcom,pil-string = "adsp";
+			interrupts = <0 156 1>;
+		};
+
+		qcom,smsm-adsp {
+			compatible = "qcom,smsm";
+			qcom,smsm-edge = <1>;
+			qcom,smsm-irq-offset = <0x8>;
+			qcom,smsm-irq-bitmask = <0x200>;
+			interrupts = <0 157 1>;
+		};
+
+		qcom,smd-wcnss {
+			compatible = "qcom,smd";
+			qcom,smd-edge = <6>;
+			qcom,smd-irq-offset = <0x8>;
+			qcom,smd-irq-bitmask = <0x20000>;
+			qcom,pil-string = "wcnss";
+			interrupts = <0 142 1>;
+		};
+
+		qcom,smsm-wcnss {
+			compatible = "qcom,smsm";
+			qcom,smsm-edge = <6>;
+			qcom,smsm-irq-offset = <0x8>;
+			qcom,smsm-irq-bitmask = <0x80000>;
+			interrupts = <0 144 1>;
+		};
+
+		qcom,smd-rpm {
+			compatible = "qcom,smd";
+			qcom,smd-edge = <15>;
+			qcom,smd-irq-offset = <0x8>;
+			qcom,smd-irq-bitmask = <0x1>;
+			interrupts = <0 168 1>;
+			qcom,irq-no-suspend;
+		};
+	};
+};
+
+&gdsc_venus {
+	status = "ok";
+};
+
+&gdsc_mdss {
+	status = "ok";
+};
+
+&gdsc_jpeg {
+	status = "ok";
+};
+
+&gdsc_vfe {
+	status = "ok";
+};
+
+&gdsc_oxili_gx {
+	status = "ok";
+};
+
+&gdsc_oxili_cx {
+	status = "ok";
+};
+
+&gdsc_usb_hsic {
+	status = "ok";
 };
 
 /include/ "msm-pm8x41-rpm-regulator.dtsi"
diff --git a/arch/arm/boot/dts/msm9625-cdp.dts b/arch/arm/boot/dts/msm9625-cdp.dts
index ba5bbcf..232fba7 100644
--- a/arch/arm/boot/dts/msm9625-cdp.dts
+++ b/arch/arm/boot/dts/msm9625-cdp.dts
@@ -37,6 +37,14 @@
 			summit,temperature-max = <3>; /* 45 C */
 		};
 	};
+
+	wlan0: qca,wlan {
+		cell-index = <0>;
+		compatible = "qca,ar6004-sdio";
+		qca,chip-pwd-l-gpios = <&msmgpio 62 0>;
+		qca,pm-enable-gpios = <&pm8019_gpios 3 0x0>;
+		qca,ar6004-vdd-io-supply = <&pm8019_l11>;
+	};
 };
 
 /* PM8019 GPIO and MPP configuration */
diff --git a/arch/arm/boot/dts/msm9625-mtp.dts b/arch/arm/boot/dts/msm9625-mtp.dts
index 7780686..faf86d4 100644
--- a/arch/arm/boot/dts/msm9625-mtp.dts
+++ b/arch/arm/boot/dts/msm9625-mtp.dts
@@ -37,6 +37,14 @@
 			summit,temperature-max = <3>; /* 45 C */
 		};
 	};
+
+	wlan0: qca,wlan {
+		cell-index = <0>;
+		compatible = "qca,ar6004-sdio";
+		qca,chip-pwd-l-gpios = <&msmgpio 62 0>;
+		qca,pm-enable-gpios = <&pm8019_gpios 3 0x0>;
+		qca,ar6004-vdd-io-supply = <&pm8019_l11>;
+	};
 };
 
 /* PM8019 GPIO and MPP configuration */
diff --git a/arch/arm/boot/dts/msm9625-pm.dtsi b/arch/arm/boot/dts/msm9625-pm.dtsi
index d62f7e7..2839864 100644
--- a/arch/arm/boot/dts/msm9625-pm.dtsi
+++ b/arch/arm/boot/dts/msm9625-pm.dtsi
@@ -115,6 +115,21 @@
 
 		qcom,lpm-level@3 {
 			reg = <0x3>;
+			qcom,mode = <3>;        /* MSM_PM_SLEEP_MODE_POWER_COLLAPSE */
+			qcom,xo = <1>;          /* ON */
+			qcom,l2 = <0>;          /* GDHS */
+			qcom,vdd-mem-upper-bound = <1050000>; /* SUPER TURBO */
+			qcom,vdd-mem-lower-bound = <950000>; /* SVS SOC */
+			qcom,vdd-dig-upper-bound = <4>; /* NORMAL */
+			qcom,vdd-dig-lower-bound = <3>;  /* SVS SOC */
+			qcom,latency-us = <4500>;
+			qcom,ss-power = <5000>;
+			qcom,energy-overhead = <60350000>;
+			qcom,time-overhead = <7300>;
+		};
+
+		qcom,lpm-level@4 {
+			reg = <0x4>;
 			qcom,mode= <3>;         /* MSM_PM_SLEEP_MODE_POWER_COLLAPSE */
 			qcom,xo = <0>;          /* OFF */
 			qcom,l2 = <0>;          /* OFF */
@@ -128,8 +143,8 @@
 			qcom,time-overhead = <13300>;
 		};
 
-		qcom,lpm-level@4 {
-			reg = <0x4>;
+		qcom,lpm-level@5 {
+			reg = <0x5>;
 			qcom,mode= <3>;         /* MSM_PM_SLEEP_MODE_POWER_COLLAPSE */
 			qcom,xo = <0>;          /* OFF */
 			qcom,l2 = <0>;          /* OFF */
diff --git a/arch/arm/boot/dts/msm9625-regulator.dtsi b/arch/arm/boot/dts/msm9625-regulator.dtsi
index b128648..24f616d 100644
--- a/arch/arm/boot/dts/msm9625-regulator.dtsi
+++ b/arch/arm/boot/dts/msm9625-regulator.dtsi
@@ -23,7 +23,6 @@
 
 	rpm-regulator-smpa2 {
 		status = "okay";
-		qcom,allow-atomic = <1>;
 		pm8019_s2: regulator-s2 {
 			regulator-min-microvolt = <1250000>;
 			regulator-max-microvolt = <1250000>;
diff --git a/arch/arm/boot/dts/msm9625.dtsi b/arch/arm/boot/dts/msm9625.dtsi
index 26f8ab7..d1c731e 100644
--- a/arch/arm/boot/dts/msm9625.dtsi
+++ b/arch/arm/boot/dts/msm9625.dtsi
@@ -88,6 +88,47 @@
 		reg = <0xfc42b0c8 0xc8>;
 	};
 
+	hsic@f9a15000 {
+		compatible = "qcom,hsic-host";
+		reg = <0xf9a15000 0x400>;
+		interrupts = <0 136 0>;
+		interrupt-names = "core_irq";
+		HSIC_VDDCX-supply = <&pm8019_l12>;
+		HSIC_GDSC-supply = <&gdsc_usb_hsic>;
+	};
+
+	qcom,usbbam@f9a44000 {
+		compatible = "qcom,usb-bam-msm";
+		reg = <0xf9a44000 0x11000>;
+		reg-names = "hsusb";
+		interrupts = <0 135 0>;
+		interrupt-names = "hsusb";
+		qcom,usb-active-bam = <1>;
+		qcom,usb-total-bam-num = <3>;
+		qcom,usb-bam-num-pipes = <16>;
+		qcom,ignore-core-reset-ack;
+
+		qcom,pipe0 {
+			label = "usb-to-ipa";
+			qcom,usb-bam-type = <1>;
+			qcom,usb-bam-mem-type = <2>;
+			qcom,src-bam-physical-address = <0xf9a44000>;
+			qcom,src-bam-pipe-index = <1>;
+			qcom,data-fifo-size = <0x600>;
+			qcom,descriptor-fifo-size = <0x300>;
+		};
+
+		qcom,pipe1 {
+			label = "ipa-to-usb";
+			qcom,usb-bam-type = <1>;
+			qcom,usb-bam-mem-type = <2>;
+			qcom,dst-bam-physical-address = <0xf9a44000>;
+			qcom,dst-bam-pipe-index = <0>;
+			qcom,data-fifo-size = <0x600>;
+			qcom,descriptor-fifo-size = <0x100>;
+		};
+	};
+
 	qcom,nand@f9ac0000 {
 		compatible = "qcom,msm-nand";
 		reg = <0xf9ac0000 0x1000>,
@@ -98,25 +139,26 @@
 		interrupt-names = "bam_irq";
 	};
 
-	spi@f9928000 {
+	spi@f9924000 {
+		cell-index = <0>;
 		compatible = "qcom,spi-qup-v2";
-		reg = <0xf9928000 0x1000>;
-		interrupts = <0 100 0>;
-		spi-max-frequency = <24000000>;
+		reg = <0xf9924000 0x1000>;
+		interrupts = <0 96 0>;
+		spi-max-frequency = <25000000>;
 		#address-cells = <1>;
 		#size-cells = <0>;
-		gpios = <&msmgpio 23 0>, /* CLK  */
-			<&msmgpio 21 0>, /* MISO */
-			<&msmgpio 20 0>; /* MOSI */
+		gpios = <&msmgpio 7 0>, /* CLK  */
+			<&msmgpio 5 0>, /* MISO */
+			<&msmgpio 4 0>; /* MOSI */
 
-		cs-gpios = <&msmgpio 69 0>;
+		cs-gpios = <&msmgpio 6 0>;
 
 		ethernet-switch@0 {
 			compatible = "simtec,ks8851";
 			reg = <0>;
 			interrupt-parent = <&msmgpio>;
 			interrupts = <75 0>;
-			spi-max-frequency = <5000000>;
+			spi-max-frequency = <4800000>;
 		};
 	};
 
@@ -244,6 +286,42 @@
 		interrupts = <0 29 1>;
 	};
 
+	qcom,ipa@fd4c0000 {
+		compatible = "qcom,ipa";
+		reg = <0xfd4c0000 0x26000>,
+		      <0xfd4c4000 0x14818>;
+		reg-names = "ipa-base", "bam-base";
+		interrupts = <0 252 0>,
+		             <0 253 0>;
+		interrupt-names = "ipa-irq", "bam-irq";
+
+		qcom,pipe1 {
+			label = "a2-to-ipa";
+			qcom,src-bam-physical-address = <0xfc834000>;
+			qcom,ipa-bam-mem-type = <0>;
+			qcom,src-bam-pipe-index = <1>;
+			qcom,dst-bam-physical-address = <0xfd4c0000>;
+			qcom,dst-bam-pipe-index = <6>;
+			qcom,data-fifo-offset = <0x1000>;
+			qcom,data-fifo-size = <0xd00>;
+			qcom,descriptor-fifo-offset = <0x1d00>;
+			qcom,descriptor-fifo-size = <0x300>;
+		};
+
+		qcom,pipe2 {
+			label = "ipa-to-a2";
+			qcom,src-bam-physical-address = <0xfd4c0000>;
+			qcom,ipa-bam-mem-type = <0>;
+			qcom,src-bam-pipe-index = <7>;
+			qcom,dst-bam-physical-address = <0xfc834000>;
+			qcom,dst-bam-pipe-index = <0>;
+			qcom,data-fifo-offset = <0x00>;
+			qcom,data-fifo-size = <0xd00>;
+			qcom,descriptor-fifo-offset = <0xd00>;
+			qcom,descriptor-fifo-size = <0x300>;
+		};
+	};
+
 	qcom,acpuclk@f9010000 {
 		compatible = "qcom,acpuclk-9625";
 		reg = <0xf9010008 0x10>,
@@ -268,6 +346,183 @@
 		qcom,sensors = <5>;
 		qcom,slope = <3200 3200 3200 3200 3200>;
 	};
+
+	qcom,msm-rng@f9bff000 {
+		compatible = "qcom,msm-rng";
+		reg = <0xf9bff000 0x200>;
+		qcom,msm-rng-iface-clk;
+	};
+
+	wcd9xxx_intc: wcd9xxx-irq {
+		compatible = "qcom,wcd9xxx-irq";
+		interrupt-controller;
+		#interrupt-cells = <1>;
+		interrupt-parent = <&msmgpio>;
+		interrupts = <20 0>;
+		interrupt-names = "cdc-int";
+	};
+
+	i2c@f9925000 {
+		cell-index = <3>;
+		compatible = "qcom,i2c-qup";
+		reg = <0xf9925000 0x1000>;
+		#address-cells = <1>;
+		#size-cells = <0>;
+		reg-names = "qup_phys_addr";
+		interrupts = <0 97 0>;
+		interrupt-names = "qup_err_intr";
+		qcom,i2c-bus-freq = <100000>;
+		qcom,i2c-src-freq = <24000000>;
+
+		wcd9xxx_codec@0d{
+			compatible = "qcom,wcd9xxx-i2c";
+			reg = <0x0d>;
+			qcom,cdc-reset-gpio = <&msmgpio 22 0>;
+			interrupt-parent = <&wcd9xxx_intc>;
+			interrupts = <0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28>;
+			cdc-vdd-buck-supply = <&pm8019_l11>;
+			qcom,cdc-vdd-buck-voltage = <1800000 1800000>;
+			qcom,cdc-vdd-buck-current = <25000>;
+
+			cdc-vdd-tx-h-supply = <&pm8019_l11>;
+			qcom,cdc-vdd-tx-h-voltage = <1800000 1800000>;
+			qcom,cdc-vdd-tx-h-current = <25000>;
+
+			cdc-vdd-rx-h-supply = <&pm8019_l11>;
+			qcom,cdc-vdd-rx-h-voltage = <1800000 1800000>;
+			qcom,cdc-vdd-rx-h-current = <25000>;
+
+			cdc-vddpx-1-supply = <&pm8019_l11>;
+			qcom,cdc-vddpx-1-voltage = <1800000 1800000>;
+			qcom,cdc-vddpx-1-current = <10000>;
+
+			cdc-vdd-a-1p2v-supply = <&pm8019_l9>;
+			qcom,cdc-vdd-a-1p2v-voltage = <1200000 1200000>;
+			qcom,cdc-vdd-a-1p2v-current = <10000>;
+
+			cdc-vddcx-1-supply = <&pm8019_l9>;
+			qcom,cdc-vddcx-1-voltage = <1200000 1200000>;
+			qcom,cdc-vddcx-1-current = <10000>;
+
+			cdc-vddcx-2-supply = <&pm8019_l9>;
+			qcom,cdc-vddcx-2-voltage = <1200000 1200000>;
+			qcom,cdc-vddcx-2-current = <10000>;
+
+			qcom,cdc-micbias-ldoh-v = <0x3>;
+			qcom,cdc-micbias-cfilt1-mv = <1800>;
+			qcom,cdc-micbias-cfilt2-mv = <2700>;
+			qcom,cdc-micbias-cfilt3-mv = <1800>;
+			qcom,cdc-micbias1-cfilt-sel = <0x0>;
+			qcom,cdc-micbias2-cfilt-sel = <0x1>;
+			qcom,cdc-micbias3-cfilt-sel = <0x2>;
+			qcom,cdc-micbias4-cfilt-sel = <0x2>;
+		};
+
+		wcd9xxx_codec@77{
+			compatible = "qcom,wcd9xxx-i2c";
+			reg = <0x77>;
+		};
+
+		wcd9xxx_codec@66{
+			compatible = "qcom,wcd9xxx-i2c";
+			reg = <0x66>;
+		};
+
+		wcd9xxx_codec@55{
+			compatible = "qcom,wcd9xxx-i2c";
+			reg = <0x55>;
+		};
+	};
+
+	sound {
+		compatible = "qcom,mdm9625-audio-taiko";
+		qcom,model = "mdm9625-taiko-i2s-snd-card";
+
+		qcom,audio-routing =
+			"RX_BIAS", "MCLK",
+			"LDO_H", "MCLK",
+			"Ext Spk Bottom Pos", "LINEOUT1",
+			"Ext Spk Bottom Neg", "LINEOUT3",
+			"Ext Spk Top Pos", "LINEOUT2",
+			"Ext Spk Top Neg", "LINEOUT4",
+			"AMIC1", "MIC BIAS1 External",
+			"MIC BIAS1 External", "Handset Mic",
+			"AMIC2", "MIC BIAS2 External",
+			"MIC BIAS2 External", "Headset Mic",
+			"AMIC3", "MIC BIAS3 Internal1",
+			"MIC BIAS3 Internal1", "ANCRight Headset Mic",
+			"AMIC4", "MIC BIAS1 Internal2",
+			"MIC BIAS1 Internal2", "ANCLeft Headset Mic",
+			"DMIC1", "MIC BIAS1 External",
+			"MIC BIAS1 External", "Digital Mic1",
+			"DMIC2", "MIC BIAS1 External",
+			"MIC BIAS1 External", "Digital Mic2",
+			"DMIC3", "MIC BIAS3 External",
+			"MIC BIAS3 External", "Digital Mic3",
+			"DMIC4", "MIC BIAS3 External",
+			"MIC BIAS3 External", "Digital Mic4",
+			"DMIC5", "MIC BIAS4 External",
+			"MIC BIAS4 External", "Digital Mic5",
+			"DMIC6", "MIC BIAS4 External",
+			"MIC BIAS4 External", "Digital Mic6";
+		qcom,taiko-mclk-clk-freq = <12288000>;
+	};
+
+	qcom,msm-adsp-loader {
+		compatible = "qcom,adsp-loader";
+		qcom,adsp-state = <2>;
+	};
+
+	qcom,msm-pcm {
+		compatible = "qcom,msm-pcm-dsp";
+	};
+
+	qcom,msm-pcm-routing {
+		compatible = "qcom,msm-pcm-routing";
+	};
+
+	qcom,msm-compr-dsp {
+		compatible = "qcom,msm-compr-dsp";
+	};
+
+	qcom,msm-voip-dsp {
+		compatible = "qcom,msm-voip-dsp";
+	};
+
+	qcom,msm-pcm-voice {
+		compatible = "qcom,msm-pcm-voice";
+	};
+
+	qcom,msm-dai-fe {
+		compatible = "qcom,msm-dai-fe";
+	};
+
+	qcom,msm-pcm-afe {
+		compatible = "qcom,msm-pcm-afe";
+	};
+
+	qcom,msm-pcm-hostless {
+		compatible = "qcom,msm-pcm-hostless";
+	};
+
+	qcom,msm-dai-mi2s {
+		compatible = "qcom,msm-dai-mi2s";
+		qcom,msm-dai-q6-mi2s-prim {
+			compatible = "qcom,msm-dai-q6-mi2s";
+			qcom,msm-dai-q6-mi2s-dev-id = <0>;
+			qcom,msm-mi2s-rx-lines = <2>;
+			qcom,msm-mi2s-tx-lines = <1>;
+		};
+	};
+
+	qcom,msm-dai-q6 {
+		compatible = "qcom,msm-dai-q6";
+	};
+
+	qcom,mss {
+		compatible = "qcom,pil-q6v5-mss";
+		interrupts = <0 24 1>;
+	};
 };
 
 /include/ "msm-pm8019-rpm-regulator.dtsi"
@@ -329,10 +584,4 @@
 		qcom,hw-settle-time = <0>;
 		qcom,fast-avg-setup = <0>;
 	};
-
-	qcom,msm-rng@f9bff000 {
-		compatible = "qcom,msm-rng";
-		reg = <0xf9bff000 0x200>;
-		qcom,msm-rng-iface-clk;
-	};
 };
diff --git a/arch/arm/configs/msm8910_defconfig b/arch/arm/configs/msm8910_defconfig
index 5876a0f..2dd4b30 100644
--- a/arch/arm/configs/msm8910_defconfig
+++ b/arch/arm/configs/msm8910_defconfig
@@ -40,11 +40,13 @@
 # CONFIG_MSM_FIQ_SUPPORT is not set
 # CONFIG_MSM_PROC_COMM is not set
 CONFIG_MSM_SMD=y
+CONFIG_MSM_SMD_PKG4=y
 # CONFIG_MSM_HW3D is not set
 CONFIG_MSM_DIRECT_SCLK_ACCESS=y
+CONFIG_MSM_WATCHDOG_V2=y
 CONFIG_NO_HZ=y
 CONFIG_HIGH_RES_TIMERS=y
-# CONFIG_SMP_ON_UP is not set
+CONFIG_SMP=y
 CONFIG_ARM_ARCH_TIMER=y
 CONFIG_HOTPLUG_CPU=y
 CONFIG_PREEMPT=y
diff --git a/arch/arm/configs/msm8960-perf_defconfig b/arch/arm/configs/msm8960-perf_defconfig
index 33c7718..d5e15f1 100644
--- a/arch/arm/configs/msm8960-perf_defconfig
+++ b/arch/arm/configs/msm8960-perf_defconfig
@@ -414,7 +414,6 @@
 CONFIG_USB_EHCI_MSM_HOST4=y
 CONFIG_USB_ACM=y
 CONFIG_USB_STORAGE=y
-CONFIG_USB_STORAGE_DEBUG=y
 CONFIG_USB_STORAGE_DATAFAB=y
 CONFIG_USB_STORAGE_FREECOM=y
 CONFIG_USB_STORAGE_ISD200=y
@@ -464,7 +463,6 @@
 CONFIG_ANDROID_RAM_CONSOLE=y
 CONFIG_ANDROID_TIMED_GPIO=y
 CONFIG_ANDROID_LOW_MEMORY_KILLER=y
-CONFIG_ANDROID_LOW_MEMORY_KILLER_AUTODETECT_OOM_ADJ_VALUES=y
 CONFIG_MSM_SSBI=y
 CONFIG_SPS=y
 CONFIG_SPS_SUPPORT_BAMDMA=y
diff --git a/arch/arm/configs/msm8960_defconfig b/arch/arm/configs/msm8960_defconfig
index 8c11368..386f311 100644
--- a/arch/arm/configs/msm8960_defconfig
+++ b/arch/arm/configs/msm8960_defconfig
@@ -417,7 +417,6 @@
 CONFIG_USB_EHCI_MSM_HOST4=y
 CONFIG_USB_ACM=y
 CONFIG_USB_STORAGE=y
-CONFIG_USB_STORAGE_DEBUG=y
 CONFIG_USB_STORAGE_DATAFAB=y
 CONFIG_USB_STORAGE_FREECOM=y
 CONFIG_USB_STORAGE_ISD200=y
@@ -467,7 +466,6 @@
 CONFIG_ANDROID_RAM_CONSOLE=y
 CONFIG_ANDROID_TIMED_GPIO=y
 CONFIG_ANDROID_LOW_MEMORY_KILLER=y
-CONFIG_ANDROID_LOW_MEMORY_KILLER_AUTODETECT_OOM_ADJ_VALUES=y
 CONFIG_MSM_SSBI=y
 CONFIG_SPS=y
 CONFIG_SPS_SUPPORT_BAMDMA=y
diff --git a/arch/arm/configs/msm8974-perf_defconfig b/arch/arm/configs/msm8974-perf_defconfig
index 5e1fa4a..f468fe0 100644
--- a/arch/arm/configs/msm8974-perf_defconfig
+++ b/arch/arm/configs/msm8974-perf_defconfig
@@ -37,7 +37,6 @@
 CONFIG_EFI_PARTITION=y
 CONFIG_ARCH_MSM=y
 CONFIG_ARCH_MSM8974=y
-CONFIG_ARCH_MSM8226=y
 CONFIG_MSM_KRAIT_TBB_ABORT_HANDLER=y
 # CONFIG_MSM_STACKED_MEMORY is not set
 CONFIG_CPU_HAS_L2_PMU=y
@@ -75,6 +74,7 @@
 CONFIG_STRICT_MEMORY_RWX=y
 CONFIG_NO_HZ=y
 CONFIG_HIGH_RES_TIMERS=y
+CONFIG_SMP=y
 # CONFIG_SMP_ON_UP is not set
 CONFIG_ARM_ARCH_TIMER=y
 CONFIG_PREEMPT=y
@@ -217,6 +217,7 @@
 CONFIG_GENLOCK_MISCDEVICE=y
 CONFIG_BLK_DEV_LOOP=y
 CONFIG_BLK_DEV_RAM=y
+CONFIG_TI_DRV2667=y
 CONFIG_QSEECOM=y
 CONFIG_SCSI=y
 CONFIG_SCSI_TGT=y
@@ -264,12 +265,13 @@
 CONFIG_SPMI=y
 CONFIG_SPMI_MSM_PMIC_ARB=y
 CONFIG_MSM_QPNP_INT=y
-CONFIG_SLIMBUS_MSM_CTRL=y
+CONFIG_SLIMBUS_MSM_NGD=y
 CONFIG_DEBUG_GPIO=y
 CONFIG_GPIO_SYSFS=y
 CONFIG_GPIO_QPNP_PIN=y
 CONFIG_GPIO_QPNP_PIN_DEBUG=y
 CONFIG_POWER_SUPPLY=y
+CONFIG_SMB350_CHARGER=y
 CONFIG_BATTERY_BQ28400=y
 CONFIG_QPNP_CHARGER=y
 CONFIG_BATTERY_BCL=y
diff --git a/arch/arm/configs/msm8974_defconfig b/arch/arm/configs/msm8974_defconfig
index 4d68a72..973eef9 100644
--- a/arch/arm/configs/msm8974_defconfig
+++ b/arch/arm/configs/msm8974_defconfig
@@ -219,6 +219,7 @@
 CONFIG_GENLOCK_MISCDEVICE=y
 CONFIG_BLK_DEV_LOOP=y
 CONFIG_BLK_DEV_RAM=y
+CONFIG_TI_DRV2667=y
 CONFIG_QSEECOM=y
 CONFIG_SCSI=y
 CONFIG_SCSI_TGT=y
@@ -266,7 +267,7 @@
 CONFIG_SPMI=y
 CONFIG_SPMI_MSM_PMIC_ARB=y
 CONFIG_MSM_QPNP_INT=y
-CONFIG_SLIMBUS_MSM_CTRL=y
+CONFIG_SLIMBUS_MSM_NGD=y
 CONFIG_DEBUG_GPIO=y
 CONFIG_GPIO_SYSFS=y
 CONFIG_GPIO_QPNP_PIN=y
diff --git a/arch/arm/configs/msm9625_defconfig b/arch/arm/configs/msm9625_defconfig
index 2b73a3e..1a8bbfc 100644
--- a/arch/arm/configs/msm9625_defconfig
+++ b/arch/arm/configs/msm9625_defconfig
@@ -42,6 +42,9 @@
 CONFIG_MSM_IPC_ROUTER=y
 CONFIG_MSM_IPC_ROUTER_SMD_XPRT=y
 CONFIG_MSM_RPM_REGULATOR_SMD=y
+CONFIG_MSM_SUBSYSTEM_RESTART=y
+CONFIG_MSM_PIL=y
+CONFIG_MSM_PIL_MSS_QDSP6V5=y
 CONFIG_MSM_DIRECT_SCLK_ACCESS=y
 CONFIG_MSM_WATCHDOG_V2=y
 CONFIG_MSM_DLOAD_MODE=y
@@ -109,9 +112,9 @@
 # CONFIG_NET_VENDOR_STMICRO is not set
 # CONFIG_WLAN is not set
 # CONFIG_INPUT_MOUSEDEV is not set
+CONFIG_INPUT_EVDEV=y
 # CONFIG_INPUT_KEYBOARD is not set
 # CONFIG_INPUT_MOUSE is not set
-CONFIG_INPUT_EVDEV=y
 CONFIG_INPUT_MISC=y
 CONFIG_INPUT_UINPUT=y
 CONFIG_INPUT_GPIO=m
@@ -121,9 +124,11 @@
 CONFIG_SERIAL_MSM_HSL_CONSOLE=y
 CONFIG_DIAG_CHAR=y
 CONFIG_HW_RANDOM=y
+CONFIG_HW_RANDOM_MSM=y
 CONFIG_I2C=y
 CONFIG_I2C_CHARDEV=y
 CONFIG_I2C_QUP=y
+CONFIG_MSM_BUS_SCALING=y
 CONFIG_SPI=y
 CONFIG_SPI_QUP=y
 CONFIG_SPI_SPIDEV=m
@@ -163,6 +168,7 @@
 CONFIG_RTC_CLASS=y
 # CONFIG_RTC_DRV_MSM is not set
 CONFIG_RTC_DRV_QPNP=y
+CONFIG_IPA=y
 CONFIG_SPS=y
 CONFIG_USB_BAM=y
 CONFIG_SPS_SUPPORT_BAMDMA=y
@@ -203,3 +209,56 @@
 CONFIG_LIBCRC32C=y
 CONFIG_ENABLE_DEFAULT_TRACERS=y
 CONFIG_MSM_QDSS=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_MROUTE=y
+CONFIG_IP_PIMSM_V2=y
+CONFIG_IPV6_MULTIPLE_TABLES=y
+CONFIG_IPV6_SUBTREES=y
+CONFIG_NETFILTER_XT_MARK=y
+CONFIG_NETFILTER_XT_CONNMARK=y
+CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=y
+CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y
+CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y
+CONFIG_NETFILTER_XT_MATCH_IPRANGE=y
+CONFIG_NETFILTER_XT_MATCH_MULTIPORT=y
+CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y
+CONFIG_IP_SET=y
+CONFIG_NF_CONNTRACK_IPV4=y
+CONFIG_IP_NF_IPTABLES=y
+CONFIG_IP_NF_MATCH_AH=y
+CONFIG_IP_NF_MATCH_ECN=y
+CONFIG_IP_NF_MATCH_TTL=y
+CONFIG_IP_NF_FILTER=y
+CONFIG_IP_NF_TARGET_REJECT=y
+CONFIG_IP_NF_TARGET_REJECT_SKERR=y
+CONFIG_IP_NF_TARGET_ULOG=y
+CONFIG_NF_NAT=y
+CONFIG_IP_NF_TARGET_MASQUERADE=y
+CONFIG_IP_NF_TARGET_NETMAP=y
+CONFIG_IP_NF_TARGET_REDIRECT=y
+CONFIG_IP_NF_MANGLE=y
+CONFIG_IP_NF_TARGET_ECN=y
+CONFIG_IP_NF_TARGET_TTL=y
+CONFIG_IP_NF_RAW=y
+CONFIG_NF_CONNTRACK_IPV6=y
+CONFIG_IP6_NF_IPTABLES=y
+CONFIG_IP6_NF_MATCH_AH=y
+CONFIG_IP6_NF_MATCH_FRAG=y
+CONFIG_IP6_NF_MATCH_OPTS=y
+CONFIG_IP6_NF_MATCH_HL=y
+CONFIG_IP6_NF_MATCH_IPV6HEADER=y
+CONFIG_IP6_NF_MATCH_MH=y
+CONFIG_IP6_NF_MATCH_RT=y
+CONFIG_IP6_NF_FILTER=y
+CONFIG_IP6_NF_TARGET_REJECT=y
+CONFIG_IP6_NF_TARGET_REJECT_SKERR=y
+CONFIG_IP6_NF_MANGLE=y
+CONFIG_IP6_NF_RAW=y
+CONFIG_WCD9320_CODEC=y
+CONFIG_SOUND=y
+CONFIG_SND=y
+CONFIG_SND_SOC=y
+CONFIG_SND_SOC_MDM9625=y
+CONFIG_MSM_ADSP_LOADER=m
diff --git a/arch/arm/include/asm/mach/map.h b/arch/arm/include/asm/mach/map.h
index cd5be28..f705388 100644
--- a/arch/arm/include/asm/mach/map.h
+++ b/arch/arm/include/asm/mach/map.h
@@ -37,6 +37,7 @@
 #define MT_MEMORY_RW		16
 #define MT_MEMORY_RX		17
 #define MT_MEMORY_DMA_READY	18
+#define MT_DEVICE_USER_ACCESSIBLE	19
 
 #ifdef CONFIG_MMU
 extern void iotable_init(struct map_desc *, int);
diff --git a/arch/arm/include/asm/user_accessible_timer.h b/arch/arm/include/asm/user_accessible_timer.h
new file mode 100644
index 0000000..c6d7bd4
--- /dev/null
+++ b/arch/arm/include/asm/user_accessible_timer.h
@@ -0,0 +1,49 @@
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _ARM_KERNEL_USER_ACCESSIBLE_TIMER_H_
+#define _ARM_KERNEL_USER_ACCESSIBLE_TIMER_H_
+
+#define ARM_USER_ACCESSIBLE_TIMERS_INVALID_PAGE -1
+
+extern unsigned long zero_pfn;
+
+#ifdef CONFIG_ARM_USE_USER_ACCESSIBLE_TIMERS
+#ifndef CONFIG_ARM_USER_ACCESSIBLE_TIMER_BASE
+#define CONFIG_ARM_USER_ACCESSIBLE_TIMER_BASE 0xfffef000
+#endif
+extern void setup_user_timer_offset(unsigned long addr);
+extern int get_timer_page_address(void);
+static inline int get_user_accessible_timers_base(void)
+{
+	return CONFIG_ARM_USER_ACCESSIBLE_TIMER_BASE;
+}
+extern void set_user_accessible_timer_flag(bool flag);
+#else
+#define CONFIG_ARM_USER_ACCESSIBLE_TIMER_BASE 0
+static inline void setup_user_timer_offset(unsigned long addr)
+{
+}
+static inline int get_timer_page_address(void)
+{
+	return ARM_USER_ACCESSIBLE_TIMERS_INVALID_PAGE;
+}
+static inline int get_user_accessible_timers_base(void)
+{
+	return 0;
+}
+static inline void set_user_accessible_timer_flag(bool flag)
+{
+}
+#endif
+
+#endif
diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile
index 22b0f1e..fc00a23 100644
--- a/arch/arm/kernel/Makefile
+++ b/arch/arm/kernel/Makefile
@@ -63,6 +63,7 @@
 obj-$(CONFIG_SWP_EMULATE)	+= swp_emulate.o
 CFLAGS_swp_emulate.o		:= -Wa,-march=armv7-a
 obj-$(CONFIG_HAVE_HW_BREAKPOINT)	+= hw_breakpoint.o
+obj-$(CONFIG_GENERIC_TIME_VSYSCALL)	+= update_vsyscall_arm.o
 
 obj-$(CONFIG_CPU_XSCALE)	+= xscale-cp0.o
 obj-$(CONFIG_CPU_XSC3)		+= xscale-cp0.o
@@ -73,6 +74,7 @@
 obj-$(CONFIG_HW_PERF_EVENTS)	+= perf_event.o
 AFLAGS_iwmmxt.o			:= -Wa,-mcpu=iwmmxt
 obj-$(CONFIG_ARM_CPU_TOPOLOGY)  += topology.o
+obj-$(CONFIG_ARM_USE_USER_ACCESSIBLE_TIMERS)	+= user_accessible_timer.o
 
 ifneq ($(CONFIG_ARCH_EBSA110),y)
   obj-y		+= io.o
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index 7a8c2d6..ddd421c 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -764,6 +764,97 @@
 	.align	5
 	.globl	__kuser_helper_start
 __kuser_helper_start:
+#ifdef CONFIG_GENERIC_TIME_VSYSCALL
+/*
+ * Reference declaration:
+ *
+ *      extern struct timezone __kernel_helper_gtod_timezone
+ *      extern unsigned int __kernel_helper_gtod_seqnum
+ *
+ *  Definition and user space usage example:
+ *
+ *      #define __kernel_helper_gtod_timezone (*(unsigned int*)0xffff0f20)
+ *      #define __kernel_helper_gtod_seqnum   (*(unsigned int*)0xffff0f28)
+ *
+ *      unsigned int prelock, postlock ;
+ *      do {
+ *          prelock = __kernel_helper_gtod_seqnum;
+ *          memcpy(&tz, (void*)&(__kernel_helper_gtod_timezone),
+ *                     sizeof(struct timezone)) ;
+ *          postlock = __kernel_helper_gtod_seqnum;
+ *      } while (prelock != postlock);
+ *
+ * 0xffff0f20-3: tz_minuteswest
+ * 0xffff0f24-7: tz_dsttime
+ * 0xffff0f28-b: sequence #.
+ * 0xffff0f30-3: offset into CONFIG_USER_ACCESSIBLE_TIMER_BASE to get the timer.
+ * 0xffff0f34-7: Feature flag
+ * 0xffff0f38-b: wall-to-monotonic: tv_sec
+ * 0xffff0f3c-f: wall-to-monotonic: tv_nsec
+ */
+	.globl  __kuser_gtod_timezone
+__kuser_gtod_timezone: @0xffff0f20
+	.word	0
+	.word	0
+	.word	0
+	.word	0
+	.word	0
+	/* This offset is where the flag to enable the
+	 * user accessible timers is located.
+	 */
+	.word	0
+	.word	0
+	.word	0
+	.align	5
+
+/*
+ * Reference declaration:
+ *
+ *      extern struct timeval __kernel_helper_gtod_timeval
+ *      extern unsigned int __kernel_helper_gtod_seqnum
+ *
+ *  Definition and user space usage example:
+ *
+ *      #define __kernel_helper_gtod_timeval (*(unsigned int*)0xffff0f40)
+ *      #define __kernel_helper_gtod_seqnum   (*(unsigned int*)0xffff0f48)
+ *
+ *      unsigned int prelock, postlock ;
+ *      struct gtod {
+ *          uint64_t  cycle_last;
+ *          uint64_t  mask;
+ *          uint32_t  mult;
+ *          uint32_t  shift;
+ *          uint32_t  tv_sec;
+ *          uint32_t  tv_nsec;
+ *      };
+ *      struct gtod gdtod;
+ *
+ *      do {
+ *          prelock = __kernel_helper_gtod_seqnum;
+ *          memcpy(&gdtod, (void*)&(__kernel_helper_gtod_timeval),
+ *                     sizeof(struct gtod)) ;
+ *          postlock = __kernel_helper_gtod_seqnum;
+ *      } while (prelock != postlock);
+ *
+ * 0xffff0f40-7: cycle_last
+ * 0xffff0f48-f: mask
+ * 0xffff0f50-3: mult
+ * 0xffff0f54-7: shift
+ * 0xffff0f58-b: tv_sec
+ * 0xffff0f5c-f: tv_nsec
+ */
+	.globl  __kuser_gtod_timeval
+__kuser_gtod_timeval:  @0xffff0f40
+	.word	0
+	.word	0
+	.word	0
+	.word	0
+	.word	0
+	.word	0
+	.word	0
+	.word	0
+	.align	5
+#endif
 
 /*
  * Due to the length of some sequences, __kuser_cmpxchg64 spans 2 regular
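The comment blocks above describe a lock-free read protocol for user space: sample the sequence word, copy the data area, and retry if the sequence word changed underneath. A minimal user-space reader for the timeval area, written against that description, might look like the sketch below; the struct layout and the 0xffff0f28/0xffff0f40 addresses come from the comments above, while the function name is illustrative.

#include <stdint.h>
#include <string.h>

struct gtod {
	uint64_t cycle_last;
	uint64_t mask;
	uint32_t mult;
	uint32_t shift;
	uint32_t tv_sec;
	uint32_t tv_nsec;
};

#define __kernel_helper_gtod_timeval	(*(struct gtod *)0xffff0f40)
#define __kernel_helper_gtod_seqnum	(*(volatile unsigned int *)0xffff0f28)

/* Copy a consistent snapshot of the kernel-maintained gettimeofday data. */
static void read_gtod_snapshot(struct gtod *out)
{
	unsigned int prelock, postlock;

	do {
		prelock = __kernel_helper_gtod_seqnum;
		memcpy(out, (const void *)&__kernel_helper_gtod_timeval,
		       sizeof(*out));
		postlock = __kernel_helper_gtod_seqnum;
	} while (prelock != postlock);
}
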
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index af21496..ac17480 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -680,6 +680,11 @@
 
 const char *arch_vma_name(struct vm_area_struct *vma)
 {
-	return (vma == &gate_vma) ? "[vectors]" : NULL;
+	if (vma == &gate_vma)
+		return "[vectors]";
+	else if (vma == get_user_timers_vma(NULL))
+		return "[timers]";
+	else
+		return NULL;
 }
 #endif
diff --git a/arch/arm/kernel/update_vsyscall_arm.c b/arch/arm/kernel/update_vsyscall_arm.c
new file mode 100644
index 0000000..51f47ae
--- /dev/null
+++ b/arch/arm/kernel/update_vsyscall_arm.c
@@ -0,0 +1,100 @@
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/export.h>
+#include <linux/clocksource.h>
+#include <linux/time.h>
+#include "update_vsyscall_arm.h"
+/*
+ * See entry-armv.S for the offsets into the kernel user helper for
+ * these fields.
+ */
+#define ARM_VSYSCALL_TIMER_TZ			0xf20
+#define ARM_VSYSCALL_TIMER_SEQ			0xf28
+#define ARM_VSYSCALL_TIMER_OFFSET		0xf30
+#define ARM_VSYSCALL_TIMER_WTM_TV_SEC		0xf38
+#define ARM_VSYSCALL_TIMER_WTM_TV_NSEC		0xf3c
+#define ARM_VSYSCALL_TIMER_CYCLE_LAST		0xf40
+#define ARM_VSYSCALL_TIMER_MASK			0xf48
+#define ARM_VSYSCALL_TIMER_MULT			0xf50
+#define ARM_VSYSCALL_TIMER_SHIFT		0xf54
+#define ARM_VSYSCALL_TIMER_TV_SEC		0xf58
+#define ARM_VSYSCALL_TIMER_TV_NSEC		0xf5c
+
+struct kernel_gtod_t {
+	u64  cycle_last;
+	u64  mask;
+	u32  mult;
+	u32  shift;
+	u32  tv_sec;
+	u32  tv_nsec;
+};
+
+struct kernel_tz_t {
+	u32  tz_minuteswest;
+	u32  tz_dsttime;
+};
+
+struct kernel_wtm_t {
+	u32  tv_sec;
+	u32  tv_nsec;
+};
+
+/*
+ * Updates the kernel user helper area with the current timespec
+ * data, as well as additional fields needed to calculate
+ * gettimeofday, clock_gettime, etc.
+ */
+void
+update_vsyscall(struct timespec *ts, struct timespec *wtm,
+	struct clocksource *c, u32 mult)
+{
+	unsigned long vectors = (unsigned long)vectors_page;
+	unsigned long flags;
+	unsigned *seqnum = (unsigned *)(vectors + ARM_VSYSCALL_TIMER_SEQ);
+	struct kernel_gtod_t *dgtod = (struct kernel_gtod_t *)(vectors +
+		ARM_VSYSCALL_TIMER_CYCLE_LAST);
+	struct kernel_wtm_t *dgwtm = (struct kernel_wtm_t *)(vectors +
+		ARM_VSYSCALL_TIMER_WTM_TV_SEC);
+
+	write_seqlock_irqsave(&kuh_time_lock, flags);
+	*seqnum = kuh_time_lock.sequence;
+	dgtod->cycle_last = c->cycle_last;
+	dgtod->mask = c->mask;
+	dgtod->mult = c->mult;
+	dgtod->shift = c->shift;
+	dgtod->tv_sec = ts->tv_sec;
+	dgtod->tv_nsec = ts->tv_nsec;
+	dgwtm->tv_sec = wtm->tv_sec;
+	dgwtm->tv_nsec = wtm->tv_nsec;
+	*seqnum = kuh_time_lock.sequence + 1;
+	write_sequnlock_irqrestore(&kuh_time_lock, flags);
+}
+EXPORT_SYMBOL(update_vsyscall);
+
+void
+update_vsyscall_tz(void)
+{
+	unsigned long vectors = (unsigned long)vectors_page;
+	unsigned long flags;
+	unsigned *seqnum = (unsigned *)(vectors + ARM_VSYSCALL_TIMER_SEQ);
+	struct kernel_tz_t *dgtod = (struct kernel_tz_t *)(vectors +
+		ARM_VSYSCALL_TIMER_TZ);
+
+	write_seqlock_irqsave(&kuh_time_lock, flags);
+	*seqnum = kuh_time_lock.sequence;
+	dgtod->tz_minuteswest = sys_tz.tz_minuteswest;
+	dgtod->tz_dsttime = sys_tz.tz_dsttime;
+	*seqnum = kuh_time_lock.sequence + 1;
+	write_sequnlock_irqrestore(&kuh_time_lock, flags);
+}
+EXPORT_SYMBOL(update_vsyscall_tz);
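The writer side above copies cycle_last, mask, mult and shift verbatim from the clocksource, along with the current timespec and wall-to-monotonic offset. Combined with a raw counter value read from the user-accessible timer page added elsewhere in this patch, user space can derive a timestamp. The sketch below is illustrative; the assumption that the exported mult/shift pair is used with the standard clocksource conversion follows from it being copied straight out of struct clocksource.

#include <stdint.h>
#include <time.h>

/* Mirrors the layout of struct kernel_gtod_t defined above. */
struct gtod_snapshot {
	uint64_t cycle_last;
	uint64_t mask;
	uint32_t mult;
	uint32_t shift;
	uint32_t tv_sec;
	uint32_t tv_nsec;
};

/* 'cycles' is the raw counter sampled from the user-accessible timer page. */
static void snapshot_to_timespec(const struct gtod_snapshot *g,
				 uint64_t cycles, struct timespec *ts)
{
	/* standard clocksource conversion: masked delta scaled by mult/shift */
	uint64_t nsec = ((cycles - g->cycle_last) & g->mask) * g->mult >> g->shift;
	uint64_t tv_nsec = g->tv_nsec + nsec;

	ts->tv_sec = g->tv_sec + tv_nsec / 1000000000UL;
	ts->tv_nsec = tv_nsec % 1000000000UL;
}
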
diff --git a/arch/arm/kernel/update_vsyscall_arm.h b/arch/arm/kernel/update_vsyscall_arm.h
new file mode 100644
index 0000000..d06ca56
--- /dev/null
+++ b/arch/arm/kernel/update_vsyscall_arm.h
@@ -0,0 +1,23 @@
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include <linux/export.h>
+#include <linux/clocksource.h>
+#include <linux/time.h>
+
+extern void *vectors_page;
+extern struct timezone sys_tz;
+
+/*
+ * This read-write spinlock protects us from races in SMP while
+ * updating the kernel user helper-embedded time.
+ */
+__cacheline_aligned_in_smp DEFINE_SEQLOCK(kuh_time_lock);
diff --git a/arch/arm/kernel/user_accessible_timer.c b/arch/arm/kernel/user_accessible_timer.c
new file mode 100644
index 0000000..c550c03
--- /dev/null
+++ b/arch/arm/kernel/user_accessible_timer.c
@@ -0,0 +1,132 @@
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/export.h>
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <asm/user_accessible_timer.h>
+#include <asm/traps.h>
+
+#define USER_ACCESS_TIMER_OFFSET	0xf30
+#define USER_ACCESS_FEATURE_OFFSET	0xf34
+#define USER_ACCESS_FEATURE_FLAG	0xffff0f20
+
+static struct vm_area_struct user_timers_vma;
+static int __init user_timers_vma_init(void)
+{
+	user_timers_vma.vm_start        = CONFIG_ARM_USER_ACCESSIBLE_TIMER_BASE;
+	user_timers_vma.vm_end          = CONFIG_ARM_USER_ACCESSIBLE_TIMER_BASE
+						+ PAGE_SIZE;
+	user_timers_vma.vm_page_prot    = PAGE_READONLY;
+	user_timers_vma.vm_flags        = VM_READ | VM_MAYREAD;
+	return 0;
+}
+arch_initcall(user_timers_vma_init);
+
+int in_user_timers_area(struct mm_struct *mm, unsigned long addr)
+{
+	return (addr >= user_timers_vma.vm_start) &&
+		(addr < user_timers_vma.vm_end);
+}
+EXPORT_SYMBOL(in_user_timers_area);
+
+struct vm_area_struct *get_user_timers_vma(struct mm_struct *mm)
+{
+	return &user_timers_vma;
+}
+EXPORT_SYMBOL(get_user_timers_vma);
+
+int get_user_timer_page(struct vm_area_struct *vma,
+	struct mm_struct *mm, unsigned long start, unsigned int gup_flags,
+	struct page **pages, int idx, int *goto_next_page)
+{
+	/* Replicates the earlier work done in mm/memory.c */
+	unsigned long pg = start & PAGE_MASK;
+	pgd_t *pgd;
+	pud_t *pud;
+	pmd_t *pmd;
+	pte_t *pte;
+
+	/* Unset this flag -- this only gets activated if the
+	 * caller should go straight to the next_page label on
+	 * return.
+	 */
+	*goto_next_page = 0;
+
+	/* user gate pages are read-only */
+	if (gup_flags & FOLL_WRITE)
+		return idx ? : -EFAULT;
+	if (pg > TASK_SIZE)
+		pgd = pgd_offset_k(pg);
+	else
+		pgd = pgd_offset_gate(mm, pg);
+	BUG_ON(pgd_none(*pgd));
+	pud = pud_offset(pgd, pg);
+	BUG_ON(pud_none(*pud));
+	pmd = pmd_offset(pud, pg);
+	if (pmd_none(*pmd))
+		return idx ? : -EFAULT;
+	VM_BUG_ON(pmd_trans_huge(*pmd));
+	pte = pte_offset_map(pmd, pg);
+	if (pte_none(*pte)) {
+		pte_unmap(pte);
+		return idx ? : -EFAULT;
+	}
+	vma = get_user_timers_vma(mm);
+	if (pages) {
+		struct page *page;
+
+		page = vm_normal_page(vma, start, *pte);
+		if (!page) {
+			if (!(gup_flags & FOLL_DUMP) &&
+				zero_pfn == pte_pfn(*pte))
+				page = pte_page(*pte);
+			else {
+				pte_unmap(pte);
+				return idx ? : -EFAULT;
+			}
+		}
+		pages[idx] = page;
+		get_page(page);
+	}
+	pte_unmap(pte);
+	/* In this case, set the next page */
+	*goto_next_page = 1;
+	return 0;
+}
+EXPORT_SYMBOL(get_user_timer_page);
+
+void setup_user_timer_offset(unsigned long addr)
+{
+#if defined(CONFIG_CPU_USE_DOMAINS)
+	unsigned long vectors = CONFIG_VECTORS_BASE;
+#else
+	unsigned long vectors = (unsigned long)vectors_page;
+#endif
+	unsigned long *timer_offset = (unsigned long *)(vectors +
+		USER_ACCESS_TIMER_OFFSET);
+	*timer_offset = addr;
+}
+EXPORT_SYMBOL(setup_user_timer_offset);
+
+void set_user_accessible_timer_flag(bool flag)
+{
+#if defined(CONFIG_CPU_USE_DOMAINS)
+	unsigned long vectors = CONFIG_VECTORS_BASE;
+#else
+	unsigned long vectors = (unsigned long)vectors_page;
+#endif
+	unsigned long *timer_offset = (unsigned long *)(vectors +
+		USER_ACCESS_FEATURE_OFFSET);
+	*timer_offset = (flag ? USER_ACCESS_FEATURE_FLAG : 0);
+}
+EXPORT_SYMBOL(set_user_accessible_timer_flag);
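
For reference, a rough userspace-side probe of this interface. It assumes the ARM vectors page is visible to userspace at 0xffff0000 and that, per the constants above, the feature word sits at offset 0xf34 and the timer page address at offset 0xf30; none of this is guaranteed by the patch alone, so treat the addresses below as assumptions.

#include <stdio.h>

#define VECTORS_BASE     0xffff0000UL            /* assumed kuser/vectors page */
#define FEATURE_WORD     (VECTORS_BASE + 0xf34)  /* USER_ACCESS_FEATURE_OFFSET */
#define TIMER_ADDR_WORD  (VECTORS_BASE + 0xf30)  /* USER_ACCESS_TIMER_OFFSET */
#define FEATURE_FLAG     0xffff0f20UL            /* USER_ACCESS_FEATURE_FLAG */

int main(void)
{
	volatile unsigned long *feature = (volatile unsigned long *)FEATURE_WORD;
	volatile unsigned long *timer_addr = (volatile unsigned long *)TIMER_ADDR_WORD;

	if (*feature != FEATURE_FLAG) {
		printf("user-accessible timer not advertised\n");
		return 1;
	}
	printf("timer counter page at %#lx\n", *timer_addr);
	return 0;
}
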
diff --git a/arch/arm/mach-msm/Kconfig b/arch/arm/mach-msm/Kconfig
index dbb4328..887fcbf 100644
--- a/arch/arm/mach-msm/Kconfig
+++ b/arch/arm/mach-msm/Kconfig
@@ -177,6 +177,10 @@
 	select ARM_HAS_SG_CHAIN
 	select MSM_KRAIT_WFE_FIXUP
 	select MSM_ULTRASOUND_A
+	select GENERIC_TIME_VSYSCALL
+	select USE_USER_ACCESSIBLE_TIMERS
+	select ARM_USE_USER_ACCESSIBLE_TIMERS
+	select MSM_USE_USER_ACCESSIBLE_TIMERS
 
 config ARCH_MSM8930
 	bool "MSM8930"
@@ -209,6 +213,10 @@
 	select HOLES_IN_ZONE if SPARSEMEM
 	select ARM_HAS_SG_CHAIN
 	select MSM_KRAIT_WFE_FIXUP
+	select GENERIC_TIME_VSYSCALL
+	select USE_USER_ACCESSIBLE_TIMERS
+	select ARM_USE_USER_ACCESSIBLE_TIMERS
+	select MSM_USE_USER_ACCESSIBLE_TIMERS
 
 config ARCH_APQ8064
 	bool "APQ8064"
@@ -237,6 +245,10 @@
 	select ARM_HAS_SG_CHAIN
 	select MSM_KRAIT_WFE_FIXUP
 	select MSM_ULTRASOUND_A
+	select GENERIC_TIME_VSYSCALL
+	select USE_USER_ACCESSIBLE_TIMERS
+	select ARM_USE_USER_ACCESSIBLE_TIMERS
+	select MSM_USE_USER_ACCESSIBLE_TIMERS
 
 config ARCH_MSM8974
 	bool "MSM8974"
@@ -267,6 +279,7 @@
 	select MEMORY_HOLE_CARVEOUT
 	select MSM_RPM_STATS_LOG
 	select QMI_ENCDEC
+	select DONT_MAP_HOLE_AFTER_MEMBANK0
 
 config ARCH_MPQ8092
 	bool "MPQ8092"
@@ -349,12 +362,14 @@
 	select SPARSE_IRQ
 	select MSM_MULTIMEDIA_USE_ION
 	select MSM_RPM_STATS_LOG
+	select MSM_QDSP6_APRV2
+	select MSM_QDSP6V2_CODECS
+	select MSM_AUDIO_QDSP6V2 if SND_SOC
 
 config ARCH_MSM8910
 	bool "MSM8910"
 	select ARM_GIC
 	select GIC_SECURE
-	select SMP
 	select ARCH_MSM_CORTEXMP
 	select CPU_V7
 	select MSM_SCM if SMP
@@ -370,7 +385,6 @@
 	bool "MSM8226"
 	select ARM_GIC
 	select GIC_SECURE
-	select SMP
 	select ARCH_MSM_CORTEXMP
 	select CPU_V7
 	select MSM_SCM if SMP
@@ -379,6 +393,8 @@
 	select MULTI_IRQ_HANDLER
 	select GPIO_MSM_V3
 	select MSM_GPIOMUX
+	select MSM_NATIVE_RESTART
+	select MSM_RESTART_V2
 endmenu
 
 choice
@@ -2661,4 +2677,12 @@
 	  if apps is not responding and holding lock with irqs disabled.
 	  Modem will then raise a FIQ on this line before sending
 	  SMSM reset.
+
+config MSM_USE_USER_ACCESSIBLE_TIMERS
+	bool "Map an MSM timer counter page to user space"
+	depends on ARM_USE_USER_ACCESSIBLE_TIMERS
+	help
+	  Enables MSM-specific user accessible timers via a shared
+	  memory page containing the cycle counter.
+
 endif
diff --git a/arch/arm/mach-msm/Makefile b/arch/arm/mach-msm/Makefile
index 88ed433..2c7424e 100644
--- a/arch/arm/mach-msm/Makefile
+++ b/arch/arm/mach-msm/Makefile
@@ -46,15 +46,15 @@
 endif
 
 obj-$(CONFIG_SMP) += headsmp.o
+ifdef CONFIG_ARCH_MSM_CORTEXMP
 ifdef CONFIG_ARCH_MSM8625
 	obj-$(CONFIG_SMP) += platsmp-8625.o
 else
-ifdef CONFIG_ARCH_MSM8910
 	obj-$(CONFIG_SMP) += platsmp-8910.o
+endif
 else
 	obj-$(CONFIG_SMP) += platsmp.o
 endif
-endif
 obj-$(CONFIG_HOTPLUG_CPU) += hotplug.o
 
 obj-$(CONFIG_MSM_CPU_AVS) += avs.o
@@ -282,6 +282,7 @@
 obj-$(CONFIG_MACH_MSM8930_MTP) += board-8930-all.o board-8930-regulator-pm8038.o board-8930-regulator-pm8917.o
 obj-$(CONFIG_MACH_MSM8930_FLUID) += board-8930-all.o board-8930-regulator-pm8038.o board-8930-regulator-pm8917.o
 obj-$(CONFIG_PM8921_BMS) += bms-batterydata.o bms-batterydata-desay.o batterydata-lib.o
+obj-$(CONFIG_QPNP_BMS) += bms-batterydata.o bms-batterydata-desay.o batterydata-lib.o
 obj-$(CONFIG_MACH_APQ8064_CDP) += board-8064-all.o board-8064-regulator.o
 obj-$(CONFIG_MACH_APQ8064_MTP) += board-8064-all.o board-8064-regulator.o
 obj-$(CONFIG_MACH_APQ8064_LIQUID) += board-8064-all.o board-8064-regulator.o
@@ -294,10 +295,11 @@
 obj-$(CONFIG_ARCH_MSM8974) += clock-local2.o clock-pll.o clock-8974.o clock-rpm.o clock-voter.o clock-mdss-8974.o
 obj-$(CONFIG_ARCH_MSM8974) += gdsc.o
 obj-$(CONFIG_ARCH_MSM9625) += gdsc.o
+obj-$(CONFIG_ARCH_MSM8226) += gdsc.o
 obj-$(CONFIG_ARCH_MSM8974) += krait-regulator.o
 obj-$(CONFIG_ARCH_MSM9625) += board-9625.o board-9625-gpiomux.o
 obj-$(CONFIG_ARCH_MSM9625) += clock-local2.o clock-pll.o clock-9625.o clock-rpm.o clock-voter.o acpuclock-9625.o
-obj-$(CONFIG_ARCH_MSM8930) += acpuclock-8930.o acpuclock-8627.o acpuclock-8930aa.o
+obj-$(CONFIG_ARCH_MSM8930) += acpuclock-8930.o acpuclock-8627.o acpuclock-8930aa.o acpuclock-8930ab.o
 obj-$(CONFIG_ARCH_MPQ8092) += board-8092.o board-8092-gpiomux.o
 obj-$(CONFIG_ARCH_MSM8226) += board-8226.o board-8226-gpiomux.o
 obj-$(CONFIG_ARCH_MSM8910) += board-8910.o board-8910-gpiomux.o
@@ -397,6 +399,8 @@
 
 obj-$(CONFIG_MSM_ENABLE_WDOG_DEBUG_CONTROL) += wdog_debug.o
 
+obj-$(CONFIG_MSM_USE_USER_ACCESSIBLE_TIMERS) += timer_page.o
+
 ifdef CONFIG_MSM_CPR
 obj-$(CONFIG_DEBUG_FS) += msm_cpr-debug.o
 endif
diff --git a/arch/arm/mach-msm/acpuclock-8064.c b/arch/arm/mach-msm/acpuclock-8064.c
index cda952f..5c1d3e1 100644
--- a/arch/arm/mach-msm/acpuclock-8064.c
+++ b/arch/arm/mach-msm/acpuclock-8064.c
@@ -213,6 +213,32 @@
 	{ 0, { 0 } }
 };
 
+static struct acpu_level tbl_faster[] __initdata = {
+	{ 1, {   384000, PLL_8, 0, 0x00 }, L2(0),   850000 },
+	{ 0, {   432000, HFPLL, 2, 0x20 }, L2(6),   875000 },
+	{ 1, {   486000, HFPLL, 2, 0x24 }, L2(6),   875000 },
+	{ 0, {   540000, HFPLL, 2, 0x28 }, L2(6),   900000 },
+	{ 1, {   594000, HFPLL, 1, 0x16 }, L2(6),   900000 },
+	{ 0, {   648000, HFPLL, 1, 0x18 }, L2(6),   925000 },
+	{ 1, {   702000, HFPLL, 1, 0x1A }, L2(6),   925000 },
+	{ 0, {   756000, HFPLL, 1, 0x1C }, L2(6),   962500 },
+	{ 1, {   810000, HFPLL, 1, 0x1E }, L2(6),   962500 },
+	{ 0, {   864000, HFPLL, 1, 0x20 }, L2(6),   975000 },
+	{ 1, {   918000, HFPLL, 1, 0x22 }, L2(6),   975000 },
+	{ 0, {   972000, HFPLL, 1, 0x24 }, L2(6),  1000000 },
+	{ 1, {  1026000, HFPLL, 1, 0x26 }, L2(6),  1000000 },
+	{ 0, {  1080000, HFPLL, 1, 0x28 }, L2(15), 1050000 },
+	{ 1, {  1134000, HFPLL, 1, 0x2A }, L2(15), 1050000 },
+	{ 0, {  1188000, HFPLL, 1, 0x2C }, L2(15), 1075000 },
+	{ 1, {  1242000, HFPLL, 1, 0x2E }, L2(15), 1075000 },
+	{ 0, {  1296000, HFPLL, 1, 0x30 }, L2(15), 1100000 },
+	{ 1, {  1350000, HFPLL, 1, 0x32 }, L2(15), 1100000 },
+	{ 0, {  1404000, HFPLL, 1, 0x34 }, L2(15), 1112500 },
+	{ 1, {  1458000, HFPLL, 1, 0x36 }, L2(15), 1112500 },
+	{ 1, {  1512000, HFPLL, 1, 0x38 }, L2(15), 1125000 },
+	{ 0, { 0 } }
+};
+
 static struct acpu_level tbl_PVS0_1700MHz[] __initdata = {
 	{ 1, {   384000, PLL_8, 0, 0x00 }, L2(0),   950000 },
 	{ 1, {   486000, HFPLL, 2, 0x24 }, L2(6),   975000 },
@@ -233,40 +259,40 @@
 };
 
 static struct acpu_level tbl_PVS0_2000MHz[] __initdata = {
-	{ 1, {   384000, PLL_8, 0, 0x00 }, L2(0),   900000 },
-	{ 1, {   486000, HFPLL, 2, 0x24 }, L2(6),   900000 },
-	{ 1, {   594000, HFPLL, 1, 0x16 }, L2(6),   900000 },
-	{ 1, {   702000, HFPLL, 1, 0x1A }, L2(6),   900000 },
-	{ 1, {   810000, HFPLL, 1, 0x1E }, L2(6),   912500 },
-	{ 1, {   918000, HFPLL, 1, 0x22 }, L2(6),   962500 },
-	{ 1, {  1026000, HFPLL, 1, 0x26 }, L2(6),   987500 },
-	{ 1, {  1134000, HFPLL, 1, 0x2A }, L2(15), 1012500 },
-	{ 1, {  1242000, HFPLL, 1, 0x2E }, L2(15), 1025000 },
-	{ 1, {  1350000, HFPLL, 1, 0x32 }, L2(15), 1075000 },
-	{ 1, {  1458000, HFPLL, 1, 0x36 }, L2(15), 1112500 },
-	{ 1, {  1566000, HFPLL, 1, 0x3A }, L2(15), 1150000 },
-	{ 1, {  1674000, HFPLL, 1, 0x3E }, L2(15), 1200000 },
-	{ 1, {  1782000, HFPLL, 1, 0x42 }, L2(15), 1262500 },
-	{ 1, {  1890000, HFPLL, 1, 0x46 }, L2(15), 1300000 },
+	{ 1, {   384000, PLL_8, 0, 0x00 }, L2(0),   950000 },
+	{ 1, {   486000, HFPLL, 2, 0x24 }, L2(6),   950000 },
+	{ 1, {   594000, HFPLL, 1, 0x16 }, L2(6),   950000 },
+	{ 1, {   702000, HFPLL, 1, 0x1A }, L2(6),   950000 },
+	{ 1, {   810000, HFPLL, 1, 0x1E }, L2(6),   962500 },
+	{ 1, {   918000, HFPLL, 1, 0x22 }, L2(6),   975000 },
+	{ 1, {  1026000, HFPLL, 1, 0x26 }, L2(6),  1000000 },
+	{ 1, {  1134000, HFPLL, 1, 0x2A }, L2(15), 1025000 },
+	{ 1, {  1242000, HFPLL, 1, 0x2E }, L2(15), 1037500 },
+	{ 1, {  1350000, HFPLL, 1, 0x32 }, L2(15), 1062500 },
+	{ 1, {  1458000, HFPLL, 1, 0x36 }, L2(15), 1100000 },
+	{ 1, {  1566000, HFPLL, 1, 0x3A }, L2(15), 1125000 },
+	{ 1, {  1674000, HFPLL, 1, 0x3E }, L2(15), 1175000 },
+	{ 1, {  1782000, HFPLL, 1, 0x42 }, L2(15), 1225000 },
+	{ 1, {  1890000, HFPLL, 1, 0x46 }, L2(15), 1287500 },
 	{ 0, { 0 } }
 };
 
 static struct acpu_level tbl_PVS1_2000MHz[] __initdata = {
-	{ 1, {   384000, PLL_8, 0, 0x00 }, L2(0),   900000 },
-	{ 1, {   486000, HFPLL, 2, 0x24 }, L2(6),   900000 },
-	{ 1, {   594000, HFPLL, 1, 0x16 }, L2(6),   900000 },
-	{ 1, {   702000, HFPLL, 1, 0x1A }, L2(6),   900000 },
-	{ 1, {   810000, HFPLL, 1, 0x1E }, L2(6),   900000 },
-	{ 1, {   918000, HFPLL, 1, 0x22 }, L2(6),   962500 },
-	{ 1, {  1026000, HFPLL, 1, 0x26 }, L2(6),   987500 },
+	{ 1, {   384000, PLL_8, 0, 0x00 }, L2(0),   925000 },
+	{ 1, {   486000, HFPLL, 2, 0x24 }, L2(6),   925000 },
+	{ 1, {   594000, HFPLL, 1, 0x16 }, L2(6),   925000 },
+	{ 1, {   702000, HFPLL, 1, 0x1A }, L2(6),   925000 },
+	{ 1, {   810000, HFPLL, 1, 0x1E }, L2(6),   937500 },
+	{ 1, {   918000, HFPLL, 1, 0x22 }, L2(6),   950000 },
+	{ 1, {  1026000, HFPLL, 1, 0x26 }, L2(6),   975000 },
 	{ 1, {  1134000, HFPLL, 1, 0x2A }, L2(15), 1000000 },
 	{ 1, {  1242000, HFPLL, 1, 0x2E }, L2(15), 1012500 },
-	{ 1, {  1350000, HFPLL, 1, 0x32 }, L2(15), 1062500 },
-	{ 1, {  1458000, HFPLL, 1, 0x36 }, L2(15), 1087500 },
-	{ 1, {  1566000, HFPLL, 1, 0x3A }, L2(15), 1125000 },
-	{ 1, {  1674000, HFPLL, 1, 0x3E }, L2(15), 1187500 },
-	{ 1, {  1782000, HFPLL, 1, 0x42 }, L2(15), 1237500 },
-	{ 1, {  1890000, HFPLL, 1, 0x46 }, L2(15), 1275000 },
+	{ 1, {  1350000, HFPLL, 1, 0x32 }, L2(15), 1037500 },
+	{ 1, {  1458000, HFPLL, 1, 0x36 }, L2(15), 1075000 },
+	{ 1, {  1566000, HFPLL, 1, 0x3A }, L2(15), 1100000 },
+	{ 1, {  1674000, HFPLL, 1, 0x3E }, L2(15), 1137500 },
+	{ 1, {  1782000, HFPLL, 1, 0x42 }, L2(15), 1187500 },
+	{ 1, {  1890000, HFPLL, 1, 0x46 }, L2(15), 1250000 },
 	{ 0, { 0 } }
 };
 
@@ -275,17 +301,17 @@
 	{ 1, {   486000, HFPLL, 2, 0x24 }, L2(6),   900000 },
 	{ 1, {   594000, HFPLL, 1, 0x16 }, L2(6),   900000 },
 	{ 1, {   702000, HFPLL, 1, 0x1A }, L2(6),   900000 },
-	{ 1, {   810000, HFPLL, 1, 0x1E }, L2(6),   900000 },
-	{ 1, {   918000, HFPLL, 1, 0x22 }, L2(6),   950000 },
-	{ 1, {  1026000, HFPLL, 1, 0x26 }, L2(6),   975000 },
-	{ 1, {  1134000, HFPLL, 1, 0x2A }, L2(15),  987500 },
-	{ 1, {  1242000, HFPLL, 1, 0x2E }, L2(15), 1000000 },
-	{ 1, {  1350000, HFPLL, 1, 0x32 }, L2(15), 1050000 },
-	{ 1, {  1458000, HFPLL, 1, 0x36 }, L2(15), 1075000 },
-	{ 1, {  1566000, HFPLL, 1, 0x3A }, L2(15), 1112500 },
-	{ 1, {  1674000, HFPLL, 1, 0x3E }, L2(15), 1162500 },
-	{ 1, {  1782000, HFPLL, 1, 0x42 }, L2(15), 1212500 },
-	{ 1, {  1890000, HFPLL, 1, 0x46 }, L2(15), 1250000 },
+	{ 1, {   810000, HFPLL, 1, 0x1E }, L2(6),   912500 },
+	{ 1, {   918000, HFPLL, 1, 0x22 }, L2(6),   925000 },
+	{ 1, {  1026000, HFPLL, 1, 0x26 }, L2(6),   950000 },
+	{ 1, {  1134000, HFPLL, 1, 0x2A }, L2(15),  975000 },
+	{ 1, {  1242000, HFPLL, 1, 0x2E }, L2(15),  987500 },
+	{ 1, {  1350000, HFPLL, 1, 0x32 }, L2(15), 1012500 },
+	{ 1, {  1458000, HFPLL, 1, 0x36 }, L2(15), 1050000 },
+	{ 1, {  1566000, HFPLL, 1, 0x3A }, L2(15), 1075000 },
+	{ 1, {  1674000, HFPLL, 1, 0x3E }, L2(15), 1112500 },
+	{ 1, {  1782000, HFPLL, 1, 0x42 }, L2(15), 1162500 },
+	{ 1, {  1890000, HFPLL, 1, 0x46 }, L2(15), 1212500 },
 	{ 0, { 0 } }
 };
 
@@ -295,44 +321,44 @@
 	{ 1, {   594000, HFPLL, 1, 0x16 }, L2(6),   900000 },
 	{ 1, {   702000, HFPLL, 1, 0x1A }, L2(6),   900000 },
 	{ 1, {   810000, HFPLL, 1, 0x1E }, L2(6),   900000 },
-	{ 1, {   918000, HFPLL, 1, 0x22 }, L2(6),   925000 },
-	{ 1, {  1026000, HFPLL, 1, 0x26 }, L2(6),   950000 },
+	{ 1, {   918000, HFPLL, 1, 0x22 }, L2(6),   912500 },
+	{ 1, {  1026000, HFPLL, 1, 0x26 }, L2(6),   937500 },
 	{ 1, {  1134000, HFPLL, 1, 0x2A }, L2(15),  962500 },
 	{ 1, {  1242000, HFPLL, 1, 0x2E }, L2(15),  975000 },
-	{ 1, {  1350000, HFPLL, 1, 0x32 }, L2(15), 1012500 },
-	{ 1, {  1458000, HFPLL, 1, 0x36 }, L2(15), 1037500 },
-	{ 1, {  1566000, HFPLL, 1, 0x3A }, L2(15), 1075000 },
-	{ 1, {  1674000, HFPLL, 1, 0x3E }, L2(15), 1112500 },
-	{ 1, {  1782000, HFPLL, 1, 0x42 }, L2(15), 1162500 },
-	{ 1, {  1890000, HFPLL, 1, 0x46 }, L2(15), 1200000 },
+	{ 1, {  1350000, HFPLL, 1, 0x32 }, L2(15), 1000000 },
+	{ 1, {  1458000, HFPLL, 1, 0x36 }, L2(15), 1025000 },
+	{ 1, {  1566000, HFPLL, 1, 0x3A }, L2(15), 1050000 },
+	{ 1, {  1674000, HFPLL, 1, 0x3E }, L2(15), 1087500 },
+	{ 1, {  1782000, HFPLL, 1, 0x42 }, L2(15), 1137500 },
+	{ 1, {  1890000, HFPLL, 1, 0x46 }, L2(15), 1175000 },
 	{ 0, { 0 } }
 };
 
 static struct acpu_level tbl_PVS4_2000MHz[] __initdata = {
-	{ 1, {   384000, PLL_8, 0, 0x00 }, L2(0),   900000 },
-	{ 1, {   486000, HFPLL, 2, 0x24 }, L2(6),   900000 },
-	{ 1, {   594000, HFPLL, 1, 0x16 }, L2(6),   900000 },
-	{ 1, {   702000, HFPLL, 1, 0x1A }, L2(6),   900000 },
-	{ 1, {   810000, HFPLL, 1, 0x1E }, L2(6),   900000 },
+	{ 1, {   384000, PLL_8, 0, 0x00 }, L2(0),   875000 },
+	{ 1, {   486000, HFPLL, 2, 0x24 }, L2(6),   875000 },
+	{ 1, {   594000, HFPLL, 1, 0x16 }, L2(6),   875000 },
+	{ 1, {   702000, HFPLL, 1, 0x1A }, L2(6),   875000 },
+	{ 1, {   810000, HFPLL, 1, 0x1E }, L2(6),   887500 },
 	{ 1, {   918000, HFPLL, 1, 0x22 }, L2(6),   900000 },
 	{ 1, {  1026000, HFPLL, 1, 0x26 }, L2(6),   925000 },
-	{ 1, {  1134000, HFPLL, 1, 0x2A }, L2(15),  937500 },
-	{ 1, {  1242000, HFPLL, 1, 0x2E }, L2(15),  950000 },
+	{ 1, {  1134000, HFPLL, 1, 0x2A }, L2(15),  950000 },
+	{ 1, {  1242000, HFPLL, 1, 0x2E }, L2(15),  962500 },
 	{ 1, {  1350000, HFPLL, 1, 0x32 }, L2(15),  975000 },
 	{ 1, {  1458000, HFPLL, 1, 0x36 }, L2(15), 1000000 },
 	{ 1, {  1566000, HFPLL, 1, 0x3A }, L2(15), 1037500 },
-	{ 1, {  1674000, HFPLL, 1, 0x3E }, L2(15), 1062500 },
+	{ 1, {  1674000, HFPLL, 1, 0x3E }, L2(15), 1075000 },
 	{ 1, {  1782000, HFPLL, 1, 0x42 }, L2(15), 1112500 },
 	{ 1, {  1890000, HFPLL, 1, 0x46 }, L2(15), 1150000 },
 	{ 0, { 0 } }
 };
 
 static struct acpu_level tbl_PVS5_2000MHz[] __initdata = {
-	{ 1, {   384000, PLL_8, 0, 0x00 }, L2(0),   900000 },
-	{ 1, {   486000, HFPLL, 2, 0x24 }, L2(6),   900000 },
-	{ 1, {   594000, HFPLL, 1, 0x16 }, L2(6),   900000 },
-	{ 1, {   702000, HFPLL, 1, 0x1A }, L2(6),   900000 },
-	{ 1, {   810000, HFPLL, 1, 0x1E }, L2(6),   900000 },
+	{ 1, {   384000, PLL_8, 0, 0x00 }, L2(0),   875000 },
+	{ 1, {   486000, HFPLL, 2, 0x24 }, L2(6),   875000 },
+	{ 1, {   594000, HFPLL, 1, 0x16 }, L2(6),   875000 },
+	{ 1, {   702000, HFPLL, 1, 0x1A }, L2(6),   875000 },
+	{ 1, {   810000, HFPLL, 1, 0x1E }, L2(6),   887500 },
 	{ 1, {   918000, HFPLL, 1, 0x22 }, L2(6),   900000 },
 	{ 1, {  1026000, HFPLL, 1, 0x26 }, L2(6),   925000 },
 	{ 1, {  1134000, HFPLL, 1, 0x2A }, L2(15),  937500 },
@@ -340,18 +366,18 @@
 	{ 1, {  1350000, HFPLL, 1, 0x32 }, L2(15),  962500 },
 	{ 1, {  1458000, HFPLL, 1, 0x36 }, L2(15),  987500 },
 	{ 1, {  1566000, HFPLL, 1, 0x3A }, L2(15), 1012500 },
-	{ 1, {  1674000, HFPLL, 1, 0x3E }, L2(15), 1037500 },
+	{ 1, {  1674000, HFPLL, 1, 0x3E }, L2(15), 1050000 },
 	{ 1, {  1782000, HFPLL, 1, 0x42 }, L2(15), 1087500 },
 	{ 1, {  1890000, HFPLL, 1, 0x46 }, L2(15), 1125000 },
 	{ 0, { 0 } }
 };
 
 static struct acpu_level tbl_PVS6_2000MHz[] __initdata = {
-	{ 1, {   384000, PLL_8, 0, 0x00 }, L2(0),   900000 },
-	{ 1, {   486000, HFPLL, 2, 0x24 }, L2(6),   900000 },
-	{ 1, {   594000, HFPLL, 1, 0x16 }, L2(6),   900000 },
-	{ 1, {   702000, HFPLL, 1, 0x1A }, L2(6),   900000 },
-	{ 1, {   810000, HFPLL, 1, 0x1E }, L2(6),   900000 },
+	{ 1, {   384000, PLL_8, 0, 0x00 }, L2(0),   875000 },
+	{ 1, {   486000, HFPLL, 2, 0x24 }, L2(6),   875000 },
+	{ 1, {   594000, HFPLL, 1, 0x16 }, L2(6),   875000 },
+	{ 1, {   702000, HFPLL, 1, 0x1A }, L2(6),   875000 },
+	{ 1, {   810000, HFPLL, 1, 0x1E }, L2(6),   887500 },
 	{ 1, {   918000, HFPLL, 1, 0x22 }, L2(6),   900000 },
 	{ 1, {  1026000, HFPLL, 1, 0x26 }, L2(6),   925000 },
 	{ 1, {  1134000, HFPLL, 1, 0x2A }, L2(15),  937500 },
@@ -369,7 +395,7 @@
 	[0][PVS_SLOW]    = {tbl_slow, sizeof(tbl_slow),     0 },
 	[0][PVS_NOMINAL] = {tbl_nom,  sizeof(tbl_nom),  25000 },
 	[0][PVS_FAST]    = {tbl_fast, sizeof(tbl_fast), 25000 },
-	[0][PVS_FASTER]  = {tbl_fast, sizeof(tbl_fast), 25000 },
+	[0][PVS_FASTER]  = {tbl_faster, sizeof(tbl_faster), 25000 },
 
 	[1][0] = { tbl_PVS0_1700MHz, sizeof(tbl_PVS0_1700MHz),     0 },
 	[1][1] = { tbl_PVS0_1700MHz, sizeof(tbl_PVS0_1700MHz),     0 },
diff --git a/arch/arm/mach-msm/acpuclock-8930ab.c b/arch/arm/mach-msm/acpuclock-8930ab.c
new file mode 100644
index 0000000..764ae41
--- /dev/null
+++ b/arch/arm/mach-msm/acpuclock-8930ab.c
@@ -0,0 +1,284 @@
+/*
+ * Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <mach/rpm-regulator.h>
+#include <mach/msm_bus_board.h>
+#include <mach/msm_bus.h>
+
+#include "acpuclock.h"
+#include "acpuclock-krait.h"
+
+/* Corner type vreg VDD values */
+#define LVL_NONE	RPM_VREG_CORNER_NONE
+#define LVL_LOW		RPM_VREG_CORNER_LOW
+#define LVL_NOM		RPM_VREG_CORNER_NOMINAL
+#define LVL_HIGH	RPM_VREG_CORNER_HIGH
+
+static struct hfpll_data hfpll_data __initdata = {
+	.mode_offset = 0x00,
+	.l_offset = 0x08,
+	.m_offset = 0x0C,
+	.n_offset = 0x10,
+	.config_offset = 0x04,
+	.config_val = 0x7845C665,
+	.has_droop_ctl = true,
+	.droop_offset = 0x14,
+	.droop_val = 0x0108C000,
+	.low_vdd_l_max = 37,
+	.nom_vdd_l_max = 74,
+	.vdd[HFPLL_VDD_NONE] = LVL_NONE,
+	.vdd[HFPLL_VDD_LOW]  = LVL_LOW,
+	.vdd[HFPLL_VDD_NOM]  = LVL_NOM,
+	.vdd[HFPLL_VDD_HIGH] = LVL_HIGH,
+};
+
+static struct scalable scalable_pm8917[] __initdata = {
+	[CPU0] = {
+		.hfpll_phys_base = 0x00903200,
+		.aux_clk_sel_phys = 0x02088014,
+		.aux_clk_sel = 3,
+		.sec_clk_sel = 2,
+		.l2cpmr_iaddr = 0x4501,
+		.vreg[VREG_CORE] = { "krait0", 1300000 },
+		.vreg[VREG_MEM]  = { "krait0_mem", 1150000 },
+		.vreg[VREG_DIG]  = { "krait0_dig", 1150000 },
+		.vreg[VREG_HFPLL_A] = { "krait0_s8", 2050000 },
+		.vreg[VREG_HFPLL_B] = { "krait0_l23", 1800000 },
+	},
+	[CPU1] = {
+		.hfpll_phys_base = 0x00903300,
+		.aux_clk_sel_phys = 0x02098014,
+		.aux_clk_sel = 3,
+		.sec_clk_sel = 2,
+		.l2cpmr_iaddr = 0x5501,
+		.vreg[VREG_CORE] = { "krait1", 1300000 },
+		.vreg[VREG_MEM]  = { "krait1_mem", 1150000 },
+		.vreg[VREG_DIG]  = { "krait1_dig", 1150000 },
+		.vreg[VREG_HFPLL_A] = { "krait1_s8", 2050000 },
+		.vreg[VREG_HFPLL_B] = { "krait1_l23", 1800000 },
+	},
+	[L2] = {
+		.hfpll_phys_base = 0x00903400,
+		.aux_clk_sel_phys = 0x02011028,
+		.aux_clk_sel = 3,
+		.sec_clk_sel = 2,
+		.l2cpmr_iaddr = 0x0500,
+		.vreg[VREG_HFPLL_A] = { "l2_s8", 2050000 },
+		.vreg[VREG_HFPLL_B] = { "l2_l23", 1800000 },
+	},
+};
+
+static struct scalable scalable[] __initdata = {
+	[CPU0] = {
+		.hfpll_phys_base = 0x00903200,
+		.aux_clk_sel_phys = 0x02088014,
+		.aux_clk_sel = 3,
+		.sec_clk_sel = 2,
+		.l2cpmr_iaddr = 0x4501,
+		.vreg[VREG_CORE] = { "krait0", 1300000 },
+		.vreg[VREG_MEM]  = { "krait0_mem", 1150000 },
+		.vreg[VREG_DIG]  = { "krait0_dig", 1150000 },
+		.vreg[VREG_HFPLL_A] = { "krait0_hfpll", 1800000 },
+	},
+	[CPU1] = {
+		.hfpll_phys_base = 0x00903300,
+		.aux_clk_sel_phys = 0x02098014,
+		.aux_clk_sel = 3,
+		.sec_clk_sel = 2,
+		.l2cpmr_iaddr = 0x5501,
+		.vreg[VREG_CORE] = { "krait1", 1300000 },
+		.vreg[VREG_MEM]  = { "krait1_mem", 1150000 },
+		.vreg[VREG_DIG]  = { "krait1_dig", 1150000 },
+		.vreg[VREG_HFPLL_A] = { "krait1_hfpll", 1800000 },
+	},
+	[L2] = {
+		.hfpll_phys_base = 0x00903400,
+		.aux_clk_sel_phys = 0x02011028,
+		.aux_clk_sel = 3,
+		.sec_clk_sel = 2,
+		.l2cpmr_iaddr = 0x0500,
+		.vreg[VREG_HFPLL_A] = { "l2_hfpll", 1800000 },
+	},
+};
+
+static struct msm_bus_paths bw_level_tbl[] __initdata = {
+	[0] =  BW_MBPS(640), /* At least  80 MHz on bus. */
+	[1] = BW_MBPS(1064), /* At least 133 MHz on bus. */
+	[2] = BW_MBPS(1600), /* At least 200 MHz on bus. */
+	[3] = BW_MBPS(2128), /* At least 266 MHz on bus. */
+	[4] = BW_MBPS(3200), /* At least 400 MHz on bus. */
+	[5] = BW_MBPS(4800), /* At least 600 MHz on bus. */
+};
+
+static struct msm_bus_scale_pdata bus_scale_data __initdata = {
+	.usecase = bw_level_tbl,
+	.num_usecases = ARRAY_SIZE(bw_level_tbl),
+	.active_only = 1,
+	.name = "acpuclk-8930ab",
+};
+
+/* TODO: Update new L2 freqs once they are available */
+static struct l2_level l2_freq_tbl[] __initdata = {
+	[0]  = { {  384000, PLL_8, 0, 0x00 },  LVL_NOM, 1050000, 1 },
+	[1]  = { {  432000, HFPLL, 2, 0x20 },  LVL_NOM, 1050000, 2 },
+	[2]  = { {  486000, HFPLL, 2, 0x24 },  LVL_NOM, 1050000, 2 },
+	[3]  = { {  540000, HFPLL, 2, 0x28 },  LVL_NOM, 1050000, 2 },
+	[4]  = { {  594000, HFPLL, 1, 0x16 },  LVL_NOM, 1050000, 2 },
+	[5]  = { {  648000, HFPLL, 1, 0x18 },  LVL_NOM, 1050000, 4 },
+	[6]  = { {  702000, HFPLL, 1, 0x1A },  LVL_NOM, 1050000, 4 },
+	[7]  = { {  756000, HFPLL, 1, 0x1C }, LVL_HIGH, 1150000, 4 },
+	[8]  = { {  810000, HFPLL, 1, 0x1E }, LVL_HIGH, 1150000, 4 },
+	[9]  = { {  864000, HFPLL, 1, 0x20 }, LVL_HIGH, 1150000, 4 },
+	[10] = { {  918000, HFPLL, 1, 0x22 }, LVL_HIGH, 1150000, 5 },
+	[11] = { {  972000, HFPLL, 1, 0x24 }, LVL_HIGH, 1150000, 5 },
+	[12] = { { 1026000, HFPLL, 1, 0x26 }, LVL_HIGH, 1150000, 5 },
+	[13] = { { 1080000, HFPLL, 1, 0x28 }, LVL_HIGH, 1150000, 5 },
+	[14] = { { 1134000, HFPLL, 1, 0x2A }, LVL_HIGH, 1150000, 5 },
+	[15] = { { 1188000, HFPLL, 1, 0x2C }, LVL_HIGH, 1150000, 5 },
+	{ }
+};
+
+static struct acpu_level acpu_freq_tbl_slow[] __initdata = {
+	{ 1, {   384000, PLL_8, 0, 0x00 }, L2(0),   950000 },
+	{ 1, {   432000, HFPLL, 2, 0x20 }, L2(5),   975000 },
+	{ 1, {   486000, HFPLL, 2, 0x24 }, L2(5),   975000 },
+	{ 1, {   540000, HFPLL, 2, 0x28 }, L2(5),  1000000 },
+	{ 1, {   594000, HFPLL, 1, 0x16 }, L2(5),  1000000 },
+	{ 1, {   648000, HFPLL, 1, 0x18 }, L2(5),  1025000 },
+	{ 1, {   702000, HFPLL, 1, 0x1A }, L2(5),  1025000 },
+	{ 1, {   756000, HFPLL, 1, 0x1C }, L2(10), 1075000 },
+	{ 1, {   810000, HFPLL, 1, 0x1E }, L2(10), 1075000 },
+	{ 1, {   864000, HFPLL, 1, 0x20 }, L2(10), 1100000 },
+	{ 1, {   918000, HFPLL, 1, 0x22 }, L2(10), 1100000 },
+	{ 1, {   972000, HFPLL, 1, 0x24 }, L2(10), 1125000 },
+	{ 1, {  1026000, HFPLL, 1, 0x26 }, L2(10), 1125000 },
+	{ 1, {  1080000, HFPLL, 1, 0x28 }, L2(15), 1175000 },
+	{ 1, {  1134000, HFPLL, 1, 0x2A }, L2(15), 1175000 },
+	{ 1, {  1188000, HFPLL, 1, 0x2C }, L2(15), 1200000 },
+	{ 1, {  1242000, HFPLL, 1, 0x2E }, L2(15), 1200000 },
+	{ 1, {  1296000, HFPLL, 1, 0x30 }, L2(15), 1225000 },
+	{ 1, {  1350000, HFPLL, 1, 0x32 }, L2(15), 1225000 },
+	{ 1, {  1404000, HFPLL, 1, 0x34 }, L2(15), 1237500 },
+	{ 1, {  1458000, HFPLL, 1, 0x36 }, L2(15), 1237500 },
+	{ 1, {  1512000, HFPLL, 1, 0x38 }, L2(15), 1250000 },
+	{ 1, {  1566000, HFPLL, 1, 0x3A }, L2(15), 1250000 },
+	{ 1, {  1620000, HFPLL, 1, 0x3C }, L2(15), 1262500 },
+	{ 1, {  1674000, HFPLL, 1, 0x3E }, L2(15), 1262500 },
+	{ 1, {  1728000, HFPLL, 1, 0x40 }, L2(15), 1287500 },
+	{ 0, { 0 } }
+};
+
+static struct acpu_level acpu_freq_tbl_nom[] __initdata = {
+	{ 1, {   384000, PLL_8, 0, 0x00 }, L2(0),   950000 },
+	{ 1, {   432000, HFPLL, 2, 0x20 }, L2(5),   975000 },
+	{ 1, {   486000, HFPLL, 2, 0x24 }, L2(5),   975000 },
+	{ 1, {   540000, HFPLL, 2, 0x28 }, L2(5),  1000000 },
+	{ 1, {   594000, HFPLL, 1, 0x16 }, L2(5),  1000000 },
+	{ 1, {   648000, HFPLL, 1, 0x18 }, L2(5),  1025000 },
+	{ 1, {   702000, HFPLL, 1, 0x1A }, L2(5),  1025000 },
+	{ 1, {   756000, HFPLL, 1, 0x1C }, L2(10), 1075000 },
+	{ 1, {   810000, HFPLL, 1, 0x1E }, L2(10), 1075000 },
+	{ 1, {   864000, HFPLL, 1, 0x20 }, L2(10), 1100000 },
+	{ 1, {   918000, HFPLL, 1, 0x22 }, L2(10), 1100000 },
+	{ 1, {   972000, HFPLL, 1, 0x24 }, L2(10), 1125000 },
+	{ 1, {  1026000, HFPLL, 1, 0x26 }, L2(10), 1125000 },
+	{ 1, {  1080000, HFPLL, 1, 0x28 }, L2(15), 1175000 },
+	{ 1, {  1134000, HFPLL, 1, 0x2A }, L2(15), 1175000 },
+	{ 1, {  1188000, HFPLL, 1, 0x2C }, L2(15), 1200000 },
+	{ 1, {  1242000, HFPLL, 1, 0x2E }, L2(15), 1200000 },
+	{ 1, {  1296000, HFPLL, 1, 0x30 }, L2(15), 1225000 },
+	{ 1, {  1350000, HFPLL, 1, 0x32 }, L2(15), 1225000 },
+	{ 1, {  1404000, HFPLL, 1, 0x34 }, L2(15), 1237500 },
+	{ 1, {  1458000, HFPLL, 1, 0x36 }, L2(15), 1237500 },
+	{ 1, {  1512000, HFPLL, 1, 0x38 }, L2(15), 1250000 },
+	{ 1, {  1566000, HFPLL, 1, 0x3A }, L2(15), 1250000 },
+	{ 1, {  1620000, HFPLL, 1, 0x3C }, L2(15), 1262500 },
+	{ 1, {  1674000, HFPLL, 1, 0x3E }, L2(15), 1262500 },
+	{ 1, {  1728000, HFPLL, 1, 0x40 }, L2(15), 1287500 },
+	{ 0, { 0 } }
+};
+
+static struct acpu_level acpu_freq_tbl_fast[] __initdata = {
+	{ 1, {   384000, PLL_8, 0, 0x00 }, L2(0),   950000 },
+	{ 1, {   432000, HFPLL, 2, 0x20 }, L2(5),   975000 },
+	{ 1, {   486000, HFPLL, 2, 0x24 }, L2(5),   975000 },
+	{ 1, {   540000, HFPLL, 2, 0x28 }, L2(5),  1000000 },
+	{ 1, {   594000, HFPLL, 1, 0x16 }, L2(5),  1000000 },
+	{ 1, {   648000, HFPLL, 1, 0x18 }, L2(5),  1025000 },
+	{ 1, {   702000, HFPLL, 1, 0x1A }, L2(5),  1025000 },
+	{ 1, {   756000, HFPLL, 1, 0x1C }, L2(10), 1075000 },
+	{ 1, {   810000, HFPLL, 1, 0x1E }, L2(10), 1075000 },
+	{ 1, {   864000, HFPLL, 1, 0x20 }, L2(10), 1100000 },
+	{ 1, {   918000, HFPLL, 1, 0x22 }, L2(10), 1100000 },
+	{ 1, {   972000, HFPLL, 1, 0x24 }, L2(10), 1125000 },
+	{ 1, {  1026000, HFPLL, 1, 0x26 }, L2(10), 1125000 },
+	{ 1, {  1080000, HFPLL, 1, 0x28 }, L2(15), 1175000 },
+	{ 1, {  1134000, HFPLL, 1, 0x2A }, L2(15), 1175000 },
+	{ 1, {  1188000, HFPLL, 1, 0x2C }, L2(15), 1200000 },
+	{ 1, {  1242000, HFPLL, 1, 0x2E }, L2(15), 1200000 },
+	{ 1, {  1296000, HFPLL, 1, 0x30 }, L2(15), 1225000 },
+	{ 1, {  1350000, HFPLL, 1, 0x32 }, L2(15), 1225000 },
+	{ 1, {  1404000, HFPLL, 1, 0x34 }, L2(15), 1237500 },
+	{ 1, {  1458000, HFPLL, 1, 0x36 }, L2(15), 1237500 },
+	{ 1, {  1512000, HFPLL, 1, 0x38 }, L2(15), 1250000 },
+	{ 1, {  1566000, HFPLL, 1, 0x3A }, L2(15), 1250000 },
+	{ 1, {  1620000, HFPLL, 1, 0x3C }, L2(15), 1262500 },
+	{ 1, {  1674000, HFPLL, 1, 0x3E }, L2(15), 1262500 },
+	{ 1, {  1728000, HFPLL, 1, 0x40 }, L2(15), 1287500 },
+	{ 0, { 0 } }
+};
+
+/* TODO: Update boost voltage once the pvs data is available */
+static struct pvs_table pvs_tables[NUM_SPEED_BINS][NUM_PVS] __initdata = {
+[0][PVS_SLOW]    = { acpu_freq_tbl_slow, sizeof(acpu_freq_tbl_slow),     0 },
+[0][PVS_NOMINAL] = { acpu_freq_tbl_nom,  sizeof(acpu_freq_tbl_nom),      0 },
+[0][PVS_FAST]    = { acpu_freq_tbl_fast, sizeof(acpu_freq_tbl_fast),     0 },
+};
+
+static struct acpuclk_krait_params acpuclk_8930ab_params __initdata = {
+	.scalable = scalable,
+	.scalable_size = sizeof(scalable),
+	.hfpll_data = &hfpll_data,
+	.pvs_tables = pvs_tables,
+	.l2_freq_tbl = l2_freq_tbl,
+	.l2_freq_tbl_size = sizeof(l2_freq_tbl),
+	.bus_scale = &bus_scale_data,
+	.pte_efuse_phys = 0x007000C0,
+	.stby_khz = 384000,
+};
+
+static int __init acpuclk_8930ab_probe(struct platform_device *pdev)
+{
+	struct acpuclk_platform_data *pdata = pdev->dev.platform_data;
+	if (pdata && pdata->uses_pm8917)
+		acpuclk_8930ab_params.scalable = scalable_pm8917;
+
+	return acpuclk_krait_init(&pdev->dev, &acpuclk_8930ab_params);
+}
+
+static struct platform_driver acpuclk_8930ab_driver = {
+	.driver = {
+		.name = "acpuclk-8930ab",
+		.owner = THIS_MODULE,
+	},
+};
+
+static int __init acpuclk_8930ab_init(void)
+{
+	return platform_driver_probe(&acpuclk_8930ab_driver,
+				     acpuclk_8930ab_probe);
+}
+device_initcall(acpuclk_8930ab_init);
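
The bus bandwidth comments in bw_level_tbl above appear to assume an 8-byte-wide bus, so the quoted minimum bus frequency is simply the BW_MBPS value divided by 8 (640 MBps gives 80 MHz, 1064 MBps roughly 133 MHz, and so on). A small illustrative check of that arithmetic; the 8-byte width is an assumption inferred from the comments, not stated by the patch:

#include <stdio.h>

int main(void)
{
	/* Bandwidth votes from bw_level_tbl, in MBps. */
	static const unsigned int mbps[] = { 640, 1064, 1600, 2128, 3200, 4800 };
	unsigned int i;

	for (i = 0; i < sizeof(mbps) / sizeof(mbps[0]); i++)
		printf("%4u MBps -> ~%u MHz on an assumed 8-byte-wide bus\n",
		       mbps[i], mbps[i] / 8);
	return 0;
}
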
diff --git a/arch/arm/mach-msm/acpuclock-8974.c b/arch/arm/mach-msm/acpuclock-8974.c
index 61213cf..0fbd6dc 100644
--- a/arch/arm/mach-msm/acpuclock-8974.c
+++ b/arch/arm/mach-msm/acpuclock-8974.c
@@ -113,14 +113,14 @@
 };
 
 static struct l2_level l2_freq_tbl[] __initdata = {
-	[0]  = { {  300000, PLL_0, 0,   0 }, LVL_LOW,   950000, 0 },
-	[1]  = { {  384000, HFPLL, 2,  40 }, LVL_NOM,   950000, 1 },
-	[2]  = { {  460800, HFPLL, 2,  48 }, LVL_NOM,   950000, 1 },
-	[3]  = { {  537600, HFPLL, 1,  28 }, LVL_NOM,   950000, 2 },
-	[4]  = { {  576000, HFPLL, 1,  30 }, LVL_NOM,   950000, 2 },
-	[5]  = { {  652800, HFPLL, 1,  34 }, LVL_NOM,   950000, 2 },
-	[6]  = { {  729600, HFPLL, 1,  38 }, LVL_NOM,   950000, 2 },
-	[7]  = { {  806400, HFPLL, 1,  42 }, LVL_NOM,   950000, 2 },
+	[0]  = { {  300000, PLL_0, 0,   0 }, LVL_LOW,  1050000, 0 },
+	[1]  = { {  345600, HFPLL, 2,  36 }, LVL_NOM,  1050000, 1 },
+	[2]  = { {  422400, HFPLL, 2,  44 }, LVL_NOM,  1050000, 1 },
+	[3]  = { {  499200, HFPLL, 2,  52 }, LVL_NOM,  1050000, 2 },
+	[4]  = { {  576000, HFPLL, 1,  30 }, LVL_NOM,  1050000, 2 },
+	[5]  = { {  652800, HFPLL, 1,  34 }, LVL_NOM,  1050000, 2 },
+	[6]  = { {  729600, HFPLL, 1,  38 }, LVL_NOM,  1050000, 2 },
+	[7]  = { {  806400, HFPLL, 1,  42 }, LVL_NOM,  1050000, 2 },
 	[8]  = { {  883200, HFPLL, 1,  46 }, LVL_HIGH, 1050000, 2 },
 	[9]  = { {  960000, HFPLL, 1,  50 }, LVL_HIGH, 1050000, 2 },
 	[10] = { { 1036800, HFPLL, 1,  54 }, LVL_HIGH, 1050000, 3 },
@@ -143,30 +143,30 @@
 };
 
 static struct acpu_level acpu_freq_tbl[] __initdata = {
-	{ 1, {  300000, PLL_0, 0,   0 }, L2(0),   950000,  100000 },
-	{ 1, {  384000, HFPLL, 2,  40 }, L2(3),   950000, 3200000 },
-	{ 1, {  460800, HFPLL, 2,  48 }, L2(3),   950000, 3200000 },
-	{ 1, {  537600, HFPLL, 1,  28 }, L2(5),   950000, 3200000 },
-	{ 1, {  576000, HFPLL, 1,  30 }, L2(5),   950000, 3200000 },
-	{ 1, {  652800, HFPLL, 1,  34 }, L2(5),   950000, 3200000 },
-	{ 1, {  729600, HFPLL, 1,  38 }, L2(5),   950000, 3200000 },
-	{ 1, {  806400, HFPLL, 1,  42 }, L2(7),   950000, 3200000 },
-	{ 1, {  883200, HFPLL, 1,  46 }, L2(7),   950000, 3200000 },
-	{ 1, {  960000, HFPLL, 1,  50 }, L2(7),   950000, 3200000 },
-	{ 1, { 1036800, HFPLL, 1,  54 }, L2(7),   950000, 3200000 },
-	{ 1, { 1113600, HFPLL, 1,  58 }, L2(12), 1050000, 3200000 },
-	{ 1, { 1190400, HFPLL, 1,  62 }, L2(12), 1050000, 3200000 },
-	{ 1, { 1267200, HFPLL, 1,  66 }, L2(12), 1050000, 3200000 },
-	{ 1, { 1344000, HFPLL, 1,  70 }, L2(15), 1050000, 3200000 },
-	{ 1, { 1420800, HFPLL, 1,  74 }, L2(15), 1050000, 3200000 },
-	{ 1, { 1497600, HFPLL, 1,  78 }, L2(16), 1050000, 3200000 },
-	{ 0, { 1574400, HFPLL, 1,  82 }, L2(20), 1050000, 3200000 },
-	{ 0, { 1651200, HFPLL, 1,  86 }, L2(20), 1050000, 3200000 },
-	{ 0, { 1728000, HFPLL, 1,  90 }, L2(20), 1050000, 3200000 },
-	{ 0, { 1804800, HFPLL, 1,  94 }, L2(25), 1050000, 3200000 },
-	{ 0, { 1881600, HFPLL, 1,  98 }, L2(25), 1050000, 3200000 },
-	{ 0, { 1958400, HFPLL, 1, 102 }, L2(25), 1050000, 3200000 },
-	{ 0, { 1996800, HFPLL, 1, 104 }, L2(25), 1050000, 3200000 },
+	{ 1, {  300000, PLL_0, 0,   0 }, L2(0),   850000,  100000 },
+	{ 0, {  345600, HFPLL, 2,  36 }, L2(0),   850000, 3200000 },
+	{ 1, {  422400, HFPLL, 2,  44 }, L2(0),   850000, 3200000 },
+	{ 0, {  499200, HFPLL, 2,  52 }, L2(0),   850000, 3200000 },
+	{ 1, {  576000, HFPLL, 1,  30 }, L2(0),   850000, 3200000 },
+	{ 1, {  652800, HFPLL, 1,  34 }, L2(16),  850000, 3200000 },
+	{ 0, {  729600, HFPLL, 1,  38 }, L2(16),  850000, 3200000 },
+	{ 1, {  806400, HFPLL, 1,  42 }, L2(16),  850000, 3200000 },
+	{ 1, {  883200, HFPLL, 1,  46 }, L2(16),  870000, 3200000 },
+	{ 1, {  960000, HFPLL, 1,  50 }, L2(16),  880000, 3200000 },
+	{ 1, { 1036800, HFPLL, 1,  54 }, L2(16),  900000, 3200000 },
+	{ 1, { 1113600, HFPLL, 1,  58 }, L2(16),  915000, 3200000 },
+	{ 1, { 1190400, HFPLL, 1,  62 }, L2(16),  935000, 3200000 },
+	{ 1, { 1267200, HFPLL, 1,  66 }, L2(16),  950000, 3200000 },
+	{ 1, { 1344000, HFPLL, 1,  70 }, L2(16),  970000, 3200000 },
+	{ 1, { 1420800, HFPLL, 1,  74 }, L2(16),  985000, 3200000 },
+	{ 1, { 1497600, HFPLL, 1,  78 }, L2(16), 1000000, 3200000 },
+	{ 1, { 1574400, HFPLL, 1,  82 }, L2(16), 1015000, 3200000 },
+	{ 1, { 1651200, HFPLL, 1,  86 }, L2(16), 1030000, 3200000 },
+	{ 1, { 1728000, HFPLL, 1,  90 }, L2(16), 1050000, 3200000 },
+	{ 0, { 1804800, HFPLL, 1,  94 }, L2(16), 1050000, 3200000 },
+	{ 0, { 1881600, HFPLL, 1,  98 }, L2(16), 1050000, 3200000 },
+	{ 0, { 1958400, HFPLL, 1, 102 }, L2(16), 1050000, 3200000 },
+	{ 0, { 1996800, HFPLL, 1, 104 }, L2(16), 1050000, 3200000 },
 	{ 0, { 0 } }
 };
 
diff --git a/arch/arm/mach-msm/acpuclock-krait.c b/arch/arm/mach-msm/acpuclock-krait.c
index bf57eab..10c4d6c 100644
--- a/arch/arm/mach-msm/acpuclock-krait.c
+++ b/arch/arm/mach-msm/acpuclock-krait.c
@@ -33,6 +33,7 @@
 #include <mach/rpm-regulator.h>
 #include <mach/rpm-regulator-smd.h>
 #include <mach/msm_bus.h>
+#include <mach/msm_dcvs.h>
 
 #include "acpuclock.h"
 #include "acpuclock-krait.h"
@@ -951,6 +952,17 @@
 static void __init cpufreq_table_init(void) {}
 #endif
 
+static void __init dcvs_freq_init(void)
+{
+	int i;
+
+	for (i = 0; drv.acpu_freq_tbl[i].speed.khz != 0; i++)
+		if (drv.acpu_freq_tbl[i].use_for_scaling)
+			msm_dcvs_register_cpu_freq(
+				drv.acpu_freq_tbl[i].speed.khz,
+				drv.acpu_freq_tbl[i].vdd_core / 1000);
+}
+
 static int __cpuinit acpuclk_cpu_callback(struct notifier_block *nfb,
 					    unsigned long action, void *hcpu)
 {
@@ -965,7 +977,11 @@
 		/* Fall through. */
 	case CPU_UP_CANCELED:
 		acpuclk_krait_set_rate(cpu, hot_unplug_khz, SETRATE_HOTPLUG);
+
+		regulator_disable(sc->vreg[VREG_CORE].reg);
 		regulator_set_optimum_mode(sc->vreg[VREG_CORE].reg, 0);
+		regulator_set_voltage(sc->vreg[VREG_CORE].reg, 0,
+						sc->vreg[VREG_CORE].max_vdd);
 		break;
 	case CPU_UP_PREPARE:
 		if (!sc->initialized) {
@@ -976,10 +992,20 @@
 		}
 		if (WARN_ON(!prev_khz[cpu]))
 			return NOTIFY_BAD;
+
+		rc = regulator_set_voltage(sc->vreg[VREG_CORE].reg,
+					sc->vreg[VREG_CORE].cur_vdd,
+					sc->vreg[VREG_CORE].max_vdd);
+		if (rc < 0)
+			return NOTIFY_BAD;
 		rc = regulator_set_optimum_mode(sc->vreg[VREG_CORE].reg,
 						sc->vreg[VREG_CORE].cur_ua);
 		if (rc < 0)
 			return NOTIFY_BAD;
+		rc = regulator_enable(sc->vreg[VREG_CORE].reg);
+		if (rc < 0)
+			return NOTIFY_BAD;
+
 		acpuclk_krait_set_rate(cpu, prev_khz[cpu], SETRATE_HOTPLUG);
 		break;
 	default:
@@ -1156,6 +1182,7 @@
 	hw_init();
 
 	cpufreq_table_init();
+	dcvs_freq_init();
 	acpuclk_register(&acpuclk_krait_data);
 	register_hotcpu_notifier(&acpuclk_cpu_notifier);
 
diff --git a/arch/arm/mach-msm/board-8064-pmic.c b/arch/arm/mach-msm/board-8064-pmic.c
index a358bba..fe2d2d2 100644
--- a/arch/arm/mach-msm/board-8064-pmic.c
+++ b/arch/arm/mach-msm/board-8064-pmic.c
@@ -419,14 +419,14 @@
 
 static struct pm8xxx_ccadc_platform_data
 apq8064_pm8xxx_ccadc_pdata = {
-	.r_sense		= 10,
+	.r_sense_uohm		= 10000,
 	.calib_delay_ms		= 600000,
 };
 
 static struct pm8921_bms_platform_data
 apq8064_pm8921_bms_pdata __devinitdata = {
 	.battery_type			= BATT_UNKNOWN,
-	.r_sense			= 10,
+	.r_sense_uohm			= 10000,
 	.v_cutoff			= 3400,
 	.max_voltage_uv			= MAX_VOLTAGE_MV * 1000,
 	.rconn_mohm			= 18,
diff --git a/arch/arm/mach-msm/board-8064-regulator.c b/arch/arm/mach-msm/board-8064-regulator.c
index 851f7d9..a66495d 100644
--- a/arch/arm/mach-msm/board-8064-regulator.c
+++ b/arch/arm/mach-msm/board-8064-regulator.c
@@ -455,7 +455,8 @@
 	{ \
 		.constraints = { \
 			.name		= _name, \
-			.valid_ops_mask	= REGULATOR_CHANGE_VOLTAGE, \
+			.valid_ops_mask	= REGULATOR_CHANGE_VOLTAGE | \
+					  REGULATOR_CHANGE_STATUS, \
 			.min_uV		= _min_uV, \
 			.max_uV		= _max_uV, \
 		}, \
diff --git a/arch/arm/mach-msm/board-8064.c b/arch/arm/mach-msm/board-8064.c
index 58f6f66..95246a7 100644
--- a/arch/arm/mach-msm/board-8064.c
+++ b/arch/arm/mach-msm/board-8064.c
@@ -693,12 +693,6 @@
 static struct msm_bus_vectors hsic_init_vectors[] = {
 	{
 		.src = MSM_BUS_MASTER_SPS,
-		.dst = MSM_BUS_SLAVE_EBI_CH0,
-		.ab = 0,
-		.ib = 0,
-	},
-	{
-		.src = MSM_BUS_MASTER_SPS,
 		.dst = MSM_BUS_SLAVE_SPS,
 		.ab = 0,
 		.ib = 0,
@@ -709,15 +703,9 @@
 static struct msm_bus_vectors hsic_max_vectors[] = {
 	{
 		.src = MSM_BUS_MASTER_SPS,
-		.dst = MSM_BUS_SLAVE_EBI_CH0,
-		.ab = 60000000,		/* At least 480Mbps on bus. */
-		.ib = 960000000,	/* MAX bursts rate */
-	},
-	{
-		.src = MSM_BUS_MASTER_SPS,
 		.dst = MSM_BUS_SLAVE_SPS,
 		.ab = 0,
-		.ib = 512000000, /*vote for 64Mhz dfab clk rate*/
+		.ib = 256000000, /*vote for 32Mhz dfab clk rate*/
 	},
 };
 
@@ -2123,6 +2111,21 @@
 	0x24, 0x30, 0x0f,
 };
 
+/* 8064AB has a different command to assert apc_pdn */
+static uint8_t spm_power_collapse_without_rpm_krait_v3[] __initdata = {
+	0x00, 0x24, 0x84, 0x10,
+	0x09, 0x03, 0x01,
+	0x10, 0x84, 0x30, 0x0C,
+	0x24, 0x30, 0x0f,
+};
+
+static uint8_t spm_power_collapse_with_rpm_krait_v3[] __initdata = {
+	0x00, 0x24, 0x84, 0x10,
+	0x09, 0x07, 0x01, 0x0B,
+	0x10, 0x84, 0x30, 0x0C,
+	0x24, 0x30, 0x0f,
+};
+
 static struct msm_spm_seq_entry msm_spm_boot_cpu_seq_list[] __initdata = {
 	[0] = {
 		.mode = MSM_SPM_MODE_CLOCK_GATING,
@@ -2275,6 +2278,27 @@
 	},
 };
 
+static void __init apq8064ab_update_krait_spm(void)
+{
+	int i;
+
+	/* Update the SPM sequences for SPC and PC */
+	for (i = 0; i < ARRAY_SIZE(msm_spm_data); i++) {
+		int j;
+		struct msm_spm_platform_data *pdata = &msm_spm_data[i];
+		for (j = 0; j < pdata->num_modes; j++) {
+			if (pdata->modes[j].cmd ==
+					spm_power_collapse_without_rpm)
+				pdata->modes[j].cmd =
+				spm_power_collapse_without_rpm_krait_v3;
+			else if (pdata->modes[j].cmd ==
+					spm_power_collapse_with_rpm)
+				pdata->modes[j].cmd =
+				spm_power_collapse_with_rpm_krait_v3;
+		}
+	}
+}
+
 static void __init apq8064_init_buses(void)
 {
 	msm_bus_rpm_set_mt_mask();
@@ -2515,6 +2539,7 @@
 	&msm_pil_vidc,
 	&msm_gss,
 	&apq8064_rtb_device,
+	&apq8064_dcvs_device,
 	&apq8064_msm_gov_device,
 	&apq8064_device_cache_erp,
 	&msm8960_device_ebi1_ch0_erp,
@@ -3370,6 +3395,7 @@
 static void __init apq8064_common_init(void)
 {
 	u32 platform_version = socinfo_get_platform_version();
+	struct msm_rpmrs_level rpmrs_level;
 
 	if (socinfo_get_pmic_model() == PMIC_MODEL_PM8917)
 		apq8064_pm8917_pdata_fixup();
@@ -3446,8 +3472,12 @@
 	}
 
 	enable_ddr3_regulator();
-	msm_hsic_pdata.swfi_latency =
-		msm_rpmrs_levels[0].latency_us;
+	rpmrs_level =
+		msm_rpmrs_levels[MSM_PM_SLEEP_MODE_WAIT_FOR_INTERRUPT];
+	msm_hsic_pdata.swfi_latency = rpmrs_level.latency_us;
+	rpmrs_level =
+		msm_rpmrs_levels[MSM_PM_SLEEP_MODE_POWER_COLLAPSE_STANDALONE];
+	msm_hsic_pdata.standalone_latency = rpmrs_level.latency_us;
 	if (machine_is_apq8064_mtp()) {
 		msm_hsic_pdata.log2_irq_thresh = 5,
 		apq8064_device_hsic_host.dev.platform_data = &msm_hsic_pdata;
@@ -3478,6 +3508,8 @@
 		apq8064_init_dsps();
 		platform_device_register(&msm_8960_riva);
 	}
+	if (cpu_is_apq8064ab())
+		apq8064ab_update_krait_spm();
 	msm_spm_init(msm_spm_data, ARRAY_SIZE(msm_spm_data));
 	msm_spm_l2_init(msm_spm_l2_data);
 	BUG_ON(msm_pm_boot_init(&msm_pm_boot_pdata));
diff --git a/arch/arm/mach-msm/board-8226.c b/arch/arm/mach-msm/board-8226.c
index e5263c7..3e90489 100644
--- a/arch/arm/mach-msm/board-8226.c
+++ b/arch/arm/mach-msm/board-8226.c
@@ -33,6 +33,7 @@
 #include <mach/board.h>
 #include <mach/gpiomux.h>
 #include <mach/msm_iomap.h>
+#include <mach/restart.h>
 #ifdef CONFIG_ION_MSM
 #include <mach/ion.h>
 #endif
@@ -63,6 +64,12 @@
 	CLK_DUMMY("iface_clk",  BLSP1_UART_CLK, "f991f000.serial", OFF),
 	CLK_DUMMY("iface_clk",  HSUSB_IFACE_CLK, "f9a55000.usb", OFF),
 	CLK_DUMMY("core_clk",	HSUSB_CORE_CLK, "f9a55000.usb", OFF),
+	CLK_DUMMY("iface_clk",	NULL,		"msm_sdcc.1", OFF),
+	CLK_DUMMY("core_clk",	NULL,		"msm_sdcc.1", OFF),
+	CLK_DUMMY("bus_clk",	NULL,		"msm_sdcc.1", OFF),
+	CLK_DUMMY("iface_clk",	NULL,		"msm_sdcc.2", OFF),
+	CLK_DUMMY("core_clk",	NULL,		"msm_sdcc.2", OFF),
+	CLK_DUMMY("bus_clk",	NULL,		"msm_sdcc.2", OFF),
 };
 
 static struct clock_init_data msm_dummy_clock_init_data __initdata = {
@@ -70,6 +77,14 @@
 	.size = ARRAY_SIZE(msm_clocks_dummy),
 };
 
+static struct of_dev_auxdata msm8226_auxdata_lookup[] __initdata = {
+	OF_DEV_AUXDATA("qcom,msm-sdcc", 0xF9824000, \
+			"msm_sdcc.1", NULL),
+	OF_DEV_AUXDATA("qcom,msm-sdcc", 0xF98A4000, \
+			"msm_sdcc.2", NULL),
+	{}
+};
+
 static struct reserve_info msm8226_reserve_info __initdata = {
 	.memtype_reserve_table = msm8226_reserve_table,
 	.paddr_to_memtype = msm8226_paddr_to_memtype,
@@ -89,13 +104,16 @@
 
 void __init msm8226_init(void)
 {
+	struct of_dev_auxdata *adata = msm8226_auxdata_lookup;
+
 	msm8226_init_gpiomux();
+
 	msm_clock_init(&msm_dummy_clock_init_data);
 
 	if (socinfo_init() < 0)
 		pr_err("%s: socinfo_init() failed\n", __func__);
 
-	of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
+	of_platform_populate(NULL, of_default_bus_match_table, adata, NULL);
 }
 
 static const char *msm8226_dt_match[] __initconst = {
@@ -111,5 +129,6 @@
 	.timer = &msm_dt_timer,
 	.dt_compat = msm8226_dt_match,
 	.reserve = msm8226_reserve,
-	.init_very_early = msm8226_early_memory
+	.init_very_early = msm8226_early_memory,
+	.restart = msm_restart,
 MACHINE_END
diff --git a/arch/arm/mach-msm/board-8910.c b/arch/arm/mach-msm/board-8910.c
index b031dac..42fe1ea 100644
--- a/arch/arm/mach-msm/board-8910.c
+++ b/arch/arm/mach-msm/board-8910.c
@@ -35,12 +35,29 @@
 #ifdef CONFIG_ION_MSM
 #include <mach/ion.h>
 #endif
+#include <mach/msm_memtypes.h>
 #include <mach/socinfo.h>
 #include <mach/board.h>
 #include <mach/clk-provider.h>
 #include "board-dt.h"
 #include "clock.h"
 
+static struct memtype_reserve msm8910_reserve_table[] __initdata = {
+	[MEMTYPE_SMI] = {
+	},
+	[MEMTYPE_EBI0] = {
+		.flags	=	MEMTYPE_FLAGS_1M_ALIGN,
+	},
+	[MEMTYPE_EBI1] = {
+		.flags	=	MEMTYPE_FLAGS_1M_ALIGN,
+	},
+};
+
+static int msm8910_paddr_to_memtype(unsigned int paddr)
+{
+	return MEMTYPE_EBI1;
+}
+
 static struct clk_lookup msm_clocks_dummy[] = {
 	CLK_DUMMY("core_clk",   BLSP1_UART_CLK, "f991f000.serial", OFF),
 	CLK_DUMMY("iface_clk",  BLSP1_UART_CLK, "f991f000.serial", OFF),
@@ -67,6 +84,22 @@
 	{}
 };
 
+static struct reserve_info msm8910_reserve_info __initdata = {
+	.memtype_reserve_table = msm8910_reserve_table,
+	.paddr_to_memtype = msm8910_paddr_to_memtype,
+};
+
+static void __init msm8910_early_memory(void)
+{
+	reserve_info = &msm8910_reserve_info;
+	of_scan_flat_dt(dt_scan_for_memory_reserve, msm8910_reserve_table);
+}
+
+static void __init msm8910_reserve(void)
+{
+	msm_reserve();
+}
+
 void __init msm8910_init(void)
 {
 	struct of_dev_auxdata *adata = msm8910_auxdata_lookup;
@@ -93,4 +126,6 @@
 	.timer = &msm_dt_timer,
 	.dt_compat = msm8910_dt_match,
 	.restart = msm_restart,
+	.reserve = msm8910_reserve,
+	.init_very_early = msm8910_early_memory
 MACHINE_END
diff --git a/arch/arm/mach-msm/board-8930-camera.c b/arch/arm/mach-msm/board-8930-camera.c
index 083d5ab..be55031 100644
--- a/arch/arm/mach-msm/board-8930-camera.c
+++ b/arch/arm/mach-msm/board-8930-camera.c
@@ -244,7 +244,7 @@
 		.src = MSM_BUS_MASTER_VFE,
 		.dst = MSM_BUS_SLAVE_EBI_CH0,
 		.ab  = 27648000,
-		.ib  = 110592000,
+		.ib  = 2656000000UL,
 	},
 	{
 		.src = MSM_BUS_MASTER_VPE,
@@ -265,7 +265,7 @@
 		.src = MSM_BUS_MASTER_VFE,
 		.dst = MSM_BUS_SLAVE_EBI_CH0,
 		.ab  = 274406400,
-		.ib  = 561807360,
+		.ib  = 2656000000UL,
 	},
 	{
 		.src = MSM_BUS_MASTER_VPE,
@@ -285,8 +285,8 @@
 	{
 		.src = MSM_BUS_MASTER_VFE,
 		.dst = MSM_BUS_SLAVE_EBI_CH0,
-		.ab  = 274423680,
-		.ib  = 1097694720,
+		.ab  = 600000000,
+		.ib  = 2656000000UL,
 	},
 	{
 		.src = MSM_BUS_MASTER_VPE,
@@ -306,8 +306,8 @@
 	{
 		.src = MSM_BUS_MASTER_VFE,
 		.dst = MSM_BUS_SLAVE_EBI_CH0,
-		.ab  = 302071680,
-		.ib  = 1208286720,
+		.ab  = 600000000,
+		.ib  = 2656000000UL,
 	},
 	{
 		.src = MSM_BUS_MASTER_VPE,
@@ -327,8 +327,8 @@
 	{
 		.src = MSM_BUS_MASTER_VFE,
 		.dst = MSM_BUS_SLAVE_EBI_CH0,
-		.ab  = 348192000,
-		.ib  = 617103360,
+		.ab  = 600000000,
+		.ib  = 4264000000UL,
 	},
 	{
 		.src = MSM_BUS_MASTER_VPE,
@@ -349,7 +349,7 @@
 		.src = MSM_BUS_MASTER_VFE,
 		.dst = MSM_BUS_SLAVE_EBI_CH0,
 		.ab  = 302071680,
-		.ib  = 1208286720,
+		.ib  = 2656000000UL,
 	},
 	{
 		.src = MSM_BUS_MASTER_VPE,
@@ -365,6 +365,27 @@
 	},
 };
 
+static struct msm_bus_vectors cam_adv_video_vectors[] = {
+	{
+		.src = MSM_BUS_MASTER_VFE,
+		.dst = MSM_BUS_SLAVE_EBI_CH0,
+		.ab  = 274406400,
+		.ib  = 2656000000UL,
+	},
+	{
+		.src = MSM_BUS_MASTER_VPE,
+		.dst = MSM_BUS_SLAVE_EBI_CH0,
+		.ab  = 206807040,
+		.ib  = 488816640,
+	},
+	{
+		.src = MSM_BUS_MASTER_JPEG_ENC,
+		.dst = MSM_BUS_SLAVE_EBI_CH0,
+		.ab  = 0,
+		.ib  = 0,
+	},
+};
+
 
 static struct msm_bus_paths cam_bus_client_config[] = {
 	{
@@ -395,6 +416,11 @@
 		ARRAY_SIZE(cam_dual_vectors),
 		cam_dual_vectors,
 	},
+	{
+		ARRAY_SIZE(cam_adv_video_vectors),
+		cam_adv_video_vectors,
+	},
+
 };
 
 static struct msm_bus_scale_pdata cam_bus_client_pdata = {
diff --git a/arch/arm/mach-msm/board-8930-display.c b/arch/arm/mach-msm/board-8930-display.c
index a0bfabf..4506ea7 100644
--- a/arch/arm/mach-msm/board-8930-display.c
+++ b/arch/arm/mach-msm/board-8930-display.c
@@ -132,6 +132,15 @@
 };
 
 static bool dsi_power_on;
+static struct mipi_dsi_panel_platform_data novatek_pdata;
+static void pm8917_gpio_set_backlight(int bl_level)
+{
+	int gpio24 = PM8917_GPIO_PM_TO_SYS(24);
+	if (bl_level > 0)
+		gpio_set_value_cansleep(gpio24, 1);
+	else
+		gpio_set_value_cansleep(gpio24, 0);
+}
 
 /*
  * TODO: When physical 8930/PM8038 hardware becomes
@@ -214,9 +223,13 @@
 					rc);
 				return -ENODEV;
 			}
+			gpio_set_value_cansleep(gpio24, 0);
+			novatek_pdata.gpio_set_backlight =
+				pm8917_gpio_set_backlight;
 		}
 		dsi_power_on = true;
 	}
+
 	if (on) {
 		rc = regulator_set_optimum_mode(reg_l8, 100000);
 		if (rc < 0) {
@@ -256,8 +269,6 @@
 		gpio_set_value(DISP_RST_GPIO, 1);
 		gpio_set_value(DISP_3D_2D_MODE, 1);
 		usleep(20);
-		if (socinfo_get_pmic_model() == PMIC_MODEL_PM8917)
-			gpio_set_value_cansleep(gpio24, 1);
 	} else {
 
 		gpio_set_value(DISP_RST_GPIO, 0);
@@ -294,8 +305,6 @@
 		}
 		gpio_set_value(DISP_3D_2D_MODE, 0);
 		usleep(20);
-		if (socinfo_get_pmic_model() == PMIC_MODEL_PM8917)
-			gpio_set_value_cansleep(gpio24, 0);
 	}
 	return 0;
 }
@@ -441,7 +450,7 @@
 #ifdef CONFIG_MSM_BUS_SCALING
 	.mdp_bus_scale_table = &mdp_bus_scale_pdata,
 #endif
-	.mdp_rev = MDP_REV_42,
+	.mdp_rev = MDP_REV_43,
 #ifdef CONFIG_MSM_MULTIMEDIA_USE_ION
 	.mem_hid = BIT(ION_CP_MM_HEAP_ID),
 #else
@@ -740,11 +749,29 @@
 		return 0;
 
 	if (on) {
+		if (!(hdmi_msm_data.is_mhl_enabled)) {
+			rc = gpio_request(HDMI_MHL_MUX_GPIO, "MHL_HDMI_MUX");
+			if (rc < 0) {
+				pr_err("gpio hdmi_mhl mux req failed:%d\n",
+					rc);
+				return rc;
+			}
+			rc = gpio_direction_output(HDMI_MHL_MUX_GPIO, 1);
+			if (rc < 0) {
+				pr_err("set gpio hdmi_mhl dir failed:%d\n",
+					rc);
+				goto error0;
+			}
+			gpio_set_value(HDMI_MHL_MUX_GPIO, 1);
+			pr_debug("set gpio hdmi mhl mux %d to 1\n",
+				HDMI_MHL_MUX_GPIO);
+		}
+
 		rc = gpio_request(100, "HDMI_DDC_CLK");
 		if (rc) {
 			pr_err("'%s'(%d) gpio_request failed, rc=%d\n",
 				"HDMI_DDC_CLK", 100, rc);
-			return rc;
+			goto error0;
 		}
 		rc = gpio_request(101, "HDMI_DDC_DATA");
 		if (rc) {
@@ -760,6 +787,8 @@
 		}
 		pr_debug("%s(on): success\n", __func__);
 	} else {
+		if (!(hdmi_msm_data.is_mhl_enabled))
+			gpio_free(HDMI_MHL_MUX_GPIO);
 		gpio_free(100);
 		gpio_free(101);
 		gpio_free(102);
@@ -773,6 +802,9 @@
 	gpio_free(101);
 error1:
 	gpio_free(100);
+error0:
+	if (!(hdmi_msm_data.is_mhl_enabled))
+		gpio_free(HDMI_MHL_MUX_GPIO);
 	return rc;
 }
 
diff --git a/arch/arm/mach-msm/board-8930-pmic.c b/arch/arm/mach-msm/board-8930-pmic.c
index d35a907..618f83b 100644
--- a/arch/arm/mach-msm/board-8930-pmic.c
+++ b/arch/arm/mach-msm/board-8930-pmic.c
@@ -437,7 +437,7 @@
 };
 
 static struct pm8xxx_ccadc_platform_data pm8xxx_ccadc_pdata = {
-	.r_sense		= 10,
+	.r_sense_uohm		= 10000,
 	.calib_delay_ms		= 600000,
 };
 
@@ -465,7 +465,7 @@
 
 static struct pm8921_bms_platform_data pm8921_bms_pdata __devinitdata = {
 	.battery_type			= BATT_UNKNOWN,
-	.r_sense			= 10,
+	.r_sense_uohm			= 10000,
 	.v_cutoff			= 3400,
 	.max_voltage_uv			= MAX_VOLTAGE_MV * 1000,
 	.shutdown_soc_valid_limit	= 20,
diff --git a/arch/arm/mach-msm/board-8930-regulator-pm8038.c b/arch/arm/mach-msm/board-8930-regulator-pm8038.c
index 727c4c6..eaebea0 100644
--- a/arch/arm/mach-msm/board-8930-regulator-pm8038.c
+++ b/arch/arm/mach-msm/board-8930-regulator-pm8038.c
@@ -189,12 +189,14 @@
 	REGULATOR_SUPPLY("krait0",		"acpuclk-8627"),
 	REGULATOR_SUPPLY("krait0",		"acpuclk-8930"),
 	REGULATOR_SUPPLY("krait0",		"acpuclk-8930aa"),
+	REGULATOR_SUPPLY("krait0",		"acpuclk-8930ab"),
 };
 VREG_CONSUMERS(S6) = {
 	REGULATOR_SUPPLY("8038_s6",		NULL),
 	REGULATOR_SUPPLY("krait1",		"acpuclk-8627"),
 	REGULATOR_SUPPLY("krait1",		"acpuclk-8930"),
 	REGULATOR_SUPPLY("krait1",		"acpuclk-8930aa"),
+	REGULATOR_SUPPLY("krait1",		"acpuclk-8930ab"),
 };
 VREG_CONSUMERS(LVS1) = {
 	REGULATOR_SUPPLY("8038_lvs1",		NULL),
@@ -447,7 +449,8 @@
 	{ \
 		.constraints = { \
 			.name		= _name, \
-			.valid_ops_mask	= REGULATOR_CHANGE_VOLTAGE, \
+			.valid_ops_mask	= REGULATOR_CHANGE_VOLTAGE | \
+					  REGULATOR_CHANGE_STATUS, \
 			.min_uV		= _min_uV, \
 			.max_uV		= _max_uV, \
 		}, \
@@ -490,7 +493,7 @@
 	/*	ID a_on pd ss min_uV   max_uV  supply sys_uA  freq  fm  ss_fm */
 	RPM_SMPS(S1, 0, 1, 1,  500000, 1150000, NULL, 100000, 4p80, AUTO, LPM),
 	RPM_SMPS(S2, 1, 1, 1, 1400000, 1400000, NULL, 100000, 1p60, AUTO, LPM),
-	RPM_SMPS(S3, 0, 1, 1, 1150000, 1150000, NULL, 100000, 3p20, AUTO, LPM),
+	RPM_SMPS(S3, 0, 1, 1, 1150000, 1150000, NULL, 100000, 3p20, AUTO, AUTO),
 	RPM_SMPS(S4, 1, 1, 1, 1950000, 2200000, NULL, 100000, 1p60, AUTO, LPM),
 
 	/*	ID     a_on pd ss min_uV   max_uV  supply  sys_uA init_ip */
@@ -564,6 +567,14 @@
 	RPM_REG_MAP(L24,            0, 2, "krait1_mem",   "acpuclk-8930aa"),
 	RPM_REG_MAP(VDD_DIG_CORNER, 0, 1, "krait0_dig",   "acpuclk-8930aa"),
 	RPM_REG_MAP(VDD_DIG_CORNER, 0, 2, "krait1_dig",   "acpuclk-8930aa"),
+
+	RPM_REG_MAP(L23,            0, 1, "krait0_hfpll", "acpuclk-8930ab"),
+	RPM_REG_MAP(L23,            0, 2, "krait1_hfpll", "acpuclk-8930ab"),
+	RPM_REG_MAP(L23,            0, 6, "l2_hfpll",     "acpuclk-8930ab"),
+	RPM_REG_MAP(L24,            0, 1, "krait0_mem",   "acpuclk-8930ab"),
+	RPM_REG_MAP(L24,            0, 2, "krait1_mem",   "acpuclk-8930ab"),
+	RPM_REG_MAP(VDD_DIG_CORNER, 0, 1, "krait0_dig",   "acpuclk-8930ab"),
+	RPM_REG_MAP(VDD_DIG_CORNER, 0, 2, "krait1_dig",   "acpuclk-8930ab"),
 };
 
 struct rpm_regulator_platform_data
diff --git a/arch/arm/mach-msm/board-8930-regulator-pm8917.c b/arch/arm/mach-msm/board-8930-regulator-pm8917.c
index 33e38ab..9a2967a 100644
--- a/arch/arm/mach-msm/board-8930-regulator-pm8917.c
+++ b/arch/arm/mach-msm/board-8930-regulator-pm8917.c
@@ -34,6 +34,7 @@
 	REGULATOR_SUPPLY("8917_l2",		NULL),
 	REGULATOR_SUPPLY("iris_vdddig",		"wcnss_wlan.0"),
 	REGULATOR_SUPPLY("dsi_vdda",		"mipi_dsi.1"),
+	REGULATOR_SUPPLY("dsi_pll_vdda",	"mdp.0"),
 	REGULATOR_SUPPLY("mipi_csi_vdd",	"msm_csid.0"),
 	REGULATOR_SUPPLY("mipi_csi_vdd",	"msm_csid.1"),
 	REGULATOR_SUPPLY("mipi_csi_vdd",	"msm_csid.2"),
@@ -117,6 +118,7 @@
 VREG_CONSUMERS(L23) = {
 	REGULATOR_SUPPLY("8917_l23",		NULL),
 	REGULATOR_SUPPLY("dsi_vddio",		"mipi_dsi.1"),
+	REGULATOR_SUPPLY("dsi_pll_vddio",	"mdp.0"),
 	REGULATOR_SUPPLY("hdmi_avdd",		"hdmi_msm.0"),
 	REGULATOR_SUPPLY("hdmi_vcc",		"hdmi_msm.0"),
 	REGULATOR_SUPPLY("pll_vdd",		"pil_riva"),
@@ -204,12 +206,14 @@
 	REGULATOR_SUPPLY("krait0",		"acpuclk-8627"),
 	REGULATOR_SUPPLY("krait0",		"acpuclk-8930"),
 	REGULATOR_SUPPLY("krait0",		"acpuclk-8930aa"),
+	REGULATOR_SUPPLY("krait0",		"acpuclk-8930ab"),
 };
 VREG_CONSUMERS(S6) = {
 	REGULATOR_SUPPLY("8917_s6",		NULL),
 	REGULATOR_SUPPLY("krait1",		"acpuclk-8627"),
 	REGULATOR_SUPPLY("krait1",		"acpuclk-8930"),
 	REGULATOR_SUPPLY("krait1",		"acpuclk-8930aa"),
+	REGULATOR_SUPPLY("krait1",		"acpuclk-8930ab"),
 };
 VREG_CONSUMERS(S7) = {
 	REGULATOR_SUPPLY("8917_s7",		NULL),
@@ -483,7 +487,8 @@
 	{ \
 		.constraints = { \
 			.name		= _name, \
-			.valid_ops_mask	= REGULATOR_CHANGE_VOLTAGE, \
+			.valid_ops_mask	= REGULATOR_CHANGE_VOLTAGE | \
+					  REGULATOR_CHANGE_STATUS, \
 			.min_uV		= _min_uV, \
 			.max_uV		= _max_uV, \
 		}, \
@@ -629,6 +634,18 @@
 	RPM_REG_MAP(L24,            0, 2, "krait1_mem",   "acpuclk-8930aa"),
 	RPM_REG_MAP(VDD_DIG_CORNER, 0, 1, "krait0_dig",   "acpuclk-8930aa"),
 	RPM_REG_MAP(VDD_DIG_CORNER, 0, 2, "krait1_dig",   "acpuclk-8930aa"),
+
+	RPM_REG_MAP(L23,            0, 1, "krait0_l23",   "acpuclk-8930ab"),
+	RPM_REG_MAP(S8,             0, 1, "krait0_s8",    "acpuclk-8930ab"),
+	RPM_REG_MAP(L23,            0, 2, "krait1_l23",   "acpuclk-8930ab"),
+	RPM_REG_MAP(S8,             0, 2, "krait1_s8",    "acpuclk-8930ab"),
+	RPM_REG_MAP(L23,            0, 6, "l2_l23",       "acpuclk-8930ab"),
+	RPM_REG_MAP(S8,             0, 6, "l2_s8",        "acpuclk-8930ab"),
+	RPM_REG_MAP(L24,            0, 1, "krait0_mem",   "acpuclk-8930ab"),
+	RPM_REG_MAP(L24,            0, 2, "krait1_mem",   "acpuclk-8930ab"),
+	RPM_REG_MAP(VDD_DIG_CORNER, 0, 1, "krait0_dig",   "acpuclk-8930ab"),
+	RPM_REG_MAP(VDD_DIG_CORNER, 0, 2, "krait1_dig",   "acpuclk-8930ab"),
+
 };
 
 struct rpm_regulator_platform_data
diff --git a/arch/arm/mach-msm/board-8930.c b/arch/arm/mach-msm/board-8930.c
index 1b487fa..512ae72 100644
--- a/arch/arm/mach-msm/board-8930.c
+++ b/arch/arm/mach-msm/board-8930.c
@@ -112,7 +112,6 @@
 #define KS8851_IRQ_GPIO		90
 #define HAP_SHIFT_LVL_OE_GPIO	47
 
-#define HDMI_MHL_MUX_GPIO       73
 #define MHL_GPIO_INT            72
 #define MHL_GPIO_RESET          71
 #define MHL_GPIO_PWR_EN         5
@@ -2756,6 +2755,9 @@
 
 	pdata = msm8930_device_acpuclk.dev.platform_data;
 	pdata->uses_pm8917 = true;
+
+	pdata = msm8930ab_device_acpuclk.dev.platform_data;
+	pdata->uses_pm8917 = true;
 }
 
 static void __init msm8930_cdp_init(void)
@@ -2829,6 +2831,8 @@
 		platform_device_register(&msm8930_device_acpuclk);
 	else if (cpu_is_msm8930aa())
 		platform_device_register(&msm8930aa_device_acpuclk);
+	else if (cpu_is_msm8930ab())
+		platform_device_register(&msm8930ab_device_acpuclk);
 	platform_add_devices(early_common_devices,
 				ARRAY_SIZE(early_common_devices));
 	if (socinfo_get_pmic_model() != PMIC_MODEL_PM8917)
diff --git a/arch/arm/mach-msm/board-8930.h b/arch/arm/mach-msm/board-8930.h
index 055576f..dbcfa9d 100644
--- a/arch/arm/mach-msm/board-8930.h
+++ b/arch/arm/mach-msm/board-8930.h
@@ -164,5 +164,7 @@
 #define MSM_8930_GSBI10_QUP_I2C_BUS_ID 10
 #define MSM_8930_GSBI12_QUP_I2C_BUS_ID 12
 
+#define HDMI_MHL_MUX_GPIO       73
+
 extern struct msm_rtb_platform_data msm8930_rtb_pdata;
 extern struct msm_cache_dump_platform_data msm8930_cache_dump_pdata;
diff --git a/arch/arm/mach-msm/board-8960-display.c b/arch/arm/mach-msm/board-8960-display.c
index 3052902..ecf5ec6 100644
--- a/arch/arm/mach-msm/board-8960-display.c
+++ b/arch/arm/mach-msm/board-8960-display.c
@@ -988,6 +988,12 @@
 
 void __init msm8960_init_fb(void)
 {
+	uint32_t soc_platform_version = socinfo_get_version();
+
+	if (SOCINFO_VERSION_MAJOR(soc_platform_version) >= 3)
+		mdp_pdata.mdp_rev = MDP_REV_43;
+
 	if (cpu_is_msm8960ab())
 		mdp_pdata.mdp_rev = MDP_REV_44;
 
diff --git a/arch/arm/mach-msm/board-8960-pmic.c b/arch/arm/mach-msm/board-8960-pmic.c
index 1a3d90d..2071a55 100644
--- a/arch/arm/mach-msm/board-8960-pmic.c
+++ b/arch/arm/mach-msm/board-8960-pmic.c
@@ -424,7 +424,7 @@
 
 static struct pm8921_bms_platform_data pm8921_bms_pdata __devinitdata = {
 	.battery_type			= BATT_UNKNOWN,
-	.r_sense			= 10,
+	.r_sense_uohm			= 10000,
 	.v_cutoff			= 3400,
 	.max_voltage_uv			= MAX_VOLTAGE_MV * 1000,
 	.rconn_mohm			= 18,
@@ -552,7 +552,7 @@
 };
 
 static struct pm8xxx_ccadc_platform_data pm8xxx_ccadc_pdata = {
-	.r_sense		= 10,
+	.r_sense_uohm		= 10000,
 	.calib_delay_ms		= 600000,
 };
 
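
Both hunks above rename r_sense to r_sense_uohm and rescale the value, so the BMS and CCADC platform data now carry the sense resistance in microohm rather than milliohm. The arithmetic is just a factor of 1000; a minimal check (helper name invented):

/* Illustrative conversion matching the 10 -> 10000 change above. */
#include <stdio.h>

static unsigned int mohm_to_uohm(unsigned int mohm)
{
	return mohm * 1000;
}

int main(void)
{
	printf("r_sense_uohm = %u\n", mohm_to_uohm(10));	/* prints 10000 */
	return 0;
}
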
diff --git a/arch/arm/mach-msm/board-8960-regulator.c b/arch/arm/mach-msm/board-8960-regulator.c
index f9e2c8e..397411d 100644
--- a/arch/arm/mach-msm/board-8960-regulator.c
+++ b/arch/arm/mach-msm/board-8960-regulator.c
@@ -382,7 +382,8 @@
 	{ \
 		.constraints = { \
 			.name		= _name, \
-			.valid_ops_mask	= REGULATOR_CHANGE_VOLTAGE, \
+			.valid_ops_mask	= REGULATOR_CHANGE_VOLTAGE | \
+					  REGULATOR_CHANGE_STATUS, \
 			.min_uV		= _min_uV, \
 			.max_uV		= _max_uV, \
 		}, \
diff --git a/arch/arm/mach-msm/board-8960.c b/arch/arm/mach-msm/board-8960.c
index 167923f..9efc60a 100644
--- a/arch/arm/mach-msm/board-8960.c
+++ b/arch/arm/mach-msm/board-8960.c
@@ -1593,26 +1593,41 @@
 };
 
 static uint8_t spm_wfi_cmd_sequence[] __initdata = {
-			0x03, 0x0f,
+	0x03, 0x0f,
 };
 
 static uint8_t spm_retention_cmd_sequence[] __initdata = {
-			0x00, 0x05, 0x03, 0x0D,
-			0x0B, 0x00, 0x0f,
+	0x00, 0x05, 0x03, 0x0D,
+	0x0B, 0x00, 0x0f,
 };
 
 static uint8_t spm_power_collapse_without_rpm[] __initdata = {
-			0x00, 0x24, 0x54, 0x10,
-			0x09, 0x03, 0x01,
-			0x10, 0x54, 0x30, 0x0C,
-			0x24, 0x30, 0x0f,
+	0x00, 0x24, 0x54, 0x10,
+	0x09, 0x03, 0x01,
+	0x10, 0x54, 0x30, 0x0C,
+	0x24, 0x30, 0x0f,
 };
 
 static uint8_t spm_power_collapse_with_rpm[] __initdata = {
-			0x00, 0x24, 0x54, 0x10,
-			0x09, 0x07, 0x01, 0x0B,
-			0x10, 0x54, 0x30, 0x0C,
-			0x24, 0x30, 0x0f,
+	0x00, 0x24, 0x54, 0x10,
+	0x09, 0x07, 0x01, 0x0B,
+	0x10, 0x54, 0x30, 0x0C,
+	0x24, 0x30, 0x0f,
+};
+
+/* 8960AB has a different command to assert apc_pdn */
+static uint8_t spm_power_collapse_without_rpm_krait_v3[] __initdata = {
+	0x00, 0x24, 0x84, 0x10,
+	0x09, 0x03, 0x01,
+	0x10, 0x84, 0x30, 0x0C,
+	0x24, 0x30, 0x0f,
+};
+
+static uint8_t spm_power_collapse_with_rpm_krait_v3[] __initdata = {
+	0x00, 0x24, 0x84, 0x10,
+	0x09, 0x07, 0x01, 0x0B,
+	0x10, 0x84, 0x30, 0x0C,
+	0x24, 0x30, 0x0f,
 };
 
 static struct msm_spm_seq_entry msm_spm_boot_cpu_seq_list[] __initdata = {
@@ -1621,13 +1636,11 @@
 		.notify_rpm = false,
 		.cmd = spm_wfi_cmd_sequence,
 	},
-
 	[1] = {
 		.mode = MSM_SPM_MODE_POWER_RETENTION,
 		.notify_rpm = false,
 		.cmd = spm_retention_cmd_sequence,
 	},
-
 	[2] = {
 		.mode = MSM_SPM_MODE_POWER_COLLAPSE,
 		.notify_rpm = false,
@@ -3216,15 +3229,32 @@
 	msm_tsens_early_init(&msm_tsens_pdata);
 }
 
-static void __init msm8960_reset_spm_avs(void)
+static void __init msm8960ab_update_krait_spm(void)
 {
 	int i;
 
+	/* Reset the AVS registers until we have support for AVS */
 	for (i = 0; i < ARRAY_SIZE(msm_spm_data); i++) {
 		struct msm_spm_platform_data *pdata = &msm_spm_data[i];
 		pdata->reg_init_values[MSM_SPM_REG_SAW2_AVS_CTL] = 0;
 		pdata->reg_init_values[MSM_SPM_REG_SAW2_AVS_HYSTERESIS] = 0;
 	}
+
+	/* Update the SPM sequences for SPC and PC */
+	for (i = 0; i < ARRAY_SIZE(msm_spm_data); i++) {
+		int j;
+		struct msm_spm_platform_data *pdata = &msm_spm_data[i];
+		for (j = 0; j < pdata->num_modes; j++) {
+			if (pdata->modes[j].cmd ==
+					spm_power_collapse_without_rpm)
+				pdata->modes[j].cmd =
+				spm_power_collapse_without_rpm_krait_v3;
+			else if (pdata->modes[j].cmd ==
+					spm_power_collapse_with_rpm)
+				pdata->modes[j].cmd =
+				spm_power_collapse_with_rpm_krait_v3;
+		}
+	}
 }
 
 static void __init msm8960_cdp_init(void)
@@ -3282,7 +3312,7 @@
 	msm8960_gfx_init();
 
 	if (cpu_is_msm8960ab())
-		msm8960_reset_spm_avs();
+		msm8960ab_update_krait_spm();
 	msm_spm_init(msm_spm_data, ARRAY_SIZE(msm_spm_data));
 	msm_spm_l2_init(msm_spm_l2_data);
 
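
msm8960ab_update_krait_spm() above still clears the AVS registers, then walks every SPM mode and swaps in the Krait v3 sequences by comparing each mode's cmd pointer against the default arrays. A small standalone sketch of that pointer-substitution pattern (types and arrays below are simplified stand-ins):

/* Sketch: identify modes by the array their cmd pointer refers to and
 * retarget them, as the 8960AB update above does. Names are invented. */
#include <stdio.h>

static unsigned char seq_default[]  = { 0x00, 0x54, 0x0f };
static unsigned char seq_krait_v3[] = { 0x00, 0x84, 0x0f };

struct spm_mode {
	unsigned char *cmd;
};

int main(void)
{
	struct spm_mode modes[] = { { seq_default }, { seq_default } };
	unsigned int i;

	for (i = 0; i < sizeof(modes) / sizeof(modes[0]); i++)
		if (modes[i].cmd == seq_default)	/* pointer identity */
			modes[i].cmd = seq_krait_v3;

	printf("both modes on v3 sequence: %d\n",
	       modes[0].cmd == seq_krait_v3 && modes[1].cmd == seq_krait_v3);
	return 0;
}
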
diff --git a/arch/arm/mach-msm/board-8974-gpiomux.c b/arch/arm/mach-msm/board-8974-gpiomux.c
index 50b59e1..89ad4ef 100644
--- a/arch/arm/mach-msm/board-8974-gpiomux.c
+++ b/arch/arm/mach-msm/board-8974-gpiomux.c
@@ -152,6 +152,26 @@
 
 };
 
+static struct gpiomux_setting mhl_suspend_config = {
+	.func = GPIOMUX_FUNC_GPIO,
+	.drv = GPIOMUX_DRV_2MA,
+	.pull = GPIOMUX_PULL_DOWN,
+};
+
+static struct gpiomux_setting mhl_active_1_cfg = {
+	.func = GPIOMUX_FUNC_1,
+	.drv = GPIOMUX_DRV_2MA,
+	.pull = GPIOMUX_PULL_UP,
+	.dir = GPIOMUX_OUT_HIGH,
+};
+
+static struct gpiomux_setting mhl_active_2_cfg = {
+	.func = GPIOMUX_FUNC_1,
+	.drv = GPIOMUX_DRV_2MA,
+	.pull = GPIOMUX_PULL_UP,
+};
+
+
 static struct gpiomux_setting hdmi_suspend_cfg = {
 	.func = GPIOMUX_FUNC_GPIO,
 	.drv = GPIOMUX_DRV_2MA,
@@ -170,6 +190,34 @@
 	.pull = GPIOMUX_PULL_DOWN,
 };
 
+static struct msm_gpiomux_config msm_mhl_configs[] __initdata = {
+	{
+		/* mhl-sii8334 pwr */
+		.gpio = 12,
+		.settings = {
+			[GPIOMUX_SUSPENDED] = &mhl_suspend_config,
+			[GPIOMUX_ACTIVE]    = &mhl_active_1_cfg,
+		},
+	},
+	{
+		/* mhl-sii8334 intr */
+		.gpio = 82,
+		.settings = {
+			[GPIOMUX_SUSPENDED] = &mhl_suspend_config,
+			[GPIOMUX_ACTIVE]    = &mhl_active_1_cfg,
+		},
+	},
+	{
+		/* mhl-sii8334 reset */
+		.gpio = 8,
+		.settings = {
+			[GPIOMUX_SUSPENDED] = &mhl_suspend_config,
+			[GPIOMUX_ACTIVE]    = &mhl_active_2_cfg,
+		},
+	},
+};
+
+
 static struct msm_gpiomux_config msm_hdmi_configs[] __initdata = {
 	{
 		.gpio = 31,
@@ -575,4 +623,5 @@
 	msm_gpiomux_install(msm_taiko_config, ARRAY_SIZE(msm_taiko_config));
 
 	msm_gpiomux_install(msm_hdmi_configs, ARRAY_SIZE(msm_hdmi_configs));
+	msm_gpiomux_install(msm_mhl_configs, ARRAY_SIZE(msm_mhl_configs));
 }
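
The MHL hunks above give each sii8334 pin a pulled-down GPIO setting for suspend and a function-1, pulled-up setting while active, and install the table next to the HDMI configs. A rough sketch of the per-state lookup such tables imply (enum and struct names below are illustrative, not the gpiomux API):

/* Sketch only: choose a pin setting by power state, mirroring the
 * GPIOMUX_ACTIVE / GPIOMUX_SUSPENDED slots above. */
#include <stdio.h>

enum pin_state { STATE_ACTIVE, STATE_SUSPENDED, STATE_MAX };

struct pin_setting {
	const char *func;
	const char *pull;
};

struct pin_config {
	int gpio;
	const struct pin_setting *settings[STATE_MAX];
};

static const struct pin_setting mhl_active  = { "func1", "pull-up" };
static const struct pin_setting mhl_suspend = { "gpio",  "pull-down" };

int main(void)
{
	struct pin_config intr = {
		.gpio = 82,
		.settings = {
			[STATE_ACTIVE]    = &mhl_active,
			[STATE_SUSPENDED] = &mhl_suspend,
		},
	};
	const struct pin_setting *s = intr.settings[STATE_SUSPENDED];

	printf("gpio %d suspended: func=%s pull=%s\n",
	       intr.gpio, s->func, s->pull);
	return 0;
}
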
diff --git a/arch/arm/mach-msm/board-8974.c b/arch/arm/mach-msm/board-8974.c
index 98a82b1..c47b688 100644
--- a/arch/arm/mach-msm/board-8974.c
+++ b/arch/arm/mach-msm/board-8974.c
@@ -66,177 +66,22 @@
 	return MEMTYPE_EBI1;
 }
 
-static struct resource smd_resource[] = {
-	{
-		.name	= "modem_smd_in",
-		.start	= 32 + 25,		/* mss_sw_to_kpss_ipc_irq0  */
-		.flags	= IORESOURCE_IRQ,
-	},
-	{
-		.name	= "modem_smsm_in",
-		.start	= 32 + 26,		/* mss_sw_to_kpss_ipc_irq1  */
-		.flags	= IORESOURCE_IRQ,
-	},
-	{
-		.name	= "adsp_smd_in",
-		.start	= 32 + 156,		/* lpass_to_kpss_ipc_irq0  */
-		.flags	= IORESOURCE_IRQ,
-	},
-	{
-		.name	= "adsp_smsm_in",
-		.start	= 32 + 157,		/* lpass_to_kpss_ipc_irq1  */
-		.flags	= IORESOURCE_IRQ,
-	},
-	{
-		.name	= "wcnss_smd_in",
-		.start	= 32 + 142,		/* WcnssAppsSmdMedIrq  */
-		.flags	= IORESOURCE_IRQ,
-	},
-	{
-		.name	= "wcnss_smsm_in",
-		.start	= 32 + 144,		/* RivaAppsWlanSmsmIrq  */
-		.flags	= IORESOURCE_IRQ,
-	},
-	{
-		.name	= "rpm_smd_in",
-		.start	= 32 + 168,		/* rpm_to_kpss_ipc_irq4  */
-		.flags	= IORESOURCE_IRQ,
-	},
-};
-
-static struct smd_subsystem_config smd_config_list[] = {
-	{
-		.irq_config_id = SMD_MODEM,
-		.subsys_name = "modem",
-		.edge = SMD_APPS_MODEM,
-
-		.smd_int.irq_name = "modem_smd_in",
-		.smd_int.flags = IRQF_TRIGGER_RISING,
-		.smd_int.irq_id = -1,
-		.smd_int.device_name = "smd_dev",
-		.smd_int.dev_id = 0,
-		.smd_int.out_bit_pos = 1 << 12,
-		.smd_int.out_base = (void __iomem *)MSM_APCS_GCC_BASE,
-		.smd_int.out_offset = 0x8,
-
-		.smsm_int.irq_name = "modem_smsm_in",
-		.smsm_int.flags = IRQF_TRIGGER_RISING,
-		.smsm_int.irq_id = -1,
-		.smsm_int.device_name = "smsm_dev",
-		.smsm_int.dev_id = 0,
-		.smsm_int.out_bit_pos = 1 << 13,
-		.smsm_int.out_base = (void __iomem *)MSM_APCS_GCC_BASE,
-		.smsm_int.out_offset = 0x8,
-	},
-	{
-		.irq_config_id = SMD_Q6,
-		.subsys_name = "adsp",
-		.edge = SMD_APPS_QDSP,
-
-		.smd_int.irq_name = "adsp_smd_in",
-		.smd_int.flags = IRQF_TRIGGER_RISING,
-		.smd_int.irq_id = -1,
-		.smd_int.device_name = "smd_dev",
-		.smd_int.dev_id = 0,
-		.smd_int.out_bit_pos = 1 << 8,
-		.smd_int.out_base = (void __iomem *)MSM_APCS_GCC_BASE,
-		.smd_int.out_offset = 0x8,
-
-		.smsm_int.irq_name = "adsp_smsm_in",
-		.smsm_int.flags = IRQF_TRIGGER_RISING,
-		.smsm_int.irq_id = -1,
-		.smsm_int.device_name = "smsm_dev",
-		.smsm_int.dev_id = 0,
-		.smsm_int.out_bit_pos = 1 << 9,
-		.smsm_int.out_base = (void __iomem *)MSM_APCS_GCC_BASE,
-		.smsm_int.out_offset = 0x8,
-	},
-	{
-		.irq_config_id = SMD_WCNSS,
-		.subsys_name = "wcnss",
-		.edge = SMD_APPS_WCNSS,
-
-		.smd_int.irq_name = "wcnss_smd_in",
-		.smd_int.flags = IRQF_TRIGGER_RISING,
-		.smd_int.irq_id = -1,
-		.smd_int.device_name = "smd_dev",
-		.smd_int.dev_id = 0,
-		.smd_int.out_bit_pos = 1 << 17,
-		.smd_int.out_base = (void __iomem *)MSM_APCS_GCC_BASE,
-		.smd_int.out_offset = 0x8,
-
-		.smsm_int.irq_name = "wcnss_smsm_in",
-		.smsm_int.flags = IRQF_TRIGGER_RISING,
-		.smsm_int.irq_id = -1,
-		.smsm_int.device_name = "smsm_dev",
-		.smsm_int.dev_id = 0,
-		.smsm_int.out_bit_pos = 1 << 19,
-		.smsm_int.out_base = (void __iomem *)MSM_APCS_GCC_BASE,
-		.smsm_int.out_offset = 0x8,
-	},
-	{
-		.irq_config_id = SMD_RPM,
-		.subsys_name = NULL, /* do not use PIL to load RPM */
-		.edge = SMD_APPS_RPM,
-
-		.smd_int.irq_name = "rpm_smd_in",
-		.smd_int.flags = IRQF_TRIGGER_RISING | IRQF_NO_SUSPEND,
-		.smd_int.irq_id = -1,
-		.smd_int.device_name = "smd_dev",
-		.smd_int.dev_id = 0,
-		.smd_int.out_bit_pos = 1 << 0,
-		.smd_int.out_base = (void __iomem *)MSM_APCS_GCC_BASE,
-		.smd_int.out_offset = 0x8,
-
-		.smsm_int.irq_name = NULL, /* RPM does not support SMSM */
-		.smsm_int.flags = 0,
-		.smsm_int.irq_id = 0,
-		.smsm_int.device_name = NULL,
-		.smsm_int.dev_id = 0,
-		.smsm_int.out_bit_pos = 0,
-		.smsm_int.out_base = NULL,
-		.smsm_int.out_offset = 0,
-	},
-};
-
-static struct smd_smem_regions aux_smem_areas[] = {
-	{
-		.phys_addr = (void *)(0xfc428000),
-		.size = 0x4000,
-	},
-};
-
-static struct smd_subsystem_restart_config smd_ssr_cfg = {
-	.disable_smsm_reset_handshake = 1,
-};
-
-static struct smd_platform smd_platform_data = {
-	.num_ss_configs = ARRAY_SIZE(smd_config_list),
-	.smd_ss_configs = smd_config_list,
-	.smd_ssr_config = &smd_ssr_cfg,
-	.num_smem_areas = ARRAY_SIZE(aux_smem_areas),
-	.smd_smem_areas = aux_smem_areas,
-};
-
-struct platform_device msm_device_smd_8974 = {
-	.name	= "msm_smd",
-	.id	= -1,
-	.resource = smd_resource,
-	.num_resources = ARRAY_SIZE(smd_resource),
-	.dev = {
-		.platform_data = &smd_platform_data,
-	}
-};
-
 static struct reserve_info msm8974_reserve_info __initdata = {
 	.memtype_reserve_table = msm8974_reserve_table,
 	.paddr_to_memtype = msm8974_paddr_to_memtype,
 };
 
-static void __init msm8974_early_memory(void)
+void __init msm_8974_reserve(void)
 {
 	reserve_info = &msm8974_reserve_info;
 	of_scan_flat_dt(dt_scan_for_memory_reserve, msm8974_reserve_table);
+	msm_reserve();
+}
+
+static void __init msm8974_early_memory(void)
+{
+	reserve_info = &msm8974_reserve_info;
+	of_scan_flat_dt(dt_scan_for_memory_hole, msm8974_reserve_table);
 }
 
 #define BIMC_BASE	0xfc380000
@@ -426,11 +271,6 @@
 				ARRAY_SIZE(msm_bus_8974_devices));
 };
 
-void __init msm8974_add_devices(void)
-{
-	platform_device_register(&msm_device_smd_8974);
-}
-
 /*
  * Used to satisfy dependencies for devices that need to be
  * run early or in a particular order. Most likely your device doesn't fall
@@ -536,7 +376,6 @@
 	regulator_has_full_constraints();
 	of_platform_populate(NULL, of_default_bus_match_table, adata, NULL);
 
-	msm8974_add_devices();
 	msm8974_add_drivers();
 }
 
@@ -557,7 +396,7 @@
 	.handle_irq = gic_handle_irq,
 	.timer = &msm_dt_timer,
 	.dt_compat = msm8974_dt_match,
-	.reserve = msm_reserve,
+	.reserve = msm_8974_reserve,
 	.init_very_early = msm8974_init_very_early,
 	.restart = msm_restart,
 MACHINE_END
diff --git a/arch/arm/mach-msm/board-9615.c b/arch/arm/mach-msm/board-9615.c
index e0a6b4d..39060ad 100644
--- a/arch/arm/mach-msm/board-9615.c
+++ b/arch/arm/mach-msm/board-9615.c
@@ -924,6 +924,10 @@
 	&msm_cpudai_auxpcm_tx,
 	&msm_cpudai_sec_auxpcm_rx,
 	&msm_cpudai_sec_auxpcm_tx,
+	&msm_cpudai_stub,
+	&msm_cpudai_incall_music_rx,
+	&msm_cpudai_incall_record_rx,
+	&msm_cpudai_incall_record_tx,
 
 #if defined(CONFIG_CRYPTO_DEV_QCRYPTO) || \
 		defined(CONFIG_CRYPTO_DEV_QCRYPTO_MODULE)
diff --git a/arch/arm/mach-msm/board-9625-gpiomux.c b/arch/arm/mach-msm/board-9625-gpiomux.c
index 6f36ef2..686bb41 100644
--- a/arch/arm/mach-msm/board-9625-gpiomux.c
+++ b/arch/arm/mach-msm/board-9625-gpiomux.c
@@ -25,13 +25,13 @@
 };
 
 static struct gpiomux_setting gpio_spi_cs_config = {
-	.func = GPIOMUX_FUNC_9,
+	.func = GPIOMUX_FUNC_1,
 	.drv = GPIOMUX_DRV_12MA,
 	.pull = GPIOMUX_PULL_NONE,
 };
 
 static struct gpiomux_setting gpio_spi_config = {
-	.func = GPIOMUX_FUNC_2,
+	.func = GPIOMUX_FUNC_1,
 	.drv = GPIOMUX_DRV_12MA,
 	.pull = GPIOMUX_PULL_NONE,
 };
@@ -44,6 +44,30 @@
 
 static struct msm_gpiomux_config msm_blsp_configs[] __initdata = {
 	{
+		.gpio      = 4,		/* BLSP1 QUP2 SPI_DATA_MOSI */
+		.settings = {
+			[GPIOMUX_SUSPENDED] = &gpio_spi_config,
+		},
+	},
+	{
+		.gpio      = 5,		/* BLSP1 QUP2 SPI_DATA_MISO */
+		.settings = {
+			[GPIOMUX_SUSPENDED] = &gpio_spi_config,
+		},
+	},
+	{
+		.gpio      = 6,		/* BLSP1 QUP2 SPI_CS_N */
+		.settings = {
+			[GPIOMUX_SUSPENDED] = &gpio_spi_cs_config,
+		},
+	},
+	{
+		.gpio      = 7,		/* BLSP1 QUP2 SPI_CLK */
+		.settings = {
+			[GPIOMUX_SUSPENDED] = &gpio_spi_config,
+		},
+	},
+	{
 		.gpio      = 8,	       /* BLSP1 UART TX */
 		.settings = {
 			[GPIOMUX_SUSPENDED] = &gpio_uart_config,
@@ -67,31 +91,72 @@
 			[GPIOMUX_SUSPENDED] = &gpio_i2c_config,
 		},
 	},
-	{
-		.gpio      = 69,		/* BLSP6 QUP SPI_CS_N */
-		.settings = {
-			[GPIOMUX_SUSPENDED] = &gpio_spi_cs_config,
-		},
-	},
-	{
-		.gpio      = 20,		/* BLSP6 QUP SPI_DATA_MOSI */
-		.settings = {
-			[GPIOMUX_SUSPENDED] = &gpio_spi_config,
-		},
-	},
-	{
-		.gpio      = 21,		/* BLSP6 QUP SPI_DATA_MISO */
-		.settings = {
-			[GPIOMUX_SUSPENDED] = &gpio_spi_config,
-		},
-	},
-	{
-		.gpio      = 23,		/* BLSP6 QUP SPI_CLK */
-		.settings = {
-			[GPIOMUX_SUSPENDED] = &gpio_spi_config,
-		},
-	},
+};
 
+static struct gpiomux_setting  mi2s_active_cfg = {
+	.func = GPIOMUX_FUNC_1,
+	.drv = GPIOMUX_DRV_8MA,
+	.pull = GPIOMUX_PULL_NONE,
+};
+
+static struct gpiomux_setting  mi2s_suspend_cfg = {
+	.func = GPIOMUX_FUNC_GPIO,
+	.drv = GPIOMUX_DRV_2MA,
+	.pull = GPIOMUX_PULL_DOWN,
+};
+
+static struct gpiomux_setting codec_reset = {
+	.func = GPIOMUX_FUNC_GPIO,
+	.drv = GPIOMUX_DRV_6MA,
+	.pull = GPIOMUX_PULL_NONE,
+	.dir = GPIOMUX_OUT_LOW,
+};
+
+static struct msm_gpiomux_config mdm9625_mi2s_configs[] __initdata = {
+	{
+		.gpio	= 12,		/* mi2s ws */
+		.settings = {
+			[GPIOMUX_SUSPENDED] = &mi2s_suspend_cfg,
+			[GPIOMUX_ACTIVE] = &mi2s_active_cfg,
+		},
+	},
+	{
+		.gpio	= 15,		/* mi2s sclk */
+		.settings = {
+			[GPIOMUX_SUSPENDED] = &mi2s_suspend_cfg,
+			[GPIOMUX_ACTIVE] = &mi2s_active_cfg,
+		},
+	},
+	{
+		.gpio	= 14,		/* mi2s dout */
+		.settings = {
+			[GPIOMUX_SUSPENDED] = &mi2s_suspend_cfg,
+			[GPIOMUX_ACTIVE] = &mi2s_active_cfg,
+		},
+	},
+	{
+		.gpio	= 13,		/* mi2s din */
+		.settings = {
+			[GPIOMUX_SUSPENDED] = &mi2s_suspend_cfg,
+			[GPIOMUX_ACTIVE] = &mi2s_active_cfg,
+		},
+	},
+	{
+		.gpio	= 71,		/* mi2s mclk */
+		.settings = {
+			[GPIOMUX_SUSPENDED] = &mi2s_suspend_cfg,
+			[GPIOMUX_ACTIVE] = &mi2s_active_cfg,
+		},
+	},
+};
+
+static struct msm_gpiomux_config mdm9625_cdc_reset_config[] __initdata = {
+	{
+		.gpio   = 22,           /* SYS_RST_N */
+		.settings = {
+			[GPIOMUX_SUSPENDED] = &codec_reset,
+		},
+	}
 };
 
 static struct gpiomux_setting sdc3_clk_active_cfg = {
@@ -194,6 +259,23 @@
 	},
 };
 
+static struct gpiomux_setting sdc2_card_det_cfg = {
+	.func = GPIOMUX_FUNC_GPIO,
+	.drv = GPIOMUX_DRV_2MA,
+	.pull = GPIOMUX_PULL_DOWN,
+	.dir = GPIOMUX_IN,
+};
+
+struct msm_gpiomux_config sdc2_card_det_config[] __initdata = {
+	{
+		.gpio = 66,
+		.settings = {
+			[GPIOMUX_ACTIVE]    = &sdc2_card_det_cfg,
+			[GPIOMUX_SUSPENDED] = &sdc2_card_det_cfg,
+		},
+	},
+};
+
 void __init msm9625_init_gpiomux(void)
 {
 	int rc;
@@ -208,4 +290,10 @@
 	msm_gpiomux_install(sdc3_configs, ARRAY_SIZE(sdc3_configs));
 	msm_gpiomux_install(wlan_ath6kl_configs,
 		ARRAY_SIZE(wlan_ath6kl_configs));
+	msm_gpiomux_install(mdm9625_mi2s_configs,
+			ARRAY_SIZE(mdm9625_mi2s_configs));
+	msm_gpiomux_install(mdm9625_cdc_reset_config,
+			ARRAY_SIZE(mdm9625_cdc_reset_config));
+	msm_gpiomux_install(sdc2_card_det_config,
+		ARRAY_SIZE(sdc2_card_det_config));
 }
diff --git a/arch/arm/mach-msm/board-9625.c b/arch/arm/mach-msm/board-9625.c
index 3e5fc9d..42f3f41 100644
--- a/arch/arm/mach-msm/board-9625.c
+++ b/arch/arm/mach-msm/board-9625.c
@@ -101,8 +101,6 @@
 static struct of_dev_auxdata msm9625_auxdata_lookup[] __initdata = {
 	OF_DEV_AUXDATA("qcom,msm-lsuart-v14", 0xF991F000, \
 			"msm_serial_hsl.0", NULL),
-	OF_DEV_AUXDATA("qcom,spi-qup-v2", 0xF9928000, \
-			"spi_qsd.1", NULL),
 	OF_DEV_AUXDATA("qcom,spmi-pmic-arb", 0xFC4C0000, \
 			"spmi-pmic-arb.0", NULL),
 	OF_DEV_AUXDATA("qcom,msm-sdcc", 0xF98A4000, \
@@ -111,6 +109,8 @@
 			"msm_sdcc.3", NULL),
 	OF_DEV_AUXDATA("qcom,msm-tsens", 0xFC4A8000, \
 			"msm-tsens", NULL),
+	OF_DEV_AUXDATA("qcom,usb-bam-msm", 0xF9A44000, \
+			"usb_bam", NULL),
 	{}
 };
 
diff --git a/arch/arm/mach-msm/clock-7x30.c b/arch/arm/mach-msm/clock-7x30.c
index e1390db..3f59035 100644
--- a/arch/arm/mach-msm/clock-7x30.c
+++ b/arch/arm/mach-msm/clock-7x30.c
@@ -279,8 +279,8 @@
 	.en_mask = BIT(1),
 	.status_reg = PLL1_STATUS_BASE_REG,
 	.status_mask = BIT(16),
-	.parent = &tcxo_clk.c,
 	.c = {
+		.parent = &tcxo_clk.c,
 		.dbg_name = "pll1_clk",
 		.rate = 768000000,
 		.ops = &clk_ops_pll_vote,
@@ -293,8 +293,8 @@
 	.en_mask = BIT(2),
 	.status_reg = PLL2_STATUS_BASE_REG,
 	.status_mask = BIT(16),
-	.parent = &tcxo_clk.c,
 	.c = {
+		.parent = &tcxo_clk.c,
 		.dbg_name = "pll2_clk",
 		.rate = 806400000, /* TODO: Support scaling */
 		.ops = &clk_ops_pll_vote,
@@ -307,8 +307,8 @@
 	.en_mask = BIT(3),
 	.status_reg = PLL3_STATUS_BASE_REG,
 	.status_mask = BIT(16),
-	.parent = &lpxo_clk.c,
 	.c = {
+		.parent = &lpxo_clk.c,
 		.dbg_name = "pll3_clk",
 		.rate = 737280000,
 		.ops = &clk_ops_pll_vote,
@@ -321,8 +321,8 @@
 	.en_mask = BIT(4),
 	.status_reg = PLL4_STATUS_BASE_REG,
 	.status_mask = BIT(16),
-	.parent = &lpxo_clk.c,
 	.c = {
+		.parent = &lpxo_clk.c,
 		.dbg_name = "pll4_clk",
 		.rate = 891000000,
 		.ops = &clk_ops_pll_vote,
@@ -363,8 +363,8 @@
 		.halt_check = HALT_VOTED,
 		.halt_bit = 2,
 	},
-	.parent = &glbl_root_clk.c,
 	.c = {
+		.parent = &glbl_root_clk.c,
 		.dbg_name = "axi_li_apps_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(axi_li_apps_clk.c),
@@ -379,8 +379,8 @@
 		.halt_check = HALT_VOTED,
 		.halt_bit = 14,
 	},
-	.parent = &axi_li_apps_clk.c,
 	.c = {
+		.parent = &axi_li_apps_clk.c,
 		.dbg_name = "axi_li_adsp_a_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(axi_li_adsp_a_clk.c),
@@ -395,8 +395,8 @@
 		.halt_check = HALT_VOTED,
 		.halt_bit = 19,
 	},
-	.parent = &axi_li_apps_clk.c,
 	.c = {
+		.parent = &axi_li_apps_clk.c,
 		.dbg_name = "axi_li_jpeg_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(axi_li_jpeg_clk.c),
@@ -411,8 +411,8 @@
 		.halt_check = HALT_VOTED,
 		.halt_bit = 23,
 	},
-	.parent = &axi_li_apps_clk.c,
 	.c = {
+		.parent = &axi_li_apps_clk.c,
 		.dbg_name = "axi_li_vfe_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(axi_li_vfe_clk.c),
@@ -427,8 +427,8 @@
 		.halt_check = HALT_VOTED,
 		.halt_bit = 29,
 	},
-	.parent = &axi_li_apps_clk.c,
 	.c = {
+		.parent = &axi_li_apps_clk.c,
 		.dbg_name = "axi_mdp_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(axi_mdp_clk.c),
@@ -443,8 +443,8 @@
 		.halt_check = HALT_VOTED,
 		.halt_bit = 3,
 	},
-	.parent = &glbl_root_clk.c,
 	.c = {
+		.parent = &glbl_root_clk.c,
 		.dbg_name = "axi_li_vg_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(axi_li_vg_clk.c),
@@ -459,8 +459,8 @@
 		.halt_check = HALT_VOTED,
 		.halt_bit = 21,
 	},
-	.parent = &axi_li_vg_clk.c,
 	.c = {
+		.parent = &axi_li_vg_clk.c,
 		.dbg_name = "axi_grp_2d_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(axi_grp_2d_clk.c),
@@ -475,8 +475,8 @@
 		.halt_check = HALT_VOTED,
 		.halt_bit = 22,
 	},
-	.parent = &axi_li_vg_clk.c,
 	.c = {
+		.parent = &axi_li_vg_clk.c,
 		.dbg_name = "axi_li_grp_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(axi_li_grp_clk.c),
@@ -491,8 +491,8 @@
 		.halt_check = HALT_VOTED,
 		.halt_bit = 20,
 	},
-	.parent = &axi_li_vg_clk.c,
 	.c = {
+		.parent = &axi_li_vg_clk.c,
 		.dbg_name = "axi_mfc_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(axi_mfc_clk.c),
@@ -508,8 +508,8 @@
 		.halt_bit = 22,
 		.reset_mask = P_AXI_ROTATOR_CLK,
 	},
-	.parent = &axi_li_vg_clk.c,
 	.c = {
+		.parent = &axi_li_vg_clk.c,
 		.dbg_name = "axi_rotator_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(axi_rotator_clk.c),
@@ -524,8 +524,8 @@
 		.halt_check = HALT_VOTED,
 		.halt_bit = 21,
 	},
-	.parent = &axi_li_vg_clk.c,
 	.c = {
+		.parent = &axi_li_vg_clk.c,
 		.dbg_name = "axi_vpe_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(axi_vpe_clk.c),
@@ -542,8 +542,8 @@
 		.halt_bit = 5,
 		.reset_mask = P_ADM_CLK,
 	},
-	.parent = &axi_li_apps_clk.c,
 	.c = {
+		.parent = &axi_li_apps_clk.c,
 		.dbg_name = "adm_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(adm_clk.c),
@@ -558,8 +558,8 @@
 		.halt_check = HALT_VOTED,
 		.halt_bit = 15,
 	},
-	.parent = &glbl_root_clk.c,
 	.c = {
+		.parent = &glbl_root_clk.c,
 		.dbg_name = "adm_p_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(adm_p_clk.c),
@@ -575,8 +575,8 @@
 		.halt_bit = 6,
 		.reset_mask = P_CE_CLK,
 	},
-	.parent = &glbl_root_clk.c,
 	.c = {
+		.parent = &glbl_root_clk.c,
 		.dbg_name = "ce_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(ce_clk.c),
@@ -592,8 +592,8 @@
 		.halt_bit = 9,
 		.reset_mask = P_CAMIF_PAD_P_CLK,
 	},
-	.parent = &glbl_root_clk.c,
 	.c = {
+		.parent = &glbl_root_clk.c,
 		.dbg_name = "camif_pad_p_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(camif_pad_p_clk.c),
@@ -609,8 +609,8 @@
 		.halt_bit = 30,
 		.reset_mask = P_CSI0_P_CLK,
 	},
-	.parent = &glbl_root_clk.c,
 	.c = {
+		.parent = &glbl_root_clk.c,
 		.dbg_name = "csi0_p_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(csi0_p_clk.c),
@@ -626,8 +626,8 @@
 		.halt_bit = 3,
 		.reset_mask = P_EMDH_P_CLK,
 	},
-	.parent = &glbl_root_clk.c,
 	.c = {
+		.parent = &glbl_root_clk.c,
 		.dbg_name = "emdh_p_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(emdh_p_clk.c),
@@ -643,8 +643,8 @@
 		.halt_bit = 24,
 		.reset_mask = P_GRP_2D_P_CLK,
 	},
-	.parent = &glbl_root_clk.c,
 	.c = {
+		.parent = &glbl_root_clk.c,
 		.dbg_name = "grp_2d_p_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(grp_2d_p_clk.c),
@@ -660,8 +660,8 @@
 		.halt_bit = 17,
 		.reset_mask = P_GRP_3D_P_CLK,
 	},
-	.parent = &glbl_root_clk.c,
 	.c = {
+		.parent = &glbl_root_clk.c,
 		.dbg_name = "grp_3d_p_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(grp_3d_p_clk.c),
@@ -677,8 +677,8 @@
 		.halt_bit = 24,
 		.reset_mask = P_JPEG_P_CLK,
 	},
-	.parent = &glbl_root_clk.c,
 	.c = {
+		.parent = &glbl_root_clk.c,
 		.dbg_name = "jpeg_p_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(jpeg_p_clk.c),
@@ -694,8 +694,8 @@
 		.halt_bit = 7,
 		.reset_mask = P_LPA_P_CLK,
 	},
-	.parent = &glbl_root_clk.c,
 	.c = {
+		.parent = &glbl_root_clk.c,
 		.dbg_name = "lpa_p_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(lpa_p_clk.c),
@@ -711,8 +711,8 @@
 		.halt_bit = 6,
 		.reset_mask = P_MDP_P_CLK,
 	},
-	.parent = &glbl_root_clk.c,
 	.c = {
+		.parent = &glbl_root_clk.c,
 		.dbg_name = "mdp_p_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(mdp_p_clk.c),
@@ -728,8 +728,8 @@
 		.halt_bit = 26,
 		.reset_mask = P_MFC_P_CLK,
 	},
-	.parent = &glbl_root_clk.c,
 	.c = {
+		.parent = &glbl_root_clk.c,
 		.dbg_name = "mfc_p_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(mfc_p_clk.c),
@@ -745,8 +745,8 @@
 		.halt_bit = 4,
 		.reset_mask = P_PMDH_P_CLK,
 	},
-	.parent = &glbl_root_clk.c,
 	.c = {
+		.parent = &glbl_root_clk.c,
 		.dbg_name = "pmdh_p_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(pmdh_p_clk.c),
@@ -762,8 +762,8 @@
 		.halt_bit = 23,
 		.reset_mask = P_ROTATOR_IMEM_CLK,
 	},
-	.parent = &glbl_root_clk.c,
 	.c = {
+		.parent = &glbl_root_clk.c,
 		.dbg_name = "rotator_imem_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(rotator_imem_clk.c),
@@ -779,8 +779,8 @@
 		.halt_bit = 25,
 		.reset_mask = P_ROTATOR_P_CLK,
 	},
-	.parent = &glbl_root_clk.c,
 	.c = {
+		.parent = &glbl_root_clk.c,
 		.dbg_name = "rotator_p_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(rotator_p_clk.c),
@@ -796,8 +796,8 @@
 		.halt_bit = 7,
 		.reset_mask = P_SDC1_P_CLK,
 	},
-	.parent = &glbl_root_clk.c,
 	.c = {
+		.parent = &glbl_root_clk.c,
 		.dbg_name = "sdc1_p_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(sdc1_p_clk.c),
@@ -813,8 +813,8 @@
 		.halt_bit = 8,
 		.reset_mask = P_SDC2_P_CLK,
 	},
-	.parent = &glbl_root_clk.c,
 	.c = {
+		.parent = &glbl_root_clk.c,
 		.dbg_name = "sdc2_p_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(sdc2_p_clk.c),
@@ -830,8 +830,8 @@
 		.halt_bit = 27,
 		.reset_mask = P_SDC3_P_CLK,
 	},
-	.parent = &glbl_root_clk.c,
 	.c = {
+		.parent = &glbl_root_clk.c,
 		.dbg_name = "sdc3_p_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(sdc3_p_clk.c),
@@ -847,8 +847,8 @@
 		.halt_bit = 28,
 		.reset_mask = P_SDC4_P_CLK,
 	},
-	.parent = &glbl_root_clk.c,
 	.c = {
+		.parent = &glbl_root_clk.c,
 		.dbg_name = "sdc4_p_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(sdc4_p_clk.c),
@@ -864,8 +864,8 @@
 		.halt_bit = 10,
 		.reset_mask = P_SPI_P_CLK,
 	},
-	.parent = &glbl_root_clk.c,
 	.c = {
+		.parent = &glbl_root_clk.c,
 		.dbg_name = "spi_p_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(spi_p_clk.c),
@@ -881,8 +881,8 @@
 		.halt_bit = 18,
 		.reset_mask = P_TSIF_P_CLK,
 	},
-	.parent = &glbl_root_clk.c,
 	.c = {
+		.parent = &glbl_root_clk.c,
 		.dbg_name = "tsif_p_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(tsif_p_clk.c),
@@ -897,8 +897,8 @@
 		.halt_check = HALT_VOTED,
 		.halt_bit = 17,
 	},
-	.parent = &glbl_root_clk.c,
 	.c = {
+		.parent = &glbl_root_clk.c,
 		.dbg_name = "uart1dm_p_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(uart1dm_p_clk.c),
@@ -913,8 +913,8 @@
 		.halt_check = HALT_VOTED,
 		.halt_bit = 26,
 	},
-	.parent = &glbl_root_clk.c,
 	.c = {
+		.parent = &glbl_root_clk.c,
 		.dbg_name = "uart2dm_p_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(uart2dm_p_clk.c),
@@ -930,8 +930,8 @@
 		.halt_bit = 8,
 		.reset_mask = P_USB_HS2_P_CLK,
 	},
-	.parent = &glbl_root_clk.c,
 	.c = {
+		.parent = &glbl_root_clk.c,
 		.dbg_name = "usb_hs2_p_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(usb_hs2_p_clk.c),
@@ -947,8 +947,8 @@
 		.halt_bit = 9,
 		.reset_mask = P_USB_HS3_P_CLK,
 	},
-	.parent = &glbl_root_clk.c,
 	.c = {
+		.parent = &glbl_root_clk.c,
 		.dbg_name = "usb_hs3_p_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(usb_hs3_p_clk.c),
@@ -964,8 +964,8 @@
 		.halt_bit = 25,
 		.reset_mask = P_USB_HS_P_CLK,
 	},
-	.parent = &glbl_root_clk.c,
 	.c = {
+		.parent = &glbl_root_clk.c,
 		.dbg_name = "usb_hs_p_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(usb_hs_p_clk.c),
@@ -981,8 +981,8 @@
 		.halt_bit = 27,
 		.reset_mask = P_VFE_P_CLK,
 	},
-	.parent = &glbl_root_clk.c,
 	.c = {
+		.parent = &glbl_root_clk.c,
 		.dbg_name = "vfe_p_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(vfe_p_clk.c),
@@ -1320,8 +1320,8 @@
 		.halt_bit = 18,
 		.reset_mask = P_GRP_3D_CLK,
 	},
-	.parent = &grp_3d_src_clk.c,
 	.c = {
+		.parent = &grp_3d_src_clk.c,
 		.dbg_name = "grp_3d_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(grp_3d_clk.c),
@@ -1336,8 +1336,8 @@
 		.halt_bit = 19,
 		.reset_mask = P_IMEM_CLK,
 	},
-	.parent = &grp_3d_src_clk.c,
 	.c = {
+		.parent = &grp_3d_src_clk.c,
 		.dbg_name = "imem_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(imem_clk.c),
@@ -1542,8 +1542,8 @@
 		.halt_bit = 29,
 		.reset_mask = P_MDP_LCDC_PAD_PCLK_CLK,
 	},
-	.parent = &mdp_lcdc_pclk_clk.c,
 	.c = {
+		.parent = &mdp_lcdc_pclk_clk.c,
 		.dbg_name = "mdp_lcdc_pad_pclk_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(mdp_lcdc_pad_pclk_clk.c),
@@ -1616,8 +1616,8 @@
 		.halt_bit = 13,
 		.reset_mask = P_MI2S_CODEC_RX_S_CLK,
 	},
-	.parent = &mi2s_codec_rx_m_clk.c,
 	.c = {
+		.parent = &mi2s_codec_rx_m_clk.c,
 		.dbg_name = "mi2s_codec_rx_s_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(mi2s_codec_rx_s_clk.c),
@@ -1656,8 +1656,8 @@
 		.halt_bit = 11,
 		.reset_mask = P_MI2S_CODEC_TX_S_CLK,
 	},
-	.parent = &mi2s_codec_tx_m_clk.c,
 	.c = {
+		.parent = &mi2s_codec_tx_m_clk.c,
 		.dbg_name = "mi2s_codec_tx_s_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(mi2s_codec_tx_s_clk.c),
@@ -1702,8 +1702,8 @@
 		.halt_bit = 3,
 		.reset_mask = P_MI2S_S_CLK,
 	},
-	.parent = &mi2s_m_clk.c,
 	.c = {
+		.parent = &mi2s_m_clk.c,
 		.dbg_name = "mi2s_s_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(mi2s_s_clk.c),
@@ -1763,8 +1763,8 @@
 		.halt_bit = 17,
 		.reset_mask = P_SDAC_M_CLK,
 	},
-	.parent = &sdac_clk.c,
 	.c = {
+		.parent = &sdac_clk.c,
 		.dbg_name = "sdac_m_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(sdac_m_clk.c),
@@ -1807,8 +1807,8 @@
 		.halt_bit = 7,
 		.reset_mask = P_HDMI_CLK,
 	},
-	.parent = &tv_clk.c,
 	.c = {
+		.parent = &tv_clk.c,
 		.dbg_name = "hdmi_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(hdmi_clk.c),
@@ -1823,8 +1823,8 @@
 		.halt_bit = 27,
 		.reset_mask = P_TV_DAC_CLK,
 	},
-	.parent = &tv_clk.c,
 	.c = {
+		.parent = &tv_clk.c,
 		.dbg_name = "tv_dac_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(tv_dac_clk.c),
@@ -1839,8 +1839,8 @@
 		.halt_bit = 10,
 		.reset_mask = P_TV_ENC_CLK,
 	},
-	.parent = &tv_clk.c,
 	.c = {
+		.parent = &tv_clk.c,
 		.dbg_name = "tv_enc_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(tv_enc_clk.c),
@@ -1856,8 +1856,8 @@
 		.halt_bit = 11,
 		.reset_mask = P_TSIF_REF_CLK,
 	},
-	.parent = &tv_clk.c,
 	.c = {
+		.parent = &tv_clk.c,
 		.dbg_name = "tsif_ref_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(tsif_ref_clk.c),
@@ -1915,8 +1915,8 @@
 		.halt_bit = 27,
 		.reset_mask = P_USB_HS_CORE_CLK,
 	},
-	.parent = &usb_hs_src_clk.c,
 	.c = {
+		.parent = &usb_hs_src_clk.c,
 		.dbg_name = "usb_hs_core_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(usb_hs_core_clk.c),
@@ -1931,8 +1931,8 @@
 		.halt_bit = 3,
 		.reset_mask = P_USB_HS2_CLK,
 	},
-	.parent = &usb_hs_src_clk.c,
 	.c = {
+		.parent = &usb_hs_src_clk.c,
 		.dbg_name = "usb_hs2_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(usb_hs2_clk.c),
@@ -1947,8 +1947,8 @@
 		.halt_bit = 28,
 		.reset_mask = P_USB_HS2_CORE_CLK,
 	},
-	.parent = &usb_hs_src_clk.c,
 	.c = {
+		.parent = &usb_hs_src_clk.c,
 		.dbg_name = "usb_hs2_core_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(usb_hs2_core_clk.c),
@@ -1963,8 +1963,8 @@
 		.halt_bit = 2,
 		.reset_mask = P_USB_HS3_CLK,
 	},
-	.parent = &usb_hs_src_clk.c,
 	.c = {
+		.parent = &usb_hs_src_clk.c,
 		.dbg_name = "usb_hs3_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(usb_hs3_clk.c),
@@ -1979,8 +1979,8 @@
 		.halt_bit = 29,
 		.reset_mask = P_USB_HS3_CORE_CLK,
 	},
-	.parent = &usb_hs_src_clk.c,
 	.c = {
+		.parent = &usb_hs_src_clk.c,
 		.dbg_name = "usb_hs3_core_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(usb_hs3_core_clk.c),
@@ -2062,8 +2062,8 @@
 		.halt_bit = 9,
 		.reset_mask = P_VFE_MDC_CLK,
 	},
-	.parent = &vfe_clk.c,
 	.c = {
+		.parent = &vfe_clk.c,
 		.dbg_name = "vfe_mdc_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(vfe_mdc_clk.c),
@@ -2078,8 +2078,8 @@
 		.halt_bit = 13,
 		.reset_mask = P_VFE_CAMIF_CLK,
 	},
-	.parent = &vfe_clk.c,
 	.c = {
+		.parent = &vfe_clk.c,
 		.dbg_name = "vfe_camif_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(vfe_camif_clk.c),
@@ -2094,8 +2094,8 @@
 		.halt_bit = 16,
 		.reset_mask = P_CSI0_VFE_CLK,
 	},
-	.parent = &vfe_clk.c,
 	.c = {
+		.parent = &vfe_clk.c,
 		.dbg_name = "csi0_vfe_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(csi0_vfe_clk.c),
@@ -2219,8 +2219,8 @@
 		.halt_bit = 11,
 		.reset_mask = P_MFC_DIV2_CLK,
 	},
-	.parent = &mfc_clk.c,
 	.c = {
+		.parent = &mfc_clk.c,
 		.dbg_name = "mfc_div2_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(mfc_div2_clk.c),
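
Every hunk above makes the same mechanical change: .parent moves out of the wrapper structs (pll_clk, branch_clk, ...) and into the embedded common struct clk, so generic code can follow a clock's parent without a per-clock callback. A minimal sketch of that embedding pattern under simplified, made-up types:

/* Sketch: a common 'struct clk' embedded in a hardware-specific wrapper,
 * with the parent kept on the common struct so generic code can use it.
 * All types here are simplified stand-ins, not the msm clock framework. */
#include <stdio.h>
#include <stddef.h>

struct clk {
	const char *dbg_name;
	struct clk *parent;
};

struct branch_clk {
	unsigned int halt_bit;	/* hardware-specific detail */
	struct clk c;		/* common part, embedded */
};

#define to_branch_clk(clk_ptr) \
	((struct branch_clk *)((char *)(clk_ptr) - offsetof(struct branch_clk, c)))

static struct clk root = { .dbg_name = "root_clk" };

static struct branch_clk leaf = {
	.halt_bit = 3,
	.c = {
		.dbg_name = "leaf_clk",
		.parent = &root,	/* parent lives on the common struct */
	},
};

int main(void)
{
	struct clk *c = &leaf.c;

	printf("%s -> parent %s (halt_bit %u)\n", c->dbg_name,
	       c->parent->dbg_name, to_branch_clk(c)->halt_bit);
	return 0;
}
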
diff --git a/arch/arm/mach-msm/clock-8960.c b/arch/arm/mach-msm/clock-8960.c
index 5e5e274..a4d7e61 100644
--- a/arch/arm/mach-msm/clock-8960.c
+++ b/arch/arm/mach-msm/clock-8960.c
@@ -525,8 +525,8 @@
 
 static struct pll_clk pll2_clk = {
 	.mode_reg = MM_PLL1_MODE_REG,
-	.parent = &pxo_clk.c,
 	.c = {
+		.parent = &pxo_clk.c,
 		.dbg_name = "pll2_clk",
 		.rate = 800000000,
 		.ops = &clk_ops_local_pll,
@@ -536,8 +536,8 @@
 
 static struct pll_clk pll3_clk = {
 	.mode_reg = BB_MMCC_PLL2_MODE_REG,
-	.parent = &pxo_clk.c,
 	.c = {
+		.parent = &pxo_clk.c,
 		.dbg_name = "pll3_clk",
 		.rate = 1200000000,
 		.ops = &clk_ops_local_pll,
@@ -555,8 +555,8 @@
 	.en_mask = BIT(4),
 	.status_reg = LCC_PLL0_STATUS_REG,
 	.status_mask = BIT(16),
-	.parent = &pxo_clk.c,
 	.c = {
+		.parent = &pxo_clk.c,
 		.dbg_name = "pll4_clk",
 		.rate = 393216000,
 		.ops = &clk_ops_pll_vote,
@@ -569,8 +569,8 @@
 	.en_mask = BIT(8),
 	.status_reg = BB_PLL8_STATUS_REG,
 	.status_mask = BIT(16),
-	.parent = &pxo_clk.c,
 	.c = {
+		.parent = &pxo_clk.c,
 		.dbg_name = "pll8_clk",
 		.rate = 384000000,
 		.ops = &clk_ops_pll_vote,
@@ -583,8 +583,8 @@
 	.en_mask = BIT(14),
 	.status_reg = BB_PLL14_STATUS_REG,
 	.status_mask = BIT(16),
-	.parent = &pxo_clk.c,
 	.c = {
+		.parent = &pxo_clk.c,
 		.dbg_name = "pll14_clk",
 		.rate = 480000000,
 		.ops = &clk_ops_pll_vote,
@@ -594,8 +594,8 @@
 
 static struct pll_clk pll15_clk = {
 	.mode_reg = MM_PLL3_MODE_REG,
-	.parent = &pxo_clk.c,
 	.c = {
+		.parent = &pxo_clk.c,
 		.dbg_name = "pll15_clk",
 		.rate = 975000000,
 		.ops = &clk_ops_local_pll,
@@ -1701,8 +1701,8 @@
 		.halt_reg = CLK_HALT_CFPB_STATEA_REG,
 		.halt_bit = 24,
 	},
-	.parent = &usb_hsic_xcvr_fs_clk.c,
 	.c = {
+		.parent = &usb_hsic_xcvr_fs_clk.c,
 		.dbg_name = "usb_hsic_system_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(usb_hsic_system_clk.c),
@@ -1743,8 +1743,8 @@
 		.halt_reg = CLK_HALT_CFPB_STATEA_REG,
 		.halt_bit = 19,
 	},
-	.parent = &usb_hsic_hsic_src_clk.c,
 	.c = {
+		.parent = &usb_hsic_hsic_src_clk.c,
 		.dbg_name = "usb_hsic_hsic_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(usb_hsic_hsic_clk.c),
@@ -1823,8 +1823,8 @@
 		.halt_reg = CLK_HALT_CFPB_STATEA_REG,
 		.halt_bit = 15,
 	},
-	.parent = &usb_fs1_src_clk.c,
 	.c = {
+		.parent = &usb_fs1_src_clk.c,
 		.dbg_name = "usb_fs1_xcvr_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(usb_fs1_xcvr_clk.c),
@@ -1840,8 +1840,8 @@
 		.halt_reg = CLK_HALT_CFPB_STATEA_REG,
 		.halt_bit = 16,
 	},
-	.parent = &usb_fs1_src_clk.c,
 	.c = {
+		.parent = &usb_fs1_src_clk.c,
 		.dbg_name = "usb_fs1_sys_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(usb_fs1_sys_clk.c),
@@ -1858,8 +1858,8 @@
 		.halt_reg = CLK_HALT_CFPB_STATEA_REG,
 		.halt_bit = 12,
 	},
-	.parent = &usb_fs2_src_clk.c,
 	.c = {
+		.parent = &usb_fs2_src_clk.c,
 		.dbg_name = "usb_fs2_xcvr_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(usb_fs2_xcvr_clk.c),
@@ -1875,8 +1875,8 @@
 		.halt_reg = CLK_HALT_CFPB_STATEA_REG,
 		.halt_bit = 13,
 	},
-	.parent = &usb_fs2_src_clk.c,
 	.c = {
+		.parent = &usb_fs2_src_clk.c,
 		.dbg_name = "usb_fs2_sys_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(usb_fs2_sys_clk.c),
@@ -1962,8 +1962,8 @@
 		.halt_reg = CLK_HALT_MSS_SMPSS_MISC_STATE_REG,
 		.halt_bit = 5,
 	},
-	.parent = &ce3_src_clk.c,
 	.c = {
+		.parent = &ce3_src_clk.c,
 		.dbg_name = "ce3_core_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(ce3_core_clk.c),
@@ -1979,8 +1979,8 @@
 		.halt_reg = CLK_HALT_AFAB_SFAB_STATEB_REG,
 		.halt_bit = 16,
 	},
-	.parent = &ce3_src_clk.c,
 	.c = {
+		.parent = &ce3_src_clk.c,
 		.dbg_name = "ce3_p_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(ce3_p_clk.c),
@@ -2027,8 +2027,8 @@
 		.halt_reg = CLK_HALT_MSS_SMPSS_MISC_STATE_REG,
 		.halt_bit = 26,
 	},
-	.parent = &sata_src_clk.c,
 	.c = {
+		.parent = &sata_src_clk.c,
 		.dbg_name = "sata_rxoob_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(sata_rxoob_clk.c),
@@ -2042,8 +2042,8 @@
 		.halt_reg = CLK_HALT_MSS_SMPSS_MISC_STATE_REG,
 		.halt_bit = 25,
 	},
-	.parent = &sata_src_clk.c,
 	.c = {
+		.parent = &sata_src_clk.c,
 		.dbg_name = "sata_pmalive_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(sata_pmalive_clk.c),
@@ -2057,8 +2057,8 @@
 		.halt_reg = CLK_HALT_MSS_SMPSS_MISC_STATE_REG,
 		.halt_bit = 24,
 	},
-	.parent = &pxo_clk.c,
 	.c = {
+		.parent = &pxo_clk.c,
 		.dbg_name = "sata_phy_ref_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(sata_phy_ref_clk.c),
@@ -2750,8 +2750,8 @@
 		.halt_reg = DBG_BUS_VEC_B_REG,
 		.halt_bit = 13,
 	},
-	.parent = &csi0_src_clk.c,
 	.c = {
+		.parent = &csi0_src_clk.c,
 		.dbg_name = "csi0_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(csi0_clk.c),
@@ -2767,8 +2767,8 @@
 		.halt_reg = DBG_BUS_VEC_I_REG,
 		.halt_bit = 9,
 	},
-	.parent = &csi0_src_clk.c,
 	.c = {
+		.parent = &csi0_src_clk.c,
 		.dbg_name = "csi0_phy_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(csi0_phy_clk.c),
@@ -2806,8 +2806,8 @@
 		.halt_reg = DBG_BUS_VEC_B_REG,
 		.halt_bit = 14,
 	},
-	.parent = &csi1_src_clk.c,
 	.c = {
+		.parent = &csi1_src_clk.c,
 		.dbg_name = "csi1_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(csi1_clk.c),
@@ -2823,8 +2823,8 @@
 		.halt_reg = DBG_BUS_VEC_I_REG,
 		.halt_bit = 10,
 	},
-	.parent = &csi1_src_clk.c,
 	.c = {
+		.parent = &csi1_src_clk.c,
 		.dbg_name = "csi1_phy_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(csi1_phy_clk.c),
@@ -2862,8 +2862,8 @@
 		.halt_reg = DBG_BUS_VEC_B_REG,
 		.halt_bit = 29,
 	},
-	.parent = &csi2_src_clk.c,
 	.c = {
+		.parent = &csi2_src_clk.c,
 		.dbg_name = "csi2_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(csi2_clk.c),
@@ -2879,8 +2879,8 @@
 		.halt_reg = DBG_BUS_VEC_I_REG,
 		.halt_bit = 29,
 	},
-	.parent = &csi2_src_clk.c,
 	.c = {
+		.parent = &csi2_src_clk.c,
 		.dbg_name = "csi2_phy_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(csi2_phy_clk.c),
@@ -2977,6 +2977,7 @@
 	mb();
 	udelay(1);
 	rdi->cur_rate = rate;
+	c->parent = mux_map[rate];
 	spin_unlock(&local_clock_reg_lock);
 
 	if (rdi->enabled)
@@ -3038,11 +3039,6 @@
 	return branch_reset(&to_pix_rdi_clk(c)->b, action);
 }
 
-static struct clk *pix_rdi_clk_get_parent(struct clk *c)
-{
-	return pix_rdi_mux_map[to_pix_rdi_clk(c)->cur_rate];
-}
-
 static int pix_rdi_clk_list_rate(struct clk *c, unsigned n)
 {
 	if (pix_rdi_mux_map[n])
@@ -3064,6 +3060,7 @@
 	rdi->cur_rate = reg & rdi->s_mask ? 1 : 0;
 	reg = readl_relaxed(rdi->s2_reg);
 	rdi->cur_rate = reg & rdi->s2_mask ? 2 : rdi->cur_rate;
+	c->parent = pix_rdi_mux_map[rdi->cur_rate];
 
 	return HANDOFF_ENABLED_CLK;
 }
@@ -3078,7 +3075,6 @@
 	.get_rate = pix_rdi_clk_get_rate,
 	.list_rate = pix_rdi_clk_list_rate,
 	.reset = pix_rdi_clk_reset,
-	.get_parent = pix_rdi_clk_get_parent,
 };
 
 static struct pix_rdi_clk csi_pix_clk = {
@@ -3220,8 +3216,8 @@
 		.halt_reg = DBG_BUS_VEC_I_REG,
 		.halt_bit = 17,
 	},
-	.parent = &csiphy_timer_src_clk.c,
 	.c = {
+		.parent = &csiphy_timer_src_clk.c,
 		.dbg_name = "csi0phy_timer_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(csi0phy_timer_clk.c),
@@ -3235,8 +3231,8 @@
 		.halt_reg = DBG_BUS_VEC_I_REG,
 		.halt_bit = 18,
 	},
-	.parent = &csiphy_timer_src_clk.c,
 	.c = {
+		.parent = &csiphy_timer_src_clk.c,
 		.dbg_name = "csi1phy_timer_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(csi1phy_timer_clk.c),
@@ -3250,8 +3246,8 @@
 		.halt_reg = DBG_BUS_VEC_I_REG,
 		.halt_bit = 30,
 	},
-	.parent = &csiphy_timer_src_clk.c,
 	.c = {
+		.parent = &csiphy_timer_src_clk.c,
 		.dbg_name = "csi2phy_timer_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(csi2phy_timer_clk.c),
@@ -3719,8 +3715,8 @@
 		.halt_reg = DBG_BUS_VEC_J_REG,
 		.halt_bit = 25,
 	},
-	.parent = &vcap_clk.c,
 	.c = {
+		.parent = &vcap_clk.c,
 		.dbg_name = "vcap_npl_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(vcap_npl_clk.c),
@@ -3937,8 +3933,8 @@
 		.retain_reg = MDP_LUT_CC_REG,
 		.retain_mask = BIT(31),
 	},
-	.parent = &mdp_clk.c,
 	.c = {
+		.parent = &mdp_clk.c,
 		.dbg_name = "lut_mdp_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(lut_mdp_clk.c),
@@ -4058,18 +4054,13 @@
 	spin_unlock_irqrestore(&local_clock_reg_lock, flags);
 }
 
-static struct clk *hdmi_pll_clk_get_parent(struct clk *c)
-{
-	return &pxo_clk.c;
-}
-
 static struct clk_ops clk_ops_hdmi_pll = {
 	.enable = hdmi_pll_clk_enable,
 	.disable = hdmi_pll_clk_disable,
-	.get_parent = hdmi_pll_clk_get_parent,
 };
 
 static struct clk hdmi_pll_clk = {
+	.parent = &pxo_clk.c,
 	.dbg_name = "hdmi_pll_clk",
 	.ops = &clk_ops_hdmi_pll,
 	.vdd_class = &vdd_sr2_hdmi_pll,
@@ -4176,8 +4167,8 @@
 		.halt_reg = DBG_BUS_VEC_D_REG,
 		.halt_bit = 9,
 	},
-	.parent = &tv_src_clk.c,
 	.c = {
+		.parent = &tv_src_clk.c,
 		.dbg_name = "tv_enc_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(tv_enc_clk.c),
@@ -4191,8 +4182,8 @@
 		.halt_reg = DBG_BUS_VEC_D_REG,
 		.halt_bit = 10,
 	},
-	.parent = &tv_src_clk.c,
 	.c = {
+		.parent = &tv_src_clk.c,
 		.dbg_name = "tv_dac_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(tv_dac_clk.c),
@@ -4210,8 +4201,8 @@
 		.retain_reg = TV_CC2_REG,
 		.retain_mask = BIT(10),
 	},
-	.parent = &tv_src_clk.c,
 	.c = {
+		.parent = &tv_src_clk.c,
 		.dbg_name = "mdp_tv_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(mdp_tv_clk.c),
@@ -4227,8 +4218,8 @@
 		.halt_reg = DBG_BUS_VEC_D_REG,
 		.halt_bit = 11,
 	},
-	.parent = &tv_src_clk.c,
 	.c = {
+		.parent = &tv_src_clk.c,
 		.dbg_name = "hdmi_tv_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(hdmi_tv_clk.c),
@@ -4242,8 +4233,8 @@
 		.halt_reg = DBG_BUS_VEC_J_REG,
 		.halt_bit = 27,
 	},
-	.parent = &tv_src_clk.c,
 	.c = {
+		.parent = &tv_src_clk.c,
 		.dbg_name = "rgb_tv_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(rgb_tv_clk.c),
@@ -4257,8 +4248,8 @@
 		.halt_reg = DBG_BUS_VEC_J_REG,
 		.halt_bit = 26,
 	},
-	.parent = &tv_src_clk.c,
 	.c = {
+		.parent = &tv_src_clk.c,
 		.dbg_name = "npl_tv_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(npl_tv_clk.c),
@@ -4480,8 +4471,8 @@
 		.halt_reg = DBG_BUS_VEC_B_REG,
 		.halt_bit = 8,
 	},
-	.parent = &vfe_clk.c,
 	.c = {
+		.parent = &vfe_clk.c,
 		.dbg_name = "csi_vfe_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(csi_vfe_clk.c),
@@ -4753,8 +4744,8 @@
 		.halt_check = ENABLE,
 		.halt_bit = 1,
 	},
-	.parent = &audio_slimbus_clk.c,
 	.c = {
+		.parent = &audio_slimbus_clk.c,
 		.dbg_name = "sps_slimbus_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(sps_slimbus_clk.c),
@@ -4768,8 +4759,8 @@
 		.halt_reg = CLK_HALT_DFAB_STATE_REG,
 		.halt_bit = 28,
 	},
-	.parent = &sps_slimbus_clk.c,
 	.c = {
+		.parent = &sps_slimbus_clk.c,
 		.dbg_name = "slimbus_xo_src_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(slimbus_xo_src_clk.c),
@@ -6328,12 +6319,12 @@
 	 */
 	/*
 	 * Initialize MM AHB registers: Enable the FPB clock and disable HW
-	 * gating on 8627 and 8960 for all clocks. Also set VFE_AHB's
+	 * gating on 8627, 8960 and 8930ab for all clocks. Also set VFE_AHB's
 	 * FORCE_CORE_ON bit to prevent its memory from being collapsed when
 	 * the clock is halted. The sleep and wake-up delays are set to safe
 	 * values.
 	 */
-	if (cpu_is_msm8627() || cpu_is_msm8960ab()) {
+	if (cpu_is_msm8627() || cpu_is_msm8960ab() || cpu_is_msm8930ab()) {
 		rmwreg(0x00000003, AHB_EN_REG,  0x6C000103);
 		writel_relaxed(0x000007F9, AHB_EN2_REG);
 	} else {
@@ -6353,7 +6344,7 @@
 	 * delays to safe values. */
 	if (cpu_is_msm8960ab() || (cpu_is_msm8960() &&
 			SOCINFO_VERSION_MAJOR(socinfo_get_version()) < 3) ||
-			cpu_is_msm8627()) {
+			cpu_is_msm8627() || cpu_is_msm8930ab()) {
 		rmwreg(0x000007F9, MAXI_EN_REG,  0x0803FFFF);
 		rmwreg(0x3027FCFF, MAXI_EN2_REG, 0x3A3FFFFF);
 	} else {
@@ -6366,12 +6357,13 @@
 
 	if (cpu_is_apq8064() || cpu_is_apq8064ab())
 		rmwreg(0x019FECFF, MAXI_EN5_REG, 0x01FFEFFF);
-	if (cpu_is_msm8930() || cpu_is_msm8930aa() || cpu_is_msm8627())
+	if (cpu_is_msm8930() || cpu_is_msm8930aa() || cpu_is_msm8627() ||
+	    cpu_is_msm8930ab())
 		rmwreg(0x000004FF, MAXI_EN5_REG, 0x00000FFF);
 	if (cpu_is_msm8960ab())
 		rmwreg(0x009FE000, MAXI_EN5_REG, 0x01FFE000);
 
-	if (cpu_is_msm8627())
+	if (cpu_is_msm8627() || cpu_is_msm8930ab())
 		rmwreg(0x000003C7, SAXI_EN_REG,  0x00003FFF);
 	else if (cpu_is_msm8960ab())
 		rmwreg(0x000001C6, SAXI_EN_REG,  0x00001DF6);
@@ -6412,7 +6404,7 @@
 		rmwreg(0x00000001, DSI2_PIXEL_CC2_REG, 0x00000001);
 
 	if (cpu_is_msm8960() || cpu_is_msm8930() || cpu_is_msm8930aa() ||
-	    cpu_is_msm8627())
+	    cpu_is_msm8627() || cpu_is_msm8930ab())
 		rmwreg(0x80FF0000, TV_CC_REG,        0xE1FFC010);
 	if (cpu_is_msm8960ab())
 		rmwreg(0x00000000, TV_CC_REG,        0x00004010);
@@ -6674,7 +6666,7 @@
 	}
 	clk_set_rate(&usb_fs1_src_clk.c, 60000000);
 	if (cpu_is_msm8960ab() || cpu_is_msm8960() || cpu_is_msm8930() ||
-		cpu_is_msm8930aa() || cpu_is_msm8627())
+		cpu_is_msm8930aa() || cpu_is_msm8627() || cpu_is_msm8930ab())
 		clk_set_rate(&usb_fs2_src_clk.c, 60000000);
 	clk_set_rate(&usb_hsic_xcvr_fs_clk.c, 60000000);
 	clk_set_rate(&usb_hsic_hsic_src_clk.c, 480000000);
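
With the parent now held on the common struct, this file can also drop the pix_rdi and HDMI-PLL get_parent callbacks: the parent is written directly into c->parent when the mux is reprogrammed in set_rate and when the current selection is read back at handoff. A tiny sketch of keeping that field in sync (all names invented):

/* Sketch: keep c->parent consistent with a hardware mux selection instead
 * of providing a get_parent callback. Purely illustrative. */
#include <stdio.h>

struct clk {
	const char *name;
	struct clk *parent;
};

static struct clk src0 = { "csi0_src" };
static struct clk src1 = { "csi1_src" };
static struct clk *mux_map[] = { &src0, &src1 };

struct mux_clk {
	unsigned int cur_sel;
	struct clk c;
};

static void mux_set_sel(struct mux_clk *m, unsigned int sel)
{
	/* ...program the hardware mux here... */
	m->cur_sel = sel;
	m->c.parent = mux_map[sel];	/* keep the generic view in sync */
}

int main(void)
{
	struct mux_clk pix = { .c = { .name = "csi_pix_clk" } };

	mux_set_sel(&pix, 1);
	printf("%s parent: %s\n", pix.c.name, pix.c.parent->name);
	return 0;
}
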
diff --git a/arch/arm/mach-msm/clock-8974.c b/arch/arm/mach-msm/clock-8974.c
index 777e0bf..c0a553f 100644
--- a/arch/arm/mach-msm/clock-8974.c
+++ b/arch/arm/mach-msm/clock-8974.c
@@ -694,9 +694,9 @@
 	.en_reg = (void __iomem *)APCS_GPLL_ENA_VOTE_REG,
 	.status_reg = (void __iomem *)GPLL0_STATUS_REG,
 	.status_mask = BIT(17),
-	.parent = &cxo_clk_src.c,
 	.base = &virt_bases[GCC_BASE],
 	.c = {
+		.parent = &cxo_clk_src.c,
 		.rate = 600000000,
 		.dbg_name = "gpll0_clk_src",
 		.ops = &clk_ops_pll_vote,
@@ -709,9 +709,9 @@
 	.en_mask = BIT(1),
 	.status_reg = (void __iomem *)GPLL1_STATUS_REG,
 	.status_mask = BIT(17),
-	.parent = &cxo_clk_src.c,
 	.base = &virt_bases[GCC_BASE],
 	.c = {
+		.parent = &cxo_clk_src.c,
 		.rate = 480000000,
 		.dbg_name = "gpll1_clk_src",
 		.ops = &clk_ops_pll_vote,
@@ -724,9 +724,9 @@
 	.en_mask = BIT(0),
 	.status_reg = (void __iomem *)LPAPLL_STATUS_REG,
 	.status_mask = BIT(17),
-	.parent = &cxo_clk_src.c,
 	.base = &virt_bases[LPASS_BASE],
 	.c = {
+		.parent = &cxo_clk_src.c,
 		.rate = 491520000,
 		.dbg_name = "lpapll0_clk_src",
 		.ops = &clk_ops_pll_vote,
@@ -739,9 +739,9 @@
 	.en_mask = BIT(0),
 	.status_reg = (void __iomem *)MMPLL0_STATUS_REG,
 	.status_mask = BIT(17),
-	.parent = &cxo_clk_src.c,
 	.base = &virt_bases[MMSS_BASE],
 	.c = {
+		.parent = &cxo_clk_src.c,
 		.dbg_name = "mmpll0_clk_src",
 		.rate = 800000000,
 		.ops = &clk_ops_pll_vote,
@@ -754,9 +754,9 @@
 	.en_mask = BIT(1),
 	.status_reg = (void __iomem *)MMPLL1_STATUS_REG,
 	.status_mask = BIT(17),
-	.parent = &cxo_clk_src.c,
 	.base = &virt_bases[MMSS_BASE],
 	.c = {
+		.parent = &cxo_clk_src.c,
 		.dbg_name = "mmpll1_clk_src",
 		.rate = 846000000,
 		.ops = &clk_ops_pll_vote,
@@ -767,9 +767,9 @@
 static struct pll_clk mmpll3_clk_src = {
 	.mode_reg = (void __iomem *)MMPLL3_MODE_REG,
 	.status_reg = (void __iomem *)MMPLL3_STATUS_REG,
-	.parent = &cxo_clk_src.c,
 	.base = &virt_bases[MMSS_BASE],
 	.c = {
+		.parent = &cxo_clk_src.c,
 		.dbg_name = "mmpll3_clk_src",
 		.rate = 1000000000,
 		.ops = &clk_ops_local_pll,
@@ -1512,10 +1512,10 @@
 
 static struct branch_clk gcc_blsp1_qup1_i2c_apps_clk = {
 	.cbcr_reg = BLSP1_QUP1_I2C_APPS_CBCR,
-	.parent = &cxo_clk_src.c,
 	.has_sibling = 1,
 	.base = &virt_bases[GCC_BASE],
 	.c = {
+		.parent = &cxo_clk_src.c,
 		.dbg_name = "gcc_blsp1_qup1_i2c_apps_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(gcc_blsp1_qup1_i2c_apps_clk.c),
@@ -1524,9 +1524,9 @@
 
 static struct branch_clk gcc_blsp1_qup1_spi_apps_clk = {
 	.cbcr_reg = BLSP1_QUP1_SPI_APPS_CBCR,
-	.parent = &blsp1_qup1_spi_apps_clk_src.c,
 	.base = &virt_bases[GCC_BASE],
 	.c = {
+		.parent = &blsp1_qup1_spi_apps_clk_src.c,
 		.dbg_name = "gcc_blsp1_qup1_spi_apps_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(gcc_blsp1_qup1_spi_apps_clk.c),
@@ -1535,10 +1535,10 @@
 
 static struct branch_clk gcc_blsp1_qup2_i2c_apps_clk = {
 	.cbcr_reg = BLSP1_QUP2_I2C_APPS_CBCR,
-	.parent = &cxo_clk_src.c,
 	.has_sibling = 1,
 	.base = &virt_bases[GCC_BASE],
 	.c = {
+		.parent = &cxo_clk_src.c,
 		.dbg_name = "gcc_blsp1_qup2_i2c_apps_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(gcc_blsp1_qup2_i2c_apps_clk.c),
@@ -1547,9 +1547,9 @@
 
 static struct branch_clk gcc_blsp1_qup2_spi_apps_clk = {
 	.cbcr_reg = BLSP1_QUP2_SPI_APPS_CBCR,
-	.parent = &blsp1_qup2_spi_apps_clk_src.c,
 	.base = &virt_bases[GCC_BASE],
 	.c = {
+		.parent = &blsp1_qup2_spi_apps_clk_src.c,
 		.dbg_name = "gcc_blsp1_qup2_spi_apps_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(gcc_blsp1_qup2_spi_apps_clk.c),
@@ -1558,10 +1558,10 @@
 
 static struct branch_clk gcc_blsp1_qup3_i2c_apps_clk = {
 	.cbcr_reg = BLSP1_QUP3_I2C_APPS_CBCR,
-	.parent = &cxo_clk_src.c,
 	.has_sibling = 1,
 	.base = &virt_bases[GCC_BASE],
 	.c = {
+		.parent = &cxo_clk_src.c,
 		.dbg_name = "gcc_blsp1_qup3_i2c_apps_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(gcc_blsp1_qup3_i2c_apps_clk.c),
@@ -1570,9 +1570,9 @@
 
 static struct branch_clk gcc_blsp1_qup3_spi_apps_clk = {
 	.cbcr_reg = BLSP1_QUP3_SPI_APPS_CBCR,
-	.parent = &blsp1_qup3_spi_apps_clk_src.c,
 	.base = &virt_bases[GCC_BASE],
 	.c = {
+		.parent = &blsp1_qup3_spi_apps_clk_src.c,
 		.dbg_name = "gcc_blsp1_qup3_spi_apps_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(gcc_blsp1_qup3_spi_apps_clk.c),
@@ -1581,10 +1581,10 @@
 
 static struct branch_clk gcc_blsp1_qup4_i2c_apps_clk = {
 	.cbcr_reg = BLSP1_QUP4_I2C_APPS_CBCR,
-	.parent = &cxo_clk_src.c,
 	.has_sibling = 1,
 	.base = &virt_bases[GCC_BASE],
 	.c = {
+		.parent = &cxo_clk_src.c,
 		.dbg_name = "gcc_blsp1_qup4_i2c_apps_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(gcc_blsp1_qup4_i2c_apps_clk.c),
@@ -1593,9 +1593,9 @@
 
 static struct branch_clk gcc_blsp1_qup4_spi_apps_clk = {
 	.cbcr_reg = BLSP1_QUP4_SPI_APPS_CBCR,
-	.parent = &blsp1_qup4_spi_apps_clk_src.c,
 	.base = &virt_bases[GCC_BASE],
 	.c = {
+		.parent = &blsp1_qup4_spi_apps_clk_src.c,
 		.dbg_name = "gcc_blsp1_qup4_spi_apps_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(gcc_blsp1_qup4_spi_apps_clk.c),
@@ -1604,10 +1604,10 @@
 
 static struct branch_clk gcc_blsp1_qup5_i2c_apps_clk = {
 	.cbcr_reg = BLSP1_QUP5_I2C_APPS_CBCR,
-	.parent = &cxo_clk_src.c,
 	.has_sibling = 1,
 	.base = &virt_bases[GCC_BASE],
 	.c = {
+		.parent = &cxo_clk_src.c,
 		.dbg_name = "gcc_blsp1_qup5_i2c_apps_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(gcc_blsp1_qup5_i2c_apps_clk.c),
@@ -1616,9 +1616,9 @@
 
 static struct branch_clk gcc_blsp1_qup5_spi_apps_clk = {
 	.cbcr_reg = BLSP1_QUP5_SPI_APPS_CBCR,
-	.parent = &blsp1_qup5_spi_apps_clk_src.c,
 	.base = &virt_bases[GCC_BASE],
 	.c = {
+		.parent = &blsp1_qup5_spi_apps_clk_src.c,
 		.dbg_name = "gcc_blsp1_qup5_spi_apps_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(gcc_blsp1_qup5_spi_apps_clk.c),
@@ -1627,10 +1627,10 @@
 
 static struct branch_clk gcc_blsp1_qup6_i2c_apps_clk = {
 	.cbcr_reg = BLSP1_QUP6_I2C_APPS_CBCR,
-	.parent = &cxo_clk_src.c,
 	.has_sibling = 1,
 	.base = &virt_bases[GCC_BASE],
 	.c = {
+		.parent = &cxo_clk_src.c,
 		.dbg_name = "gcc_blsp1_qup6_i2c_apps_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(gcc_blsp1_qup6_i2c_apps_clk.c),
@@ -1639,9 +1639,9 @@
 
 static struct branch_clk gcc_blsp1_qup6_spi_apps_clk = {
 	.cbcr_reg = BLSP1_QUP6_SPI_APPS_CBCR,
-	.parent = &blsp1_qup6_spi_apps_clk_src.c,
 	.base = &virt_bases[GCC_BASE],
 	.c = {
+		.parent = &blsp1_qup6_spi_apps_clk_src.c,
 		.dbg_name = "gcc_blsp1_qup6_spi_apps_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(gcc_blsp1_qup6_spi_apps_clk.c),
@@ -1650,9 +1650,9 @@
 
 static struct branch_clk gcc_blsp1_uart1_apps_clk = {
 	.cbcr_reg = BLSP1_UART1_APPS_CBCR,
-	.parent = &blsp1_uart1_apps_clk_src.c,
 	.base = &virt_bases[GCC_BASE],
 	.c = {
+		.parent = &blsp1_uart1_apps_clk_src.c,
 		.dbg_name = "gcc_blsp1_uart1_apps_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(gcc_blsp1_uart1_apps_clk.c),
@@ -1661,9 +1661,9 @@
 
 static struct branch_clk gcc_blsp1_uart2_apps_clk = {
 	.cbcr_reg = BLSP1_UART2_APPS_CBCR,
-	.parent = &blsp1_uart2_apps_clk_src.c,
 	.base = &virt_bases[GCC_BASE],
 	.c = {
+		.parent = &blsp1_uart2_apps_clk_src.c,
 		.dbg_name = "gcc_blsp1_uart2_apps_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(gcc_blsp1_uart2_apps_clk.c),
@@ -1672,9 +1672,9 @@
 
 static struct branch_clk gcc_blsp1_uart3_apps_clk = {
 	.cbcr_reg = BLSP1_UART3_APPS_CBCR,
-	.parent = &blsp1_uart3_apps_clk_src.c,
 	.base = &virt_bases[GCC_BASE],
 	.c = {
+		.parent = &blsp1_uart3_apps_clk_src.c,
 		.dbg_name = "gcc_blsp1_uart3_apps_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(gcc_blsp1_uart3_apps_clk.c),
@@ -1683,9 +1683,9 @@
 
 static struct branch_clk gcc_blsp1_uart4_apps_clk = {
 	.cbcr_reg = BLSP1_UART4_APPS_CBCR,
-	.parent = &blsp1_uart4_apps_clk_src.c,
 	.base = &virt_bases[GCC_BASE],
 	.c = {
+		.parent = &blsp1_uart4_apps_clk_src.c,
 		.dbg_name = "gcc_blsp1_uart4_apps_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(gcc_blsp1_uart4_apps_clk.c),
@@ -1694,9 +1694,9 @@
 
 static struct branch_clk gcc_blsp1_uart5_apps_clk = {
 	.cbcr_reg = BLSP1_UART5_APPS_CBCR,
-	.parent = &blsp1_uart5_apps_clk_src.c,
 	.base = &virt_bases[GCC_BASE],
 	.c = {
+		.parent = &blsp1_uart5_apps_clk_src.c,
 		.dbg_name = "gcc_blsp1_uart5_apps_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(gcc_blsp1_uart5_apps_clk.c),
@@ -1705,9 +1705,9 @@
 
 static struct branch_clk gcc_blsp1_uart6_apps_clk = {
 	.cbcr_reg = BLSP1_UART6_APPS_CBCR,
-	.parent = &blsp1_uart6_apps_clk_src.c,
 	.base = &virt_bases[GCC_BASE],
 	.c = {
+		.parent = &blsp1_uart6_apps_clk_src.c,
 		.dbg_name = "gcc_blsp1_uart6_apps_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(gcc_blsp1_uart6_apps_clk.c),
@@ -1740,10 +1740,10 @@
 
 static struct branch_clk gcc_blsp2_qup1_i2c_apps_clk = {
 	.cbcr_reg = BLSP2_QUP1_I2C_APPS_CBCR,
-	.parent = &cxo_clk_src.c,
 	.has_sibling = 1,
 	.base = &virt_bases[GCC_BASE],
 	.c = {
+		.parent = &cxo_clk_src.c,
 		.dbg_name = "gcc_blsp2_qup1_i2c_apps_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(gcc_blsp2_qup1_i2c_apps_clk.c),
@@ -1752,9 +1752,9 @@
 
 static struct branch_clk gcc_blsp2_qup1_spi_apps_clk = {
 	.cbcr_reg = BLSP2_QUP1_SPI_APPS_CBCR,
-	.parent = &blsp2_qup1_spi_apps_clk_src.c,
 	.base = &virt_bases[GCC_BASE],
 	.c = {
+		.parent = &blsp2_qup1_spi_apps_clk_src.c,
 		.dbg_name = "gcc_blsp2_qup1_spi_apps_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(gcc_blsp2_qup1_spi_apps_clk.c),
@@ -1763,10 +1763,10 @@
 
 static struct branch_clk gcc_blsp2_qup2_i2c_apps_clk = {
 	.cbcr_reg = BLSP2_QUP2_I2C_APPS_CBCR,
-	.parent = &cxo_clk_src.c,
 	.has_sibling = 1,
 	.base = &virt_bases[GCC_BASE],
 	.c = {
+		.parent = &cxo_clk_src.c,
 		.dbg_name = "gcc_blsp2_qup2_i2c_apps_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(gcc_blsp2_qup2_i2c_apps_clk.c),
@@ -1775,9 +1775,9 @@
 
 static struct branch_clk gcc_blsp2_qup2_spi_apps_clk = {
 	.cbcr_reg = BLSP2_QUP2_SPI_APPS_CBCR,
-	.parent = &blsp2_qup2_spi_apps_clk_src.c,
 	.base = &virt_bases[GCC_BASE],
 	.c = {
+		.parent = &blsp2_qup2_spi_apps_clk_src.c,
 		.dbg_name = "gcc_blsp2_qup2_spi_apps_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(gcc_blsp2_qup2_spi_apps_clk.c),
@@ -1786,10 +1786,10 @@
 
 static struct branch_clk gcc_blsp2_qup3_i2c_apps_clk = {
 	.cbcr_reg = BLSP2_QUP3_I2C_APPS_CBCR,
-	.parent = &cxo_clk_src.c,
 	.has_sibling = 1,
 	.base = &virt_bases[GCC_BASE],
 	.c = {
+		.parent = &cxo_clk_src.c,
 		.dbg_name = "gcc_blsp2_qup3_i2c_apps_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(gcc_blsp2_qup3_i2c_apps_clk.c),
@@ -1798,9 +1798,9 @@
 
 static struct branch_clk gcc_blsp2_qup3_spi_apps_clk = {
 	.cbcr_reg = BLSP2_QUP3_SPI_APPS_CBCR,
-	.parent = &blsp2_qup3_spi_apps_clk_src.c,
 	.base = &virt_bases[GCC_BASE],
 	.c = {
+		.parent = &blsp2_qup3_spi_apps_clk_src.c,
 		.dbg_name = "gcc_blsp2_qup3_spi_apps_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(gcc_blsp2_qup3_spi_apps_clk.c),
@@ -1809,10 +1809,10 @@
 
 static struct branch_clk gcc_blsp2_qup4_i2c_apps_clk = {
 	.cbcr_reg = BLSP2_QUP4_I2C_APPS_CBCR,
-	.parent = &cxo_clk_src.c,
 	.has_sibling = 1,
 	.base = &virt_bases[GCC_BASE],
 	.c = {
+		.parent = &cxo_clk_src.c,
 		.dbg_name = "gcc_blsp2_qup4_i2c_apps_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(gcc_blsp2_qup4_i2c_apps_clk.c),
@@ -1821,9 +1821,9 @@
 
 static struct branch_clk gcc_blsp2_qup4_spi_apps_clk = {
 	.cbcr_reg = BLSP2_QUP4_SPI_APPS_CBCR,
-	.parent = &blsp2_qup4_spi_apps_clk_src.c,
 	.base = &virt_bases[GCC_BASE],
 	.c = {
+		.parent = &blsp2_qup4_spi_apps_clk_src.c,
 		.dbg_name = "gcc_blsp2_qup4_spi_apps_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(gcc_blsp2_qup4_spi_apps_clk.c),
@@ -1832,10 +1832,10 @@
 
 static struct branch_clk gcc_blsp2_qup5_i2c_apps_clk = {
 	.cbcr_reg = BLSP2_QUP5_I2C_APPS_CBCR,
-	.parent = &cxo_clk_src.c,
 	.has_sibling = 1,
 	.base = &virt_bases[GCC_BASE],
 	.c = {
+		.parent = &cxo_clk_src.c,
 		.dbg_name = "gcc_blsp2_qup5_i2c_apps_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(gcc_blsp2_qup5_i2c_apps_clk.c),
@@ -1844,9 +1844,9 @@
 
 static struct branch_clk gcc_blsp2_qup5_spi_apps_clk = {
 	.cbcr_reg = BLSP2_QUP5_SPI_APPS_CBCR,
-	.parent = &blsp2_qup5_spi_apps_clk_src.c,
 	.base = &virt_bases[GCC_BASE],
 	.c = {
+		.parent = &blsp2_qup5_spi_apps_clk_src.c,
 		.dbg_name = "gcc_blsp2_qup5_spi_apps_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(gcc_blsp2_qup5_spi_apps_clk.c),
@@ -1855,10 +1855,10 @@
 
 static struct branch_clk gcc_blsp2_qup6_i2c_apps_clk = {
 	.cbcr_reg = BLSP2_QUP6_I2C_APPS_CBCR,
-	.parent = &cxo_clk_src.c,
 	.has_sibling = 1,
 	.base = &virt_bases[GCC_BASE],
 	.c = {
+		.parent = &cxo_clk_src.c,
 		.dbg_name = "gcc_blsp2_qup6_i2c_apps_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(gcc_blsp2_qup6_i2c_apps_clk.c),
@@ -1867,9 +1867,9 @@
 
 static struct branch_clk gcc_blsp2_qup6_spi_apps_clk = {
 	.cbcr_reg = BLSP2_QUP6_SPI_APPS_CBCR,
-	.parent = &blsp2_qup6_spi_apps_clk_src.c,
 	.base = &virt_bases[GCC_BASE],
 	.c = {
+		.parent = &blsp2_qup6_spi_apps_clk_src.c,
 		.dbg_name = "gcc_blsp2_qup6_spi_apps_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(gcc_blsp2_qup6_spi_apps_clk.c),
@@ -1878,9 +1878,9 @@
 
 static struct branch_clk gcc_blsp2_uart1_apps_clk = {
 	.cbcr_reg = BLSP2_UART1_APPS_CBCR,
-	.parent = &blsp2_uart1_apps_clk_src.c,
 	.base = &virt_bases[GCC_BASE],
 	.c = {
+		.parent = &blsp2_uart1_apps_clk_src.c,
 		.dbg_name = "gcc_blsp2_uart1_apps_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(gcc_blsp2_uart1_apps_clk.c),
@@ -1889,9 +1889,9 @@
 
 static struct branch_clk gcc_blsp2_uart2_apps_clk = {
 	.cbcr_reg = BLSP2_UART2_APPS_CBCR,
-	.parent = &blsp2_uart2_apps_clk_src.c,
 	.base = &virt_bases[GCC_BASE],
 	.c = {
+		.parent = &blsp2_uart2_apps_clk_src.c,
 		.dbg_name = "gcc_blsp2_uart2_apps_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(gcc_blsp2_uart2_apps_clk.c),
@@ -1900,9 +1900,9 @@
 
 static struct branch_clk gcc_blsp2_uart3_apps_clk = {
 	.cbcr_reg = BLSP2_UART3_APPS_CBCR,
-	.parent = &blsp2_uart3_apps_clk_src.c,
 	.base = &virt_bases[GCC_BASE],
 	.c = {
+		.parent = &blsp2_uart3_apps_clk_src.c,
 		.dbg_name = "gcc_blsp2_uart3_apps_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(gcc_blsp2_uart3_apps_clk.c),
@@ -1911,9 +1911,9 @@
 
 static struct branch_clk gcc_blsp2_uart4_apps_clk = {
 	.cbcr_reg = BLSP2_UART4_APPS_CBCR,
-	.parent = &blsp2_uart4_apps_clk_src.c,
 	.base = &virt_bases[GCC_BASE],
 	.c = {
+		.parent = &blsp2_uart4_apps_clk_src.c,
 		.dbg_name = "gcc_blsp2_uart4_apps_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(gcc_blsp2_uart4_apps_clk.c),
@@ -1922,9 +1922,9 @@
 
 static struct branch_clk gcc_blsp2_uart5_apps_clk = {
 	.cbcr_reg = BLSP2_UART5_APPS_CBCR,
-	.parent = &blsp2_uart5_apps_clk_src.c,
 	.base = &virt_bases[GCC_BASE],
 	.c = {
+		.parent = &blsp2_uart5_apps_clk_src.c,
 		.dbg_name = "gcc_blsp2_uart5_apps_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(gcc_blsp2_uart5_apps_clk.c),
@@ -1933,9 +1933,9 @@
 
 static struct branch_clk gcc_blsp2_uart6_apps_clk = {
 	.cbcr_reg = BLSP2_UART6_APPS_CBCR,
-	.parent = &blsp2_uart6_apps_clk_src.c,
 	.base = &virt_bases[GCC_BASE],
 	.c = {
+		.parent = &blsp2_uart6_apps_clk_src.c,
 		.dbg_name = "gcc_blsp2_uart6_apps_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(gcc_blsp2_uart6_apps_clk.c),
@@ -2016,9 +2016,9 @@
 
 static struct branch_clk gcc_gp1_clk = {
 	.cbcr_reg = GP1_CBCR,
-	.parent = &gp1_clk_src.c,
 	.base = &virt_bases[GCC_BASE],
 	.c = {
+		.parent = &gp1_clk_src.c,
 		.dbg_name = "gcc_gp1_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(gcc_gp1_clk.c),
@@ -2027,9 +2027,9 @@
 
 static struct branch_clk gcc_gp2_clk = {
 	.cbcr_reg = GP2_CBCR,
-	.parent = &gp2_clk_src.c,
 	.base = &virt_bases[GCC_BASE],
 	.c = {
+		.parent = &gp2_clk_src.c,
 		.dbg_name = "gcc_gp2_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(gcc_gp2_clk.c),
@@ -2038,9 +2038,9 @@
 
 static struct branch_clk gcc_gp3_clk = {
 	.cbcr_reg = GP3_CBCR,
-	.parent = &gp3_clk_src.c,
 	.base = &virt_bases[GCC_BASE],
 	.c = {
+		.parent = &gp3_clk_src.c,
 		.dbg_name = "gcc_gp3_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(gcc_gp3_clk.c),
@@ -2049,9 +2049,9 @@
 
 static struct branch_clk gcc_pdm2_clk = {
 	.cbcr_reg = PDM2_CBCR,
-	.parent = &pdm2_clk_src.c,
 	.base = &virt_bases[GCC_BASE],
 	.c = {
+		.parent = &pdm2_clk_src.c,
 		.dbg_name = "gcc_pdm2_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(gcc_pdm2_clk.c),
@@ -2094,9 +2094,9 @@
 
 static struct branch_clk gcc_sdcc1_apps_clk = {
 	.cbcr_reg = SDCC1_APPS_CBCR,
-	.parent = &sdcc1_apps_clk_src.c,
 	.base = &virt_bases[GCC_BASE],
 	.c = {
+		.parent = &sdcc1_apps_clk_src.c,
 		.dbg_name = "gcc_sdcc1_apps_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(gcc_sdcc1_apps_clk.c),
@@ -2116,9 +2116,9 @@
 
 static struct branch_clk gcc_sdcc2_apps_clk = {
 	.cbcr_reg = SDCC2_APPS_CBCR,
-	.parent = &sdcc2_apps_clk_src.c,
 	.base = &virt_bases[GCC_BASE],
 	.c = {
+		.parent = &sdcc2_apps_clk_src.c,
 		.dbg_name = "gcc_sdcc2_apps_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(gcc_sdcc2_apps_clk.c),
@@ -2138,9 +2138,9 @@
 
 static struct branch_clk gcc_sdcc3_apps_clk = {
 	.cbcr_reg = SDCC3_APPS_CBCR,
-	.parent = &sdcc3_apps_clk_src.c,
 	.base = &virt_bases[GCC_BASE],
 	.c = {
+		.parent = &sdcc3_apps_clk_src.c,
 		.dbg_name = "gcc_sdcc3_apps_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(gcc_sdcc3_apps_clk.c),
@@ -2160,9 +2160,9 @@
 
 static struct branch_clk gcc_sdcc4_apps_clk = {
 	.cbcr_reg = SDCC4_APPS_CBCR,
-	.parent = &sdcc4_apps_clk_src.c,
 	.base = &virt_bases[GCC_BASE],
 	.c = {
+		.parent = &sdcc4_apps_clk_src.c,
 		.dbg_name = "gcc_sdcc4_apps_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(gcc_sdcc4_apps_clk.c),
@@ -2182,9 +2182,9 @@
 
 static struct branch_clk gcc_tsif_ref_clk = {
 	.cbcr_reg = TSIF_REF_CBCR,
-	.parent = &tsif_ref_clk_src.c,
 	.base = &virt_bases[GCC_BASE],
 	.c = {
+		.parent = &tsif_ref_clk_src.c,
 		.dbg_name = "gcc_tsif_ref_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(gcc_tsif_ref_clk.c),
@@ -2193,10 +2193,10 @@
 
 struct branch_clk gcc_sys_noc_usb3_axi_clk = {
 	.cbcr_reg = SYS_NOC_USB3_AXI_CBCR,
-	.parent = &usb30_master_clk_src.c,
 	.has_sibling = 1,
 	.base = &virt_bases[GCC_BASE],
 	.c = {
+		.parent = &usb30_master_clk_src.c,
 		.dbg_name = "gcc_sys_noc_usb3_axi_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(gcc_sys_noc_usb3_axi_clk.c),
@@ -2206,10 +2206,10 @@
 static struct branch_clk gcc_usb30_master_clk = {
 	.cbcr_reg = USB30_MASTER_CBCR,
 	.bcr_reg = USB_30_BCR,
-	.parent = &usb30_master_clk_src.c,
 	.has_sibling = 1,
 	.base = &virt_bases[GCC_BASE],
 	.c = {
+		.parent = &usb30_master_clk_src.c,
 		.dbg_name = "gcc_usb30_master_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(gcc_usb30_master_clk.c),
@@ -2219,9 +2219,9 @@
 
 static struct branch_clk gcc_usb30_mock_utmi_clk = {
 	.cbcr_reg = USB30_MOCK_UTMI_CBCR,
-	.parent = &usb30_mock_utmi_clk_src.c,
 	.base = &virt_bases[GCC_BASE],
 	.c = {
+		.parent = &usb30_mock_utmi_clk_src.c,
 		.dbg_name = "gcc_usb30_mock_utmi_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(gcc_usb30_mock_utmi_clk.c),
@@ -2275,9 +2275,9 @@
 static struct branch_clk gcc_usb_hs_system_clk = {
 	.cbcr_reg = USB_HS_SYSTEM_CBCR,
 	.bcr_reg = USB_HS_BCR,
-	.parent = &usb_hs_system_clk_src.c,
 	.base = &virt_bases[GCC_BASE],
 	.c = {
+		.parent = &usb_hs_system_clk_src.c,
 		.dbg_name = "gcc_usb_hs_system_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(gcc_usb_hs_system_clk.c),
@@ -2298,9 +2298,9 @@
 static struct branch_clk gcc_usb_hsic_clk = {
 	.cbcr_reg = USB_HSIC_CBCR,
 	.bcr_reg = USB_HS_HSIC_BCR,
-	.parent = &usb_hsic_clk_src.c,
 	.base = &virt_bases[GCC_BASE],
 	.c = {
+		.parent = &usb_hsic_clk_src.c,
 		.dbg_name = "gcc_usb_hsic_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(gcc_usb_hsic_clk.c),
@@ -2309,9 +2309,9 @@
 
 static struct branch_clk gcc_usb_hsic_io_cal_clk = {
 	.cbcr_reg = USB_HSIC_IO_CAL_CBCR,
-	.parent = &usb_hsic_io_cal_clk_src.c,
 	.base = &virt_bases[GCC_BASE],
 	.c = {
+		.parent = &usb_hsic_io_cal_clk_src.c,
 		.dbg_name = "gcc_usb_hsic_io_cal_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(gcc_usb_hsic_io_cal_clk.c),
@@ -2320,9 +2320,9 @@
 
 static struct branch_clk gcc_usb_hsic_system_clk = {
 	.cbcr_reg = USB_HSIC_SYSTEM_CBCR,
-	.parent = &usb_hsic_system_clk_src.c,
 	.base = &virt_bases[GCC_BASE],
 	.c = {
+		.parent = &usb_hsic_system_clk_src.c,
 		.dbg_name = "gcc_usb_hsic_system_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(gcc_usb_hsic_system_clk.c),
@@ -2807,18 +2807,15 @@
 	},
 };
 
-static struct clk *dsi_pll_clk_get_parent(struct clk *c)
-{
-	return &cxo_clk_src.c;
-}
-
 static struct clk dsipll0_byte_clk_src = {
+	.parent = &cxo_clk_src.c,
 	.dbg_name = "dsipll0_byte_clk_src",
 	.ops = &clk_ops_dsi_byte_pll,
 	CLK_INIT(dsipll0_byte_clk_src),
 };
 
 static struct clk dsipll0_pixel_clk_src = {
+	.parent = &cxo_clk_src.c,
 	.dbg_name = "dsipll0_pixel_clk_src",
 	.ops = &clk_ops_dsi_pixel_pll,
 	CLK_INIT(dsipll0_pixel_clk_src),
@@ -2900,6 +2897,7 @@
 	.current_freq = &byte_freq,
 	.base = &virt_bases[MMSS_BASE],
 	.c = {
+		.parent = &dsipll0_byte_clk_src,
 		.dbg_name = "byte0_clk_src",
 		.ops = &clk_ops_byte,
 		VDD_DIG_FMAX_MAP3(LOW, 93800000, NOMINAL, 187500000,
@@ -2913,6 +2911,7 @@
 	.current_freq = &byte_freq,
 	.base = &virt_bases[MMSS_BASE],
 	.c = {
+		.parent = &dsipll0_byte_clk_src,
 		.dbg_name = "byte1_clk_src",
 		.ops = &clk_ops_byte,
 		VDD_DIG_FMAX_MAP3(LOW, 93800000, NOMINAL, 187500000,
@@ -3045,19 +3044,14 @@
 	return rc;
 }
 
-static struct clk *hdmi_pll_clk_get_parent(struct clk *c)
-{
-	return &cxo_clk_src.c;
-}
-
 static struct clk_ops clk_ops_hdmi_pll = {
 	.enable = hdmi_pll_clk_enable,
 	.disable = hdmi_pll_clk_disable,
 	.set_rate = hdmi_pll_clk_set_rate,
-	.get_parent = hdmi_pll_clk_get_parent,
 };
 
 static struct clk hdmipll_clk_src = {
+	.parent = &cxo_clk_src.c,
 	.dbg_name = "hdmipll_clk_src",
 	.ops = &clk_ops_hdmi_pll,
 	CLK_INIT(hdmipll_clk_src),
@@ -3127,6 +3121,7 @@
 	.current_freq = &pixel_freq,
 	.base = &virt_bases[MMSS_BASE],
 	.c = {
+		.parent = &dsipll0_pixel_clk_src,
 		.dbg_name = "pclk0_clk_src",
 		.ops = &clk_ops_pixel,
 		VDD_DIG_FMAX_MAP2(LOW, 125000000, NOMINAL, 250000000),
@@ -3139,6 +3134,7 @@
 	.current_freq = &pixel_freq,
 	.base = &virt_bases[MMSS_BASE],
 	.c = {
+		.parent = &dsipll0_pixel_clk_src,
 		.dbg_name = "pclk1_clk_src",
 		.ops = &clk_ops_pixel,
 		VDD_DIG_FMAX_MAP2(LOW, 125000000, NOMINAL, 250000000),
@@ -3203,10 +3199,10 @@
 
 static struct branch_clk camss_cci_cci_clk = {
 	.cbcr_reg = CAMSS_CCI_CCI_CBCR,
-	.parent = &cci_clk_src.c,
 	.has_sibling = 0,
 	.base = &virt_bases[MMSS_BASE],
 	.c = {
+		.parent = &cci_clk_src.c,
 		.dbg_name = "camss_cci_cci_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(camss_cci_cci_clk.c),
@@ -3226,10 +3222,10 @@
 
 static struct branch_clk camss_csi0_clk = {
 	.cbcr_reg = CAMSS_CSI0_CBCR,
-	.parent = &csi0_clk_src.c,
 	.has_sibling = 1,
 	.base = &virt_bases[MMSS_BASE],
 	.c = {
+		.parent = &csi0_clk_src.c,
 		.dbg_name = "camss_csi0_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(camss_csi0_clk.c),
@@ -3238,10 +3234,10 @@
 
 static struct branch_clk camss_csi0phy_clk = {
 	.cbcr_reg = CAMSS_CSI0PHY_CBCR,
-	.parent = &csi0_clk_src.c,
 	.has_sibling = 1,
 	.base = &virt_bases[MMSS_BASE],
 	.c = {
+		.parent = &csi0_clk_src.c,
 		.dbg_name = "camss_csi0phy_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(camss_csi0phy_clk.c),
@@ -3250,10 +3246,10 @@
 
 static struct branch_clk camss_csi0pix_clk = {
 	.cbcr_reg = CAMSS_CSI0PIX_CBCR,
-	.parent = &csi0_clk_src.c,
 	.has_sibling = 1,
 	.base = &virt_bases[MMSS_BASE],
 	.c = {
+		.parent = &csi0_clk_src.c,
 		.dbg_name = "camss_csi0pix_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(camss_csi0pix_clk.c),
@@ -3262,10 +3258,10 @@
 
 static struct branch_clk camss_csi0rdi_clk = {
 	.cbcr_reg = CAMSS_CSI0RDI_CBCR,
-	.parent = &csi0_clk_src.c,
 	.has_sibling = 1,
 	.base = &virt_bases[MMSS_BASE],
 	.c = {
+		.parent = &csi0_clk_src.c,
 		.dbg_name = "camss_csi0rdi_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(camss_csi0rdi_clk.c),
@@ -3285,10 +3281,10 @@
 
 static struct branch_clk camss_csi1_clk = {
 	.cbcr_reg = CAMSS_CSI1_CBCR,
-	.parent = &csi1_clk_src.c,
 	.has_sibling = 1,
 	.base = &virt_bases[MMSS_BASE],
 	.c = {
+		.parent = &csi1_clk_src.c,
 		.dbg_name = "camss_csi1_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(camss_csi1_clk.c),
@@ -3297,10 +3293,10 @@
 
 static struct branch_clk camss_csi1phy_clk = {
 	.cbcr_reg = CAMSS_CSI1PHY_CBCR,
-	.parent = &csi1_clk_src.c,
 	.has_sibling = 1,
 	.base = &virt_bases[MMSS_BASE],
 	.c = {
+		.parent = &csi1_clk_src.c,
 		.dbg_name = "camss_csi1phy_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(camss_csi1phy_clk.c),
@@ -3309,10 +3305,10 @@
 
 static struct branch_clk camss_csi1pix_clk = {
 	.cbcr_reg = CAMSS_CSI1PIX_CBCR,
-	.parent = &csi1_clk_src.c,
 	.has_sibling = 1,
 	.base = &virt_bases[MMSS_BASE],
 	.c = {
+		.parent = &csi1_clk_src.c,
 		.dbg_name = "camss_csi1pix_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(camss_csi1pix_clk.c),
@@ -3321,10 +3317,10 @@
 
 static struct branch_clk camss_csi1rdi_clk = {
 	.cbcr_reg = CAMSS_CSI1RDI_CBCR,
-	.parent = &csi1_clk_src.c,
 	.has_sibling = 1,
 	.base = &virt_bases[MMSS_BASE],
 	.c = {
+		.parent = &csi1_clk_src.c,
 		.dbg_name = "camss_csi1rdi_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(camss_csi1rdi_clk.c),
@@ -3344,10 +3340,10 @@
 
 static struct branch_clk camss_csi2_clk = {
 	.cbcr_reg = CAMSS_CSI2_CBCR,
-	.parent = &csi2_clk_src.c,
 	.has_sibling = 1,
 	.base = &virt_bases[MMSS_BASE],
 	.c = {
+		.parent = &csi2_clk_src.c,
 		.dbg_name = "camss_csi2_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(camss_csi2_clk.c),
@@ -3356,10 +3352,10 @@
 
 static struct branch_clk camss_csi2phy_clk = {
 	.cbcr_reg = CAMSS_CSI2PHY_CBCR,
-	.parent = &csi2_clk_src.c,
 	.has_sibling = 1,
 	.base = &virt_bases[MMSS_BASE],
 	.c = {
+		.parent = &csi2_clk_src.c,
 		.dbg_name = "camss_csi2phy_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(camss_csi2phy_clk.c),
@@ -3368,10 +3364,10 @@
 
 static struct branch_clk camss_csi2pix_clk = {
 	.cbcr_reg = CAMSS_CSI2PIX_CBCR,
-	.parent = &csi2_clk_src.c,
 	.has_sibling = 1,
 	.base = &virt_bases[MMSS_BASE],
 	.c = {
+		.parent = &csi2_clk_src.c,
 		.dbg_name = "camss_csi2pix_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(camss_csi2pix_clk.c),
@@ -3380,10 +3376,10 @@
 
 static struct branch_clk camss_csi2rdi_clk = {
 	.cbcr_reg = CAMSS_CSI2RDI_CBCR,
-	.parent = &csi2_clk_src.c,
 	.has_sibling = 1,
 	.base = &virt_bases[MMSS_BASE],
 	.c = {
+		.parent = &csi2_clk_src.c,
 		.dbg_name = "camss_csi2rdi_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(camss_csi2rdi_clk.c),
@@ -3403,10 +3399,10 @@
 
 static struct branch_clk camss_csi3_clk = {
 	.cbcr_reg = CAMSS_CSI3_CBCR,
-	.parent = &csi3_clk_src.c,
 	.has_sibling = 1,
 	.base = &virt_bases[MMSS_BASE],
 	.c = {
+		.parent = &csi3_clk_src.c,
 		.dbg_name = "camss_csi3_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(camss_csi3_clk.c),
@@ -3415,10 +3411,10 @@
 
 static struct branch_clk camss_csi3phy_clk = {
 	.cbcr_reg = CAMSS_CSI3PHY_CBCR,
-	.parent = &csi3_clk_src.c,
 	.has_sibling = 1,
 	.base = &virt_bases[MMSS_BASE],
 	.c = {
+		.parent = &csi3_clk_src.c,
 		.dbg_name = "camss_csi3phy_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(camss_csi3phy_clk.c),
@@ -3427,10 +3423,10 @@
 
 static struct branch_clk camss_csi3pix_clk = {
 	.cbcr_reg = CAMSS_CSI3PIX_CBCR,
-	.parent = &csi3_clk_src.c,
 	.has_sibling = 1,
 	.base = &virt_bases[MMSS_BASE],
 	.c = {
+		.parent = &csi3_clk_src.c,
 		.dbg_name = "camss_csi3pix_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(camss_csi3pix_clk.c),
@@ -3439,10 +3435,10 @@
 
 static struct branch_clk camss_csi3rdi_clk = {
 	.cbcr_reg = CAMSS_CSI3RDI_CBCR,
-	.parent = &csi3_clk_src.c,
 	.has_sibling = 1,
 	.base = &virt_bases[MMSS_BASE],
 	.c = {
+		.parent = &csi3_clk_src.c,
 		.dbg_name = "camss_csi3rdi_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(camss_csi3rdi_clk.c),
@@ -3451,10 +3447,10 @@
 
 static struct branch_clk camss_csi_vfe0_clk = {
 	.cbcr_reg = CAMSS_CSI_VFE0_CBCR,
-	.parent = &vfe0_clk_src.c,
 	.has_sibling = 1,
 	.base = &virt_bases[MMSS_BASE],
 	.c = {
+		.parent = &vfe0_clk_src.c,
 		.dbg_name = "camss_csi_vfe0_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(camss_csi_vfe0_clk.c),
@@ -3463,10 +3459,10 @@
 
 static struct branch_clk camss_csi_vfe1_clk = {
 	.cbcr_reg = CAMSS_CSI_VFE1_CBCR,
-	.parent = &vfe1_clk_src.c,
 	.has_sibling = 1,
 	.base = &virt_bases[MMSS_BASE],
 	.c = {
+		.parent = &vfe1_clk_src.c,
 		.dbg_name = "camss_csi_vfe1_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(camss_csi_vfe1_clk.c),
@@ -3475,10 +3471,10 @@
 
 static struct branch_clk camss_gp0_clk = {
 	.cbcr_reg = CAMSS_GP0_CBCR,
-	.parent = &mmss_gp0_clk_src.c,
 	.has_sibling = 0,
 	.base = &virt_bases[MMSS_BASE],
 	.c = {
+		.parent = &mmss_gp0_clk_src.c,
 		.dbg_name = "camss_gp0_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(camss_gp0_clk.c),
@@ -3487,10 +3483,10 @@
 
 static struct branch_clk camss_gp1_clk = {
 	.cbcr_reg = CAMSS_GP1_CBCR,
-	.parent = &mmss_gp1_clk_src.c,
 	.has_sibling = 0,
 	.base = &virt_bases[MMSS_BASE],
 	.c = {
+		.parent = &mmss_gp1_clk_src.c,
 		.dbg_name = "camss_gp1_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(camss_gp1_clk.c),
@@ -3510,10 +3506,10 @@
 
 static struct branch_clk camss_jpeg_jpeg0_clk = {
 	.cbcr_reg = CAMSS_JPEG_JPEG0_CBCR,
-	.parent = &jpeg0_clk_src.c,
 	.has_sibling = 0,
 	.base = &virt_bases[MMSS_BASE],
 	.c = {
+		.parent = &jpeg0_clk_src.c,
 		.dbg_name = "camss_jpeg_jpeg0_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(camss_jpeg_jpeg0_clk.c),
@@ -3522,10 +3518,10 @@
 
 static struct branch_clk camss_jpeg_jpeg1_clk = {
 	.cbcr_reg = CAMSS_JPEG_JPEG1_CBCR,
-	.parent = &jpeg1_clk_src.c,
 	.has_sibling = 0,
 	.base = &virt_bases[MMSS_BASE],
 	.c = {
+		.parent = &jpeg1_clk_src.c,
 		.dbg_name = "camss_jpeg_jpeg1_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(camss_jpeg_jpeg1_clk.c),
@@ -3534,10 +3530,10 @@
 
 static struct branch_clk camss_jpeg_jpeg2_clk = {
 	.cbcr_reg = CAMSS_JPEG_JPEG2_CBCR,
-	.parent = &jpeg2_clk_src.c,
 	.has_sibling = 0,
 	.base = &virt_bases[MMSS_BASE],
 	.c = {
+		.parent = &jpeg2_clk_src.c,
 		.dbg_name = "camss_jpeg_jpeg2_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(camss_jpeg_jpeg2_clk.c),
@@ -3557,10 +3553,10 @@
 
 static struct branch_clk camss_jpeg_jpeg_axi_clk = {
 	.cbcr_reg = CAMSS_JPEG_JPEG_AXI_CBCR,
-	.parent = &axi_clk_src.c,
 	.has_sibling = 1,
 	.base = &virt_bases[MMSS_BASE],
 	.c = {
+		.parent = &axi_clk_src.c,
 		.dbg_name = "camss_jpeg_jpeg_axi_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(camss_jpeg_jpeg_axi_clk.c),
@@ -3569,10 +3565,10 @@
 
 static struct branch_clk camss_jpeg_jpeg_ocmemnoc_clk = {
 	.cbcr_reg = CAMSS_JPEG_JPEG_OCMEMNOC_CBCR,
-	.parent = &ocmemnoc_clk_src.c,
 	.has_sibling = 1,
 	.base = &virt_bases[MMSS_BASE],
 	.c = {
+		.parent = &ocmemnoc_clk_src.c,
 		.dbg_name = "camss_jpeg_jpeg_ocmemnoc_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(camss_jpeg_jpeg_ocmemnoc_clk.c),
@@ -3581,10 +3577,10 @@
 
 static struct branch_clk camss_mclk0_clk = {
 	.cbcr_reg = CAMSS_MCLK0_CBCR,
-	.parent = &mclk0_clk_src.c,
 	.has_sibling = 0,
 	.base = &virt_bases[MMSS_BASE],
 	.c = {
+		.parent = &mclk0_clk_src.c,
 		.dbg_name = "camss_mclk0_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(camss_mclk0_clk.c),
@@ -3593,10 +3589,10 @@
 
 static struct branch_clk camss_mclk1_clk = {
 	.cbcr_reg = CAMSS_MCLK1_CBCR,
-	.parent = &mclk1_clk_src.c,
 	.has_sibling = 0,
 	.base = &virt_bases[MMSS_BASE],
 	.c = {
+		.parent = &mclk1_clk_src.c,
 		.dbg_name = "camss_mclk1_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(camss_mclk1_clk.c),
@@ -3605,10 +3601,10 @@
 
 static struct branch_clk camss_mclk2_clk = {
 	.cbcr_reg = CAMSS_MCLK2_CBCR,
-	.parent = &mclk2_clk_src.c,
 	.has_sibling = 0,
 	.base = &virt_bases[MMSS_BASE],
 	.c = {
+		.parent = &mclk2_clk_src.c,
 		.dbg_name = "camss_mclk2_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(camss_mclk2_clk.c),
@@ -3617,10 +3613,10 @@
 
 static struct branch_clk camss_mclk3_clk = {
 	.cbcr_reg = CAMSS_MCLK3_CBCR,
-	.parent = &mclk3_clk_src.c,
 	.has_sibling = 0,
 	.base = &virt_bases[MMSS_BASE],
 	.c = {
+		.parent = &mclk3_clk_src.c,
 		.dbg_name = "camss_mclk3_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(camss_mclk3_clk.c),
@@ -3640,10 +3636,10 @@
 
 static struct branch_clk camss_phy0_csi0phytimer_clk = {
 	.cbcr_reg = CAMSS_PHY0_CSI0PHYTIMER_CBCR,
-	.parent = &csi0phytimer_clk_src.c,
 	.has_sibling = 0,
 	.base = &virt_bases[MMSS_BASE],
 	.c = {
+		.parent = &csi0phytimer_clk_src.c,
 		.dbg_name = "camss_phy0_csi0phytimer_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(camss_phy0_csi0phytimer_clk.c),
@@ -3652,10 +3648,10 @@
 
 static struct branch_clk camss_phy1_csi1phytimer_clk = {
 	.cbcr_reg = CAMSS_PHY1_CSI1PHYTIMER_CBCR,
-	.parent = &csi1phytimer_clk_src.c,
 	.has_sibling = 0,
 	.base = &virt_bases[MMSS_BASE],
 	.c = {
+		.parent = &csi1phytimer_clk_src.c,
 		.dbg_name = "camss_phy1_csi1phytimer_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(camss_phy1_csi1phytimer_clk.c),
@@ -3664,10 +3660,10 @@
 
 static struct branch_clk camss_phy2_csi2phytimer_clk = {
 	.cbcr_reg = CAMSS_PHY2_CSI2PHYTIMER_CBCR,
-	.parent = &csi2phytimer_clk_src.c,
 	.has_sibling = 0,
 	.base = &virt_bases[MMSS_BASE],
 	.c = {
+		.parent = &csi2phytimer_clk_src.c,
 		.dbg_name = "camss_phy2_csi2phytimer_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(camss_phy2_csi2phytimer_clk.c),
@@ -3698,10 +3694,10 @@
 
 static struct branch_clk camss_vfe_cpp_clk = {
 	.cbcr_reg = CAMSS_VFE_CPP_CBCR,
-	.parent = &cpp_clk_src.c,
 	.has_sibling = 0,
 	.base = &virt_bases[MMSS_BASE],
 	.c = {
+		.parent = &cpp_clk_src.c,
 		.dbg_name = "camss_vfe_cpp_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(camss_vfe_cpp_clk.c),
@@ -3710,10 +3706,10 @@
 
 static struct branch_clk camss_vfe_vfe0_clk = {
 	.cbcr_reg = CAMSS_VFE_VFE0_CBCR,
-	.parent = &vfe0_clk_src.c,
 	.has_sibling = 1,
 	.base = &virt_bases[MMSS_BASE],
 	.c = {
+		.parent = &vfe0_clk_src.c,
 		.dbg_name = "camss_vfe_vfe0_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(camss_vfe_vfe0_clk.c),
@@ -3722,10 +3718,10 @@
 
 static struct branch_clk camss_vfe_vfe1_clk = {
 	.cbcr_reg = CAMSS_VFE_VFE1_CBCR,
-	.parent = &vfe1_clk_src.c,
 	.has_sibling = 1,
 	.base = &virt_bases[MMSS_BASE],
 	.c = {
+		.parent = &vfe1_clk_src.c,
 		.dbg_name = "camss_vfe_vfe1_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(camss_vfe_vfe1_clk.c),
@@ -3745,10 +3741,10 @@
 
 static struct branch_clk camss_vfe_vfe_axi_clk = {
 	.cbcr_reg = CAMSS_VFE_VFE_AXI_CBCR,
-	.parent = &axi_clk_src.c,
 	.has_sibling = 1,
 	.base = &virt_bases[MMSS_BASE],
 	.c = {
+		.parent = &axi_clk_src.c,
 		.dbg_name = "camss_vfe_vfe_axi_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(camss_vfe_vfe_axi_clk.c),
@@ -3757,10 +3753,10 @@
 
 static struct branch_clk camss_vfe_vfe_ocmemnoc_clk = {
 	.cbcr_reg = CAMSS_VFE_VFE_OCMEMNOC_CBCR,
-	.parent = &ocmemnoc_clk_src.c,
 	.has_sibling = 1,
 	.base = &virt_bases[MMSS_BASE],
 	.c = {
+		.parent = &ocmemnoc_clk_src.c,
 		.dbg_name = "camss_vfe_vfe_ocmemnoc_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(camss_vfe_vfe_ocmemnoc_clk.c),
@@ -3780,10 +3776,10 @@
 
 static struct branch_clk mdss_axi_clk = {
 	.cbcr_reg = MDSS_AXI_CBCR,
-	.parent = &axi_clk_src.c,
 	.has_sibling = 1,
 	.base = &virt_bases[MMSS_BASE],
 	.c = {
+		.parent = &axi_clk_src.c,
 		.dbg_name = "mdss_axi_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(mdss_axi_clk.c),
@@ -3792,10 +3788,10 @@
 
 static struct branch_clk mdss_byte0_clk = {
 	.cbcr_reg = MDSS_BYTE0_CBCR,
-	.parent = &byte0_clk_src.c,
 	.has_sibling = 0,
 	.base = &virt_bases[MMSS_BASE],
 	.c = {
+		.parent = &byte0_clk_src.c,
 		.dbg_name = "mdss_byte0_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(mdss_byte0_clk.c),
@@ -3804,10 +3800,10 @@
 
 static struct branch_clk mdss_byte1_clk = {
 	.cbcr_reg = MDSS_BYTE1_CBCR,
-	.parent = &byte1_clk_src.c,
 	.has_sibling = 0,
 	.base = &virt_bases[MMSS_BASE],
 	.c = {
+		.parent = &byte1_clk_src.c,
 		.dbg_name = "mdss_byte1_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(mdss_byte1_clk.c),
@@ -3816,10 +3812,10 @@
 
 static struct branch_clk mdss_edpaux_clk = {
 	.cbcr_reg = MDSS_EDPAUX_CBCR,
-	.parent = &edpaux_clk_src.c,
 	.has_sibling = 0,
 	.base = &virt_bases[MMSS_BASE],
 	.c = {
+		.parent = &edpaux_clk_src.c,
 		.dbg_name = "mdss_edpaux_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(mdss_edpaux_clk.c),
@@ -3828,10 +3824,10 @@
 
 static struct branch_clk mdss_edplink_clk = {
 	.cbcr_reg = MDSS_EDPLINK_CBCR,
-	.parent = &edplink_clk_src.c,
 	.has_sibling = 0,
 	.base = &virt_bases[MMSS_BASE],
 	.c = {
+		.parent = &edplink_clk_src.c,
 		.dbg_name = "mdss_edplink_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(mdss_edplink_clk.c),
@@ -3840,10 +3836,10 @@
 
 static struct branch_clk mdss_edppixel_clk = {
 	.cbcr_reg = MDSS_EDPPIXEL_CBCR,
-	.parent = &edppixel_clk_src.c,
 	.has_sibling = 0,
 	.base = &virt_bases[MMSS_BASE],
 	.c = {
+		.parent = &edppixel_clk_src.c,
 		.dbg_name = "mdss_edppixel_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(mdss_edppixel_clk.c),
@@ -3852,10 +3848,10 @@
 
 static struct branch_clk mdss_esc0_clk = {
 	.cbcr_reg = MDSS_ESC0_CBCR,
-	.parent = &esc0_clk_src.c,
 	.has_sibling = 0,
 	.base = &virt_bases[MMSS_BASE],
 	.c = {
+		.parent = &esc0_clk_src.c,
 		.dbg_name = "mdss_esc0_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(mdss_esc0_clk.c),
@@ -3864,10 +3860,10 @@
 
 static struct branch_clk mdss_esc1_clk = {
 	.cbcr_reg = MDSS_ESC1_CBCR,
-	.parent = &esc1_clk_src.c,
 	.has_sibling = 0,
 	.base = &virt_bases[MMSS_BASE],
 	.c = {
+		.parent = &esc1_clk_src.c,
 		.dbg_name = "mdss_esc1_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(mdss_esc1_clk.c),
@@ -3876,10 +3872,10 @@
 
 static struct branch_clk mdss_extpclk_clk = {
 	.cbcr_reg = MDSS_EXTPCLK_CBCR,
-	.parent = &extpclk_clk_src.c,
 	.has_sibling = 0,
 	.base = &virt_bases[MMSS_BASE],
 	.c = {
+		.parent = &extpclk_clk_src.c,
 		.dbg_name = "mdss_extpclk_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(mdss_extpclk_clk.c),
@@ -3899,10 +3895,10 @@
 
 static struct branch_clk mdss_hdmi_clk = {
 	.cbcr_reg = MDSS_HDMI_CBCR,
-	.parent = &hdmi_clk_src.c,
 	.has_sibling = 0,
 	.base = &virt_bases[MMSS_BASE],
 	.c = {
+		.parent = &hdmi_clk_src.c,
 		.dbg_name = "mdss_hdmi_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(mdss_hdmi_clk.c),
@@ -3911,10 +3907,10 @@
 
 static struct branch_clk mdss_mdp_clk = {
 	.cbcr_reg = MDSS_MDP_CBCR,
-	.parent = &mdp_clk_src.c,
 	.has_sibling = 1,
 	.base = &virt_bases[MMSS_BASE],
 	.c = {
+		.parent = &mdp_clk_src.c,
 		.dbg_name = "mdss_mdp_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(mdss_mdp_clk.c),
@@ -3923,10 +3919,10 @@
 
 static struct branch_clk mdss_mdp_lut_clk = {
 	.cbcr_reg = MDSS_MDP_LUT_CBCR,
-	.parent = &mdp_clk_src.c,
 	.has_sibling = 1,
 	.base = &virt_bases[MMSS_BASE],
 	.c = {
+		.parent = &mdp_clk_src.c,
 		.dbg_name = "mdss_mdp_lut_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(mdss_mdp_lut_clk.c),
@@ -3935,10 +3931,10 @@
 
 static struct branch_clk mdss_pclk0_clk = {
 	.cbcr_reg = MDSS_PCLK0_CBCR,
-	.parent = &pclk0_clk_src.c,
 	.has_sibling = 0,
 	.base = &virt_bases[MMSS_BASE],
 	.c = {
+		.parent = &pclk0_clk_src.c,
 		.dbg_name = "mdss_pclk0_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(mdss_pclk0_clk.c),
@@ -3947,10 +3943,10 @@
 
 static struct branch_clk mdss_pclk1_clk = {
 	.cbcr_reg = MDSS_PCLK1_CBCR,
-	.parent = &pclk1_clk_src.c,
 	.has_sibling = 0,
 	.base = &virt_bases[MMSS_BASE],
 	.c = {
+		.parent = &pclk1_clk_src.c,
 		.dbg_name = "mdss_pclk1_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(mdss_pclk1_clk.c),
@@ -3959,10 +3955,10 @@
 
 static struct branch_clk mdss_vsync_clk = {
 	.cbcr_reg = MDSS_VSYNC_CBCR,
-	.parent = &vsync_clk_src.c,
 	.has_sibling = 0,
 	.base = &virt_bases[MMSS_BASE],
 	.c = {
+		.parent = &vsync_clk_src.c,
 		.dbg_name = "mdss_vsync_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(mdss_vsync_clk.c),
@@ -3993,10 +3989,10 @@
 
 static struct branch_clk mmss_mmssnoc_axi_clk = {
 	.cbcr_reg = MMSS_MMSSNOC_AXI_CBCR,
-	.parent = &axi_clk_src.c,
 	.has_sibling = 1,
 	.base = &virt_bases[MMSS_BASE],
 	.c = {
+		.parent = &axi_clk_src.c,
 		.dbg_name = "mmss_mmssnoc_axi_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(mmss_mmssnoc_axi_clk.c),
@@ -4005,11 +4001,11 @@
 
 static struct branch_clk mmss_s0_axi_clk = {
 	.cbcr_reg = MMSS_S0_AXI_CBCR,
-	.parent = &axi_clk_src.c,
 	/* The bus driver needs set_rate to go through to the parent */
 	.has_sibling = 0,
 	.base = &virt_bases[MMSS_BASE],
 	.c = {
+		.parent = &axi_clk_src.c,
 		.dbg_name = "mmss_s0_axi_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(mmss_s0_axi_clk.c),
@@ -4019,11 +4015,11 @@
 
 struct branch_clk ocmemnoc_clk = {
 	.cbcr_reg = OCMEMNOC_CBCR,
-	.parent = &ocmemnoc_clk_src.c,
 	.has_sibling = 0,
 	.bcr_reg = 0x50b0,
 	.base = &virt_bases[MMSS_BASE],
 	.c = {
+		.parent = &ocmemnoc_clk_src.c,
 		.dbg_name = "ocmemnoc_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(ocmemnoc_clk.c),
@@ -4032,10 +4028,10 @@
 
 struct branch_clk ocmemcx_ocmemnoc_clk = {
 	.cbcr_reg = OCMEMCX_OCMEMNOC_CBCR,
-	.parent = &ocmemnoc_clk_src.c,
 	.has_sibling = 1,
 	.base = &virt_bases[MMSS_BASE],
 	.c = {
+		.parent = &ocmemnoc_clk_src.c,
 		.dbg_name = "ocmemcx_ocmemnoc_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(ocmemcx_ocmemnoc_clk.c),
@@ -4055,10 +4051,10 @@
 
 static struct branch_clk venus0_axi_clk = {
 	.cbcr_reg = VENUS0_AXI_CBCR,
-	.parent = &axi_clk_src.c,
 	.has_sibling = 1,
 	.base = &virt_bases[MMSS_BASE],
 	.c = {
+		.parent = &axi_clk_src.c,
 		.dbg_name = "venus0_axi_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(venus0_axi_clk.c),
@@ -4067,10 +4063,10 @@
 
 static struct branch_clk venus0_ocmemnoc_clk = {
 	.cbcr_reg = VENUS0_OCMEMNOC_CBCR,
-	.parent = &ocmemnoc_clk_src.c,
 	.has_sibling = 1,
 	.base = &virt_bases[MMSS_BASE],
 	.c = {
+		.parent = &ocmemnoc_clk_src.c,
 		.dbg_name = "venus0_ocmemnoc_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(venus0_ocmemnoc_clk.c),
@@ -4079,10 +4075,10 @@
 
 static struct branch_clk venus0_vcodec0_clk = {
 	.cbcr_reg = VENUS0_VCODEC0_CBCR,
-	.parent = &vcodec0_clk_src.c,
 	.has_sibling = 0,
 	.base = &virt_bases[MMSS_BASE],
 	.c = {
+		.parent = &vcodec0_clk_src.c,
 		.dbg_name = "venus0_vcodec0_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(venus0_vcodec0_clk.c),
@@ -4091,10 +4087,10 @@
 
 static struct branch_clk oxilicx_axi_clk = {
 	.cbcr_reg = OXILICX_AXI_CBCR,
-	.parent = &axi_clk_src.c,
 	.has_sibling = 1,
 	.base = &virt_bases[MMSS_BASE],
 	.c = {
+		.parent = &axi_clk_src.c,
 		.dbg_name = "oxilicx_axi_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(oxilicx_axi_clk.c),
@@ -4103,9 +4099,9 @@
 
 static struct branch_clk oxili_gfx3d_clk = {
 	.cbcr_reg = OXILI_GFX3D_CBCR,
-	.parent = &oxili_gfx3d_clk_src.c,
 	.base = &virt_bases[MMSS_BASE],
 	.c = {
+		.parent = &oxili_gfx3d_clk_src.c,
 		.dbg_name = "oxili_gfx3d_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(oxili_gfx3d_clk.c),
@@ -4145,9 +4141,9 @@
 
 static struct branch_clk audio_core_slimbus_core_clk = {
 	.cbcr_reg = AUDIO_CORE_SLIMBUS_CORE_CBCR,
-	.parent = &audio_core_slimbus_core_clk_src.c,
 	.base = &virt_bases[LPASS_BASE],
 	.c = {
+		.parent = &audio_core_slimbus_core_clk_src.c,
 		.dbg_name = "audio_core_slimbus_core_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(audio_core_slimbus_core_clk.c),
@@ -4293,10 +4289,10 @@
 
 static struct branch_clk audio_core_lpaif_codec_spkr_osr_clk = {
 	.cbcr_reg = AUDIO_CORE_LPAIF_CODEC_SPKR_OSR_CBCR,
-	.parent = &audio_core_lpaif_codec_spkr_clk_src.c,
 	.has_sibling = 1,
 	.base = &virt_bases[LPASS_BASE],
 	.c = {
+		.parent = &audio_core_lpaif_codec_spkr_clk_src.c,
 		.dbg_name = "audio_core_lpaif_codec_spkr_osr_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(audio_core_lpaif_codec_spkr_osr_clk.c),
@@ -4316,11 +4312,11 @@
 
 static struct branch_clk audio_core_lpaif_codec_spkr_ibit_clk = {
 	.cbcr_reg = AUDIO_CORE_LPAIF_CODEC_SPKR_IBIT_CBCR,
-	.parent = &audio_core_lpaif_codec_spkr_clk_src.c,
 	.has_sibling = 1,
 	.max_div = 15,
 	.base = &virt_bases[LPASS_BASE],
 	.c = {
+		.parent = &audio_core_lpaif_codec_spkr_clk_src.c,
 		.dbg_name = "audio_core_lpaif_codec_spkr_ibit_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(audio_core_lpaif_codec_spkr_ibit_clk.c),
@@ -4329,10 +4325,10 @@
 
 static struct branch_clk audio_core_lpaif_pri_osr_clk = {
 	.cbcr_reg = AUDIO_CORE_LPAIF_PRI_OSR_CBCR,
-	.parent = &audio_core_lpaif_pri_clk_src.c,
 	.has_sibling = 1,
 	.base = &virt_bases[LPASS_BASE],
 	.c = {
+		.parent = &audio_core_lpaif_pri_clk_src.c,
 		.dbg_name = "audio_core_lpaif_pri_osr_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(audio_core_lpaif_pri_osr_clk.c),
@@ -4352,11 +4348,11 @@
 
 static struct branch_clk audio_core_lpaif_pri_ibit_clk = {
 	.cbcr_reg = AUDIO_CORE_LPAIF_PRI_IBIT_CBCR,
-	.parent = &audio_core_lpaif_pri_clk_src.c,
 	.has_sibling = 1,
 	.max_div = 15,
 	.base = &virt_bases[LPASS_BASE],
 	.c = {
+		.parent = &audio_core_lpaif_pri_clk_src.c,
 		.dbg_name = "audio_core_lpaif_pri_ibit_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(audio_core_lpaif_pri_ibit_clk.c),
@@ -4365,10 +4361,10 @@
 
 static struct branch_clk audio_core_lpaif_sec_osr_clk = {
 	.cbcr_reg = AUDIO_CORE_LPAIF_SEC_OSR_CBCR,
-	.parent = &audio_core_lpaif_sec_clk_src.c,
 	.has_sibling = 1,
 	.base = &virt_bases[LPASS_BASE],
 	.c = {
+		.parent = &audio_core_lpaif_sec_clk_src.c,
 		.dbg_name = "audio_core_lpaif_sec_osr_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(audio_core_lpaif_sec_osr_clk.c),
@@ -4388,11 +4384,11 @@
 
 static struct branch_clk audio_core_lpaif_sec_ibit_clk = {
 	.cbcr_reg = AUDIO_CORE_LPAIF_SEC_IBIT_CBCR,
-	.parent = &audio_core_lpaif_sec_clk_src.c,
 	.has_sibling = 1,
 	.max_div = 15,
 	.base = &virt_bases[LPASS_BASE],
 	.c = {
+		.parent = &audio_core_lpaif_sec_clk_src.c,
 		.dbg_name = "audio_core_lpaif_sec_ibit_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(audio_core_lpaif_sec_ibit_clk.c),
@@ -4401,10 +4397,10 @@
 
 static struct branch_clk audio_core_lpaif_ter_osr_clk = {
 	.cbcr_reg = AUDIO_CORE_LPAIF_TER_OSR_CBCR,
-	.parent = &audio_core_lpaif_ter_clk_src.c,
 	.has_sibling = 1,
 	.base = &virt_bases[LPASS_BASE],
 	.c = {
+		.parent = &audio_core_lpaif_ter_clk_src.c,
 		.dbg_name = "audio_core_lpaif_ter_osr_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(audio_core_lpaif_ter_osr_clk.c),
@@ -4424,11 +4420,11 @@
 
 static struct branch_clk audio_core_lpaif_ter_ibit_clk = {
 	.cbcr_reg = AUDIO_CORE_LPAIF_TER_IBIT_CBCR,
-	.parent = &audio_core_lpaif_ter_clk_src.c,
 	.has_sibling = 1,
 	.max_div = 15,
 	.base = &virt_bases[LPASS_BASE],
 	.c = {
+		.parent = &audio_core_lpaif_ter_clk_src.c,
 		.dbg_name = "audio_core_lpaif_ter_ibit_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(audio_core_lpaif_ter_ibit_clk.c),
@@ -4437,10 +4433,10 @@
 
 static struct branch_clk audio_core_lpaif_quad_osr_clk = {
 	.cbcr_reg = AUDIO_CORE_LPAIF_QUAD_OSR_CBCR,
-	.parent = &audio_core_lpaif_quad_clk_src.c,
 	.has_sibling = 1,
 	.base = &virt_bases[LPASS_BASE],
 	.c = {
+		.parent = &audio_core_lpaif_quad_clk_src.c,
 		.dbg_name = "audio_core_lpaif_quad_osr_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(audio_core_lpaif_quad_osr_clk.c),
@@ -4460,11 +4456,11 @@
 
 static struct branch_clk audio_core_lpaif_quad_ibit_clk = {
 	.cbcr_reg = AUDIO_CORE_LPAIF_QUAD_IBIT_CBCR,
-	.parent = &audio_core_lpaif_quad_clk_src.c,
 	.has_sibling = 1,
 	.max_div = 15,
 	.base = &virt_bases[LPASS_BASE],
 	.c = {
+		.parent = &audio_core_lpaif_quad_clk_src.c,
 		.dbg_name = "audio_core_lpaif_quad_ibit_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(audio_core_lpaif_quad_ibit_clk.c),
@@ -4484,10 +4480,10 @@
 
 static struct branch_clk audio_core_lpaif_pcm0_ibit_clk = {
 	.cbcr_reg = AUDIO_CORE_LPAIF_PCM0_IBIT_CBCR,
-	.parent = &audio_core_lpaif_pcm0_clk_src.c,
 	.has_sibling = 1,
 	.base = &virt_bases[LPASS_BASE],
 	.c = {
+		.parent = &audio_core_lpaif_pcm0_clk_src.c,
 		.dbg_name = "audio_core_lpaif_pcm0_ibit_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(audio_core_lpaif_pcm0_ibit_clk.c),
@@ -4496,10 +4492,10 @@
 
 static struct branch_clk audio_core_lpaif_pcm1_ebit_clk = {
 	.cbcr_reg = AUDIO_CORE_LPAIF_PCM1_EBIT_CBCR,
-	.parent = &audio_core_lpaif_pcm1_clk_src.c,
 	.has_sibling = 1,
 	.base = &virt_bases[LPASS_BASE],
 	.c = {
+		.parent = &audio_core_lpaif_pcm1_clk_src.c,
 		.dbg_name = "audio_core_lpaif_pcm1_ebit_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(audio_core_lpaif_pcm1_ebit_clk.c),
@@ -4508,10 +4504,10 @@
 
 static struct branch_clk audio_core_lpaif_pcm1_ibit_clk = {
 	.cbcr_reg = AUDIO_CORE_LPAIF_PCM1_IBIT_CBCR,
-	.parent = &audio_core_lpaif_pcm1_clk_src.c,
 	.has_sibling = 1,
 	.base = &virt_bases[LPASS_BASE],
 	.c = {
+		.parent = &audio_core_lpaif_pcm1_clk_src.c,
 		.dbg_name = "audio_core_lpaif_pcm1_ibit_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(audio_core_lpaif_pcm1_ibit_clk.c),
@@ -4520,9 +4516,9 @@
 
 struct branch_clk audio_core_lpaif_pcmoe_clk = {
 	.cbcr_reg = AUDIO_CORE_LPAIF_PCM_DATA_OE_CBCR,
-	.parent = &audio_core_lpaif_pcmoe_clk_src.c,
 	.base = &virt_bases[LPASS_BASE],
 	.c = {
+		.parent = &audio_core_lpaif_pcmoe_clk_src.c,
 		.dbg_name = "audio_core_lpaif_pcmoe_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(audio_core_lpaif_pcmoe_clk.c),
@@ -5159,10 +5155,10 @@
 
 	/* MM sensor clocks */
 	CLK_LOOKUP("cam_src_clk", mclk0_clk_src.c, "6e.qcom,camera"),
-	CLK_LOOKUP("cam_src_clk", mclk1_clk_src.c, "6c.qcom,camera"),
+	CLK_LOOKUP("cam_src_clk", mclk2_clk_src.c, "6c.qcom,camera"),
 	CLK_LOOKUP("cam_src_clk", mclk1_clk_src.c, "90.qcom,camera"),
 	CLK_LOOKUP("cam_clk", camss_mclk0_clk.c, "6e.qcom,camera"),
-	CLK_LOOKUP("cam_clk", camss_mclk1_clk.c, "6c.qcom,camera"),
+	CLK_LOOKUP("cam_clk", camss_mclk2_clk.c, "6c.qcom,camera"),
 	CLK_LOOKUP("cam_clk", camss_mclk1_clk.c, "90.qcom,camera"),
 	CLK_LOOKUP("cam_clk", camss_mclk1_clk.c, ""),
 	CLK_LOOKUP("cam_clk", camss_mclk2_clk.c, ""),
@@ -5681,11 +5677,9 @@
 {
 	clk_ops_byte = clk_ops_rcg_mnd;
 	clk_ops_byte.set_rate = set_rate_byte;
-	clk_ops_dsi_byte_pll.get_parent = dsi_pll_clk_get_parent;
 
 	clk_ops_pixel = clk_ops_rcg;
 	clk_ops_pixel.set_rate = set_rate_pixel;
-	clk_ops_dsi_pixel_pll.get_parent = dsi_pll_clk_get_parent;
 
 	mdss_clk_ctrl_init();
 }
diff --git a/arch/arm/mach-msm/clock-8x60.c b/arch/arm/mach-msm/clock-8x60.c
index 5a9799a..2ad425d 100644
--- a/arch/arm/mach-msm/clock-8x60.c
+++ b/arch/arm/mach-msm/clock-8x60.c
@@ -317,8 +317,8 @@
 	.en_mask = BIT(8),
 	.status_reg = BB_PLL8_STATUS_REG,
 	.status_mask = BIT(16),
-	.parent = &pxo_clk.c,
 	.c = {
+		.parent = &pxo_clk.c,
 		.dbg_name = "pll8_clk",
 		.rate = 384000000,
 		.ops = &clk_ops_pll_vote,
@@ -328,8 +328,8 @@
 
 static struct pll_clk pll2_clk = {
 	.mode_reg = MM_PLL1_MODE_REG,
-	.parent = &pxo_clk.c,
 	.c = {
+		.parent = &pxo_clk.c,
 		.dbg_name = "pll2_clk",
 		.rate = 800000000,
 		.ops = &clk_ops_local_pll,
@@ -339,8 +339,8 @@
 
 static struct pll_clk pll3_clk = {
 	.mode_reg = MM_PLL2_MODE_REG,
-	.parent = &pxo_clk.c,
 	.c = {
+		.parent = &pxo_clk.c,
 		.dbg_name = "pll3_clk",
 		.rate = 0, /* TODO: Detect rate dynamically */
 		.ops = &clk_ops_local_pll,
@@ -360,11 +360,6 @@
 	msm_rpm_set_noirq(MSM_RPM_CTX_SET_0, &iv, 1);
 }
 
-static struct clk *pll4_clk_get_parent(struct clk *c)
-{
-	return &pxo_clk.c;
-}
-
 static bool pll4_clk_is_local(struct clk *c)
 {
 	return false;
@@ -383,13 +378,13 @@
 static struct clk_ops clk_ops_pll4 = {
 	.enable = pll4_clk_enable,
 	.disable = pll4_clk_disable,
-	.get_parent = pll4_clk_get_parent,
 	.is_local = pll4_clk_is_local,
 	.handoff = pll4_clk_handoff,
 };
 
 static struct fixed_clk pll4_clk = {
 	.c = {
+		.parent = &pxo_clk.c,
 		.dbg_name = "pll4_clk",
 		.rate = 540672000,
 		.ops = &clk_ops_pll4,
@@ -1386,8 +1381,8 @@
 		.halt_reg = CLK_HALT_CFPB_STATEA_REG,
 		.halt_bit = 15,
 	},
-	.parent = &usb_fs1_src_clk.c,
 	.c = {
+		.parent = &usb_fs1_src_clk.c,
 		.dbg_name = "usb_fs1_xcvr_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(usb_fs1_xcvr_clk.c),
@@ -1403,8 +1398,8 @@
 		.halt_reg = CLK_HALT_CFPB_STATEA_REG,
 		.halt_bit = 16,
 	},
-	.parent = &usb_fs1_src_clk.c,
 	.c = {
+		.parent = &usb_fs1_src_clk.c,
 		.dbg_name = "usb_fs1_sys_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(usb_fs1_sys_clk.c),
@@ -1421,8 +1416,8 @@
 		.halt_reg = CLK_HALT_CFPB_STATEA_REG,
 		.halt_bit = 12,
 	},
-	.parent = &usb_fs2_src_clk.c,
 	.c = {
+		.parent = &usb_fs2_src_clk.c,
 		.dbg_name = "usb_fs2_xcvr_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(usb_fs2_xcvr_clk.c),
@@ -1438,8 +1433,8 @@
 		.halt_reg = CLK_HALT_CFPB_STATEA_REG,
 		.halt_bit = 13,
 	},
-	.parent = &usb_fs2_src_clk.c,
 	.c = {
+		.parent = &usb_fs2_src_clk.c,
 		.dbg_name = "usb_fs2_sys_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(usb_fs2_sys_clk.c),
@@ -1454,8 +1449,8 @@
 		.halt_reg = CLK_HALT_CFPB_STATEC_REG,
 		.halt_bit = 0,
 	},
-	.parent = &pxo_clk.c,
 	.c = {
+		.parent = &pxo_clk.c,
 		.dbg_name = "ce2_p_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(ce2_p_clk.c),
@@ -1808,8 +1803,8 @@
 		.halt_check = HALT_VOTED,
 		.halt_bit = 14,
 	},
-	.parent = &pxo_clk.c,
 	.c = {
+		.parent = &pxo_clk.c,
 		.dbg_name = "adm0_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(adm0_clk.c),
@@ -1839,8 +1834,8 @@
 		.halt_check = HALT_VOTED,
 		.halt_bit = 12,
 	},
-	.parent = &pxo_clk.c,
 	.c = {
+		.parent = &pxo_clk.c,
 		.dbg_name = "adm1_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(adm1_clk.c),
@@ -2044,8 +2039,8 @@
 		.halt_reg = DBG_BUS_VEC_B_REG,
 		.halt_bit = 13,
 	},
-	.parent = &csi_src_clk.c,
 	.c = {
+		.parent = &csi_src_clk.c,
 		.dbg_name = "csi0_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(csi0_clk.c),
@@ -2061,8 +2056,8 @@
 		.halt_reg = DBG_BUS_VEC_B_REG,
 		.halt_bit = 14,
 	},
-	.parent = &csi_src_clk.c,
 	.c = {
+		.parent = &csi_src_clk.c,
 		.dbg_name = "csi1_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(csi1_clk.c),
@@ -2566,8 +2561,8 @@
 		.halt_reg = DBG_BUS_VEC_C_REG,
 		.halt_bit = 21,
 	},
-	.parent = &pixel_mdp_clk.c,
 	.c = {
+		.parent = &pixel_mdp_clk.c,
 		.dbg_name = "pixel_lcdc_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(pixel_lcdc_clk.c),
@@ -2695,8 +2690,8 @@
 		.halt_reg = DBG_BUS_VEC_D_REG,
 		.halt_bit = 8,
 	},
-	.parent = &tv_src_clk.c,
 	.c = {
+		.parent = &tv_src_clk.c,
 		.dbg_name = "tv_enc_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(tv_enc_clk.c),
@@ -2710,8 +2705,8 @@
 		.halt_reg = DBG_BUS_VEC_D_REG,
 		.halt_bit = 9,
 	},
-	.parent = &tv_src_clk.c,
 	.c = {
+		.parent = &tv_src_clk.c,
 		.dbg_name = "tv_dac_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(tv_dac_clk.c),
@@ -2727,8 +2722,8 @@
 		.halt_reg = DBG_BUS_VEC_D_REG,
 		.halt_bit = 11,
 	},
-	.parent = &tv_src_clk.c,
 	.c = {
+		.parent = &tv_src_clk.c,
 		.dbg_name = "mdp_tv_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(mdp_tv_clk.c),
@@ -2744,8 +2739,8 @@
 		.halt_reg = DBG_BUS_VEC_D_REG,
 		.halt_bit = 10,
 	},
-	.parent = &tv_src_clk.c,
 	.c = {
+		.parent = &tv_src_clk.c,
 		.dbg_name = "hdmi_tv_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(hdmi_tv_clk.c),
@@ -2934,8 +2929,8 @@
 		.halt_reg = DBG_BUS_VEC_B_REG,
 		.halt_bit = 7,
 	},
-	.parent = &vfe_clk.c,
 	.c = {
+		.parent = &vfe_clk.c,
 		.dbg_name = "csi0_vfe_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(csi0_vfe_clk.c),
@@ -2951,8 +2946,8 @@
 		.halt_reg = DBG_BUS_VEC_B_REG,
 		.halt_bit = 8,
 	},
-	.parent = &vfe_clk.c,
 	.c = {
+		.parent = &vfe_clk.c,
 		.dbg_name = "csi1_vfe_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(csi1_vfe_clk.c),
diff --git a/arch/arm/mach-msm/clock-9615.c b/arch/arm/mach-msm/clock-9615.c
index 338361b..fffbcea 100644
--- a/arch/arm/mach-msm/clock-9615.c
+++ b/arch/arm/mach-msm/clock-9615.c
@@ -228,10 +228,10 @@
 	.en_mask = BIT(0),
 	.status_reg = BB_PLL0_STATUS_REG,
 	.status_mask = BIT(16),
-	.parent = &cxo_clk.c,
 	.soft_vote = &soft_vote_pll0,
 	.soft_vote_mask = PLL_SOFT_VOTE_PRIMARY,
 	.c = {
+		.parent = &cxo_clk.c,
 		.dbg_name = "pll0_clk",
 		.rate = 276000000,
 		.ops = &clk_ops_pll_acpu_vote,
@@ -259,8 +259,8 @@
 	.en_mask = BIT(4),
 	.status_reg = LCC_PLL0_STATUS_REG,
 	.status_mask = BIT(16),
-	.parent = &cxo_clk.c,
 	.c = {
+		.parent = &cxo_clk.c,
 		.dbg_name = "pll4_clk",
 		.rate = 393216000,
 		.ops = &clk_ops_pll_vote,
@@ -275,10 +275,10 @@
 	.en_mask = BIT(8),
 	.status_reg = BB_PLL8_STATUS_REG,
 	.status_mask = BIT(16),
-	.parent = &cxo_clk.c,
 	.soft_vote = &soft_vote_pll8,
 	.soft_vote_mask = PLL_SOFT_VOTE_PRIMARY,
 	.c = {
+		.parent = &cxo_clk.c,
 		.dbg_name = "pll8_clk",
 		.rate = 384000000,
 		.ops = &clk_ops_pll_acpu_vote,
@@ -316,8 +316,8 @@
 	.en_mask = BIT(11),
 	.status_reg = BB_PLL14_STATUS_REG,
 	.status_mask = BIT(16),
-	.parent = &cxo_clk.c,
 	.c = {
+		.parent = &cxo_clk.c,
 		.dbg_name = "pll14_clk",
 		.rate = 480000000,
 		.ops = &clk_ops_pll_vote,
@@ -767,8 +767,8 @@
 		.halt_reg = CLK_HALT_DFAB_STATE_REG,
 		.halt_bit = 8,
 	},
-	.parent = &cxo_clk.c,
 	.c = {
+		.parent = &cxo_clk.c,
 		.dbg_name = "usb_hsic_hsio_cal_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(usb_hsic_hsio_cal_clk.c),
@@ -1295,8 +1295,8 @@
 		.halt_check = ENABLE,
 		.halt_bit = 1,
 	},
-	.parent = &audio_slimbus_clk.c,
 	.c = {
+		.parent = &audio_slimbus_clk.c,
 		.dbg_name = "sps_slimbus_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(sps_slimbus_clk.c),
@@ -1310,8 +1310,8 @@
 		.halt_reg = CLK_HALT_DFAB_STATE_REG,
 		.halt_bit = 28,
 	},
-	.parent = &sps_slimbus_clk.c,
 	.c = {
+		.parent = &sps_slimbus_clk.c,
 		.dbg_name = "slimbus_xo_src_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(slimbus_xo_src_clk.c),
diff --git a/arch/arm/mach-msm/clock-9625.c b/arch/arm/mach-msm/clock-9625.c
index 0b8919b..b284168 100644
--- a/arch/arm/mach-msm/clock-9625.c
+++ b/arch/arm/mach-msm/clock-9625.c
@@ -388,11 +388,11 @@
 	.en_reg = (void __iomem *)APCS_GPLL_ENA_VOTE_REG,
 	.status_reg = (void __iomem *)GPLL0_STATUS_REG,
 	.status_mask = BIT(17),
-	.parent = &cxo_clk_src.c,
 	.soft_vote = &soft_vote_gpll0,
 	.soft_vote_mask = PLL_SOFT_VOTE_PRIMARY,
 	.base = &virt_bases[GCC_BASE],
 	.c = {
+		.parent = &cxo_clk_src.c,
 		.rate = 600000000,
 		.dbg_name = "gpll0_clk_src",
 		.ops = &clk_ops_pll_acpu_vote,
@@ -420,9 +420,9 @@
 	.en_mask = BIT(0),
 	.status_reg = (void __iomem *)LPAPLL_STATUS_REG,
 	.status_mask = BIT(17),
-	.parent = &cxo_clk_src.c,
 	.base = &virt_bases[LPASS_BASE],
 	.c = {
+		.parent = &cxo_clk_src.c,
 		.rate = 393216000,
 		.dbg_name = "lpapll0_clk_src",
 		.ops = &clk_ops_pll_vote,
@@ -435,9 +435,9 @@
 	.en_mask = BIT(1),
 	.status_reg = (void __iomem *)GPLL1_STATUS_REG,
 	.status_mask = BIT(17),
-	.parent = &cxo_clk_src.c,
 	.base = &virt_bases[GCC_BASE],
 	.c = {
+		.parent = &cxo_clk_src.c,
 		.rate = 480000000,
 		.dbg_name = "gpll1_clk_src",
 		.ops = &clk_ops_pll_vote,
@@ -988,10 +988,10 @@
 
 static struct branch_clk gcc_blsp1_qup1_i2c_apps_clk = {
 	.cbcr_reg = BLSP1_QUP1_I2C_APPS_CBCR,
-	.parent = &cxo_clk_src.c,
 	.has_sibling = 1,
 	.base = &virt_bases[GCC_BASE],
 	.c = {
+		.parent = &cxo_clk_src.c,
 		.dbg_name = "gcc_blsp1_qup1_i2c_apps_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(gcc_blsp1_qup1_i2c_apps_clk.c),
@@ -1000,10 +1000,10 @@
 
 static struct branch_clk gcc_blsp1_qup1_spi_apps_clk = {
 	.cbcr_reg = BLSP1_QUP1_SPI_APPS_CBCR,
-	.parent = &blsp1_qup1_spi_apps_clk_src.c,
 	.has_sibling = 0,
 	.base = &virt_bases[GCC_BASE],
 	.c = {
+		.parent = &blsp1_qup1_spi_apps_clk_src.c,
 		.dbg_name = "gcc_blsp1_qup1_spi_apps_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(gcc_blsp1_qup1_spi_apps_clk.c),
@@ -1012,10 +1012,10 @@
 
 static struct branch_clk gcc_blsp1_qup2_i2c_apps_clk = {
 	.cbcr_reg = BLSP1_QUP2_I2C_APPS_CBCR,
-	.parent = &cxo_clk_src.c,
 	.has_sibling = 1,
 	.base = &virt_bases[GCC_BASE],
 	.c = {
+		.parent = &cxo_clk_src.c,
 		.dbg_name = "gcc_blsp1_qup2_i2c_apps_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(gcc_blsp1_qup2_i2c_apps_clk.c),
@@ -1024,10 +1024,10 @@
 
 static struct branch_clk gcc_blsp1_qup2_spi_apps_clk = {
 	.cbcr_reg = BLSP1_QUP2_SPI_APPS_CBCR,
-	.parent = &blsp1_qup2_spi_apps_clk_src.c,
 	.has_sibling = 0,
 	.base = &virt_bases[GCC_BASE],
 	.c = {
+		.parent = &blsp1_qup2_spi_apps_clk_src.c,
 		.dbg_name = "gcc_blsp1_qup2_spi_apps_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(gcc_blsp1_qup2_spi_apps_clk.c),
@@ -1036,10 +1036,10 @@
 
 static struct branch_clk gcc_blsp1_qup3_i2c_apps_clk = {
 	.cbcr_reg = BLSP1_QUP3_I2C_APPS_CBCR,
-	.parent = &cxo_clk_src.c,
 	.has_sibling = 1,
 	.base = &virt_bases[GCC_BASE],
 	.c = {
+		.parent = &cxo_clk_src.c,
 		.dbg_name = "gcc_blsp1_qup3_i2c_apps_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(gcc_blsp1_qup3_i2c_apps_clk.c),
@@ -1048,10 +1048,10 @@
 
 static struct branch_clk gcc_blsp1_qup3_spi_apps_clk = {
 	.cbcr_reg = BLSP1_QUP3_SPI_APPS_CBCR,
-	.parent = &blsp1_qup3_spi_apps_clk_src.c,
 	.has_sibling = 0,
 	.base = &virt_bases[GCC_BASE],
 	.c = {
+		.parent = &blsp1_qup3_spi_apps_clk_src.c,
 		.dbg_name = "gcc_blsp1_qup3_spi_apps_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(gcc_blsp1_qup3_spi_apps_clk.c),
@@ -1060,10 +1060,10 @@
 
 static struct branch_clk gcc_blsp1_qup4_i2c_apps_clk = {
 	.cbcr_reg = BLSP1_QUP4_I2C_APPS_CBCR,
-	.parent = &cxo_clk_src.c,
 	.has_sibling = 1,
 	.base = &virt_bases[GCC_BASE],
 	.c = {
+		.parent = &cxo_clk_src.c,
 		.dbg_name = "gcc_blsp1_qup4_i2c_apps_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(gcc_blsp1_qup4_i2c_apps_clk.c),
@@ -1072,10 +1072,10 @@
 
 static struct branch_clk gcc_blsp1_qup4_spi_apps_clk = {
 	.cbcr_reg = BLSP1_QUP4_SPI_APPS_CBCR,
-	.parent = &blsp1_qup4_spi_apps_clk_src.c,
 	.has_sibling = 0,
 	.base = &virt_bases[GCC_BASE],
 	.c = {
+		.parent = &blsp1_qup4_spi_apps_clk_src.c,
 		.dbg_name = "gcc_blsp1_qup4_spi_apps_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(gcc_blsp1_qup4_spi_apps_clk.c),
@@ -1084,10 +1084,10 @@
 
 static struct branch_clk gcc_blsp1_qup5_i2c_apps_clk = {
 	.cbcr_reg = BLSP1_QUP5_I2C_APPS_CBCR,
-	.parent = &cxo_clk_src.c,
 	.has_sibling = 1,
 	.base = &virt_bases[GCC_BASE],
 	.c = {
+		.parent = &cxo_clk_src.c,
 		.dbg_name = "gcc_blsp1_qup5_i2c_apps_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(gcc_blsp1_qup5_i2c_apps_clk.c),
@@ -1096,10 +1096,10 @@
 
 static struct branch_clk gcc_blsp1_qup5_spi_apps_clk = {
 	.cbcr_reg = BLSP1_QUP5_SPI_APPS_CBCR,
-	.parent = &blsp1_qup5_spi_apps_clk_src.c,
 	.has_sibling = 0,
 	.base = &virt_bases[GCC_BASE],
 	.c = {
+		.parent = &blsp1_qup5_spi_apps_clk_src.c,
 		.dbg_name = "gcc_blsp1_qup5_spi_apps_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(gcc_blsp1_qup5_spi_apps_clk.c),
@@ -1108,10 +1108,10 @@
 
 static struct branch_clk gcc_blsp1_qup6_i2c_apps_clk = {
 	.cbcr_reg = BLSP1_QUP6_I2C_APPS_CBCR,
-	.parent = &cxo_clk_src.c,
 	.has_sibling = 1,
 	.base = &virt_bases[GCC_BASE],
 	.c = {
+		.parent = &cxo_clk_src.c,
 		.dbg_name = "gcc_blsp1_qup6_i2c_apps_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(gcc_blsp1_qup6_i2c_apps_clk.c),
@@ -1120,10 +1120,10 @@
 
 static struct branch_clk gcc_blsp1_qup6_spi_apps_clk = {
 	.cbcr_reg = BLSP1_QUP6_SPI_APPS_CBCR,
-	.parent = &blsp1_qup6_spi_apps_clk_src.c,
 	.has_sibling = 0,
 	.base = &virt_bases[GCC_BASE],
 	.c = {
+		.parent = &blsp1_qup6_spi_apps_clk_src.c,
 		.dbg_name = "gcc_blsp1_qup6_spi_apps_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(gcc_blsp1_qup6_spi_apps_clk.c),
@@ -1132,10 +1132,10 @@
 
 static struct branch_clk gcc_blsp1_uart1_apps_clk = {
 	.cbcr_reg = BLSP1_UART1_APPS_CBCR,
-	.parent = &blsp1_uart1_apps_clk_src.c,
 	.has_sibling = 0,
 	.base = &virt_bases[GCC_BASE],
 	.c = {
+		.parent = &blsp1_uart1_apps_clk_src.c,
 		.dbg_name = "gcc_blsp1_uart1_apps_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(gcc_blsp1_uart1_apps_clk.c),
@@ -1144,10 +1144,10 @@
 
 static struct branch_clk gcc_blsp1_uart2_apps_clk = {
 	.cbcr_reg = BLSP1_UART2_APPS_CBCR,
-	.parent = &blsp1_uart2_apps_clk_src.c,
 	.has_sibling = 0,
 	.base = &virt_bases[GCC_BASE],
 	.c = {
+		.parent = &blsp1_uart2_apps_clk_src.c,
 		.dbg_name = "gcc_blsp1_uart2_apps_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(gcc_blsp1_uart2_apps_clk.c),
@@ -1156,10 +1156,10 @@
 
 static struct branch_clk gcc_blsp1_uart3_apps_clk = {
 	.cbcr_reg = BLSP1_UART3_APPS_CBCR,
-	.parent = &blsp1_uart3_apps_clk_src.c,
 	.has_sibling = 0,
 	.base = &virt_bases[GCC_BASE],
 	.c = {
+		.parent = &blsp1_uart3_apps_clk_src.c,
 		.dbg_name = "gcc_blsp1_uart3_apps_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(gcc_blsp1_uart3_apps_clk.c),
@@ -1168,10 +1168,10 @@
 
 static struct branch_clk gcc_blsp1_uart4_apps_clk = {
 	.cbcr_reg = BLSP1_UART4_APPS_CBCR,
-	.parent = &blsp1_uart4_apps_clk_src.c,
 	.has_sibling = 0,
 	.base = &virt_bases[GCC_BASE],
 	.c = {
+		.parent = &blsp1_uart4_apps_clk_src.c,
 		.dbg_name = "gcc_blsp1_uart4_apps_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(gcc_blsp1_uart4_apps_clk.c),
@@ -1180,10 +1180,10 @@
 
 static struct branch_clk gcc_blsp1_uart5_apps_clk = {
 	.cbcr_reg = BLSP1_UART5_APPS_CBCR,
-	.parent = &blsp1_uart5_apps_clk_src.c,
 	.has_sibling = 0,
 	.base = &virt_bases[GCC_BASE],
 	.c = {
+		.parent = &blsp1_uart5_apps_clk_src.c,
 		.dbg_name = "gcc_blsp1_uart5_apps_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(gcc_blsp1_uart5_apps_clk.c),
@@ -1192,10 +1192,10 @@
 
 static struct branch_clk gcc_blsp1_uart6_apps_clk = {
 	.cbcr_reg = BLSP1_UART6_APPS_CBCR,
-	.parent = &blsp1_uart6_apps_clk_src.c,
 	.has_sibling = 0,
 	.base = &virt_bases[GCC_BASE],
 	.c = {
+		.parent = &blsp1_uart6_apps_clk_src.c,
 		.dbg_name = "gcc_blsp1_uart6_apps_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(gcc_blsp1_uart6_apps_clk.c),
@@ -1252,10 +1252,10 @@
 
 static struct branch_clk gcc_gp1_clk = {
 	.cbcr_reg = GP1_CBCR,
-	.parent = &gp1_clk_src.c,
 	.has_sibling = 0,
 	.base = &virt_bases[GCC_BASE],
 	.c = {
+		.parent = &gp1_clk_src.c,
 		.dbg_name = "gcc_gp1_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(gcc_gp1_clk.c),
@@ -1264,10 +1264,10 @@
 
 static struct branch_clk gcc_gp2_clk = {
 	.cbcr_reg = GP2_CBCR,
-	.parent = &gp2_clk_src.c,
 	.has_sibling = 0,
 	.base = &virt_bases[GCC_BASE],
 	.c = {
+		.parent = &gp2_clk_src.c,
 		.dbg_name = "gcc_gp2_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(gcc_gp2_clk.c),
@@ -1276,10 +1276,10 @@
 
 static struct branch_clk gcc_gp3_clk = {
 	.cbcr_reg = GP3_CBCR,
-	.parent = &gp3_clk_src.c,
 	.has_sibling = 0,
 	.base = &virt_bases[GCC_BASE],
 	.c = {
+		.parent = &gp3_clk_src.c,
 		.dbg_name = "gcc_gp3_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(gcc_gp3_clk.c),
@@ -1288,10 +1288,10 @@
 
 static struct branch_clk gcc_ipa_clk = {
 	.cbcr_reg = IPA_CBCR,
-	.parent = &ipa_clk_src.c,
 	.has_sibling = 1,
 	.base = &virt_bases[GCC_BASE],
 	.c = {
+		.parent = &ipa_clk_src.c,
 		.dbg_name = "gcc_ipa_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(gcc_ipa_clk.c),
@@ -1311,10 +1311,10 @@
 
 static struct branch_clk gcc_pdm2_clk = {
 	.cbcr_reg = PDM2_CBCR,
-	.parent = &pdm2_clk_src.c,
 	.has_sibling = 0,
 	.base = &virt_bases[GCC_BASE],
 	.c = {
+		.parent = &pdm2_clk_src.c,
 		.dbg_name = "gcc_pdm2_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(gcc_pdm2_clk.c),
@@ -1357,10 +1357,10 @@
 
 static struct branch_clk gcc_qpic_clk = {
 	.cbcr_reg = QPIC_CBCR,
-	.parent = &qpic_clk_src.c,
 	.has_sibling = 0,
 	.base = &virt_bases[GCC_BASE],
 	.c = {
+		.parent = &qpic_clk_src.c,
 		.dbg_name = "gcc_qpic_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(gcc_qpic_clk.c),
@@ -1380,10 +1380,10 @@
 
 static struct branch_clk gcc_sdcc2_apps_clk = {
 	.cbcr_reg = SDCC2_APPS_CBCR,
-	.parent = &sdcc2_apps_clk_src.c,
 	.has_sibling = 0,
 	.base = &virt_bases[GCC_BASE],
 	.c = {
+		.parent = &sdcc2_apps_clk_src.c,
 		.dbg_name = "gcc_sdcc2_apps_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(gcc_sdcc2_apps_clk.c),
@@ -1403,10 +1403,10 @@
 
 static struct branch_clk gcc_sdcc3_apps_clk = {
 	.cbcr_reg = SDCC3_APPS_CBCR,
-	.parent = &sdcc3_apps_clk_src.c,
 	.has_sibling = 0,
 	.base = &virt_bases[GCC_BASE],
 	.c = {
+		.parent = &sdcc3_apps_clk_src.c,
 		.dbg_name = "gcc_sdcc3_apps_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(gcc_sdcc3_apps_clk.c),
@@ -1415,10 +1415,10 @@
 
 static struct branch_clk gcc_sys_noc_ipa_axi_clk = {
 	.cbcr_reg = SYS_NOC_IPA_AXI_CBCR,
-	.parent = &ipa_clk_src.c,
 	.has_sibling = 1,
 	.base = &virt_bases[GCC_BASE],
 	.c = {
+		.parent = &ipa_clk_src.c,
 		.dbg_name = "gcc_sys_noc_ipa_axi_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(gcc_sys_noc_ipa_axi_clk.c),
@@ -1439,10 +1439,10 @@
 static struct branch_clk gcc_usb_hs_system_clk = {
 	.cbcr_reg = USB_HS_SYSTEM_CBCR,
 	.bcr_reg = USB_HS_BCR,
-	.parent = &usb_hs_system_clk_src.c,
 	.has_sibling = 0,
 	.base = &virt_bases[GCC_BASE],
 	.c = {
+		.parent = &usb_hs_system_clk_src.c,
 		.dbg_name = "gcc_usb_hs_system_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(gcc_usb_hs_system_clk.c),
@@ -1462,10 +1462,10 @@
 
 static struct branch_clk gcc_usb_hsic_clk = {
 	.cbcr_reg = USB_HSIC_CBCR,
-	.parent = &usb_hsic_clk_src.c,
 	.has_sibling = 0,
 	.base = &virt_bases[GCC_BASE],
 	.c = {
+		.parent = &usb_hsic_clk_src.c,
 		.dbg_name = "gcc_usb_hsic_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(gcc_usb_hsic_clk.c),
@@ -1474,10 +1474,10 @@
 
 static struct branch_clk gcc_usb_hsic_io_cal_clk = {
 	.cbcr_reg = USB_HSIC_IO_CAL_CBCR,
-	.parent = &usb_hsic_io_cal_clk_src.c,
 	.has_sibling = 0,
 	.base = &virt_bases[GCC_BASE],
 	.c = {
+		.parent = &usb_hsic_io_cal_clk_src.c,
 		.dbg_name = "gcc_usb_hsic_io_cal_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(gcc_usb_hsic_io_cal_clk.c),
@@ -1487,10 +1487,10 @@
 static struct branch_clk gcc_usb_hsic_system_clk = {
 	.cbcr_reg = USB_HSIC_SYSTEM_CBCR,
 	.bcr_reg = USB_HS_HSIC_BCR,
-	.parent = &usb_hsic_system_clk_src.c,
 	.has_sibling = 0,
 	.base = &virt_bases[GCC_BASE],
 	.c = {
+		.parent = &usb_hsic_system_clk_src.c,
 		.dbg_name = "gcc_usb_hsic_system_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(gcc_usb_hsic_system_clk.c),
@@ -1499,10 +1499,10 @@
 
 static struct branch_clk gcc_usb_hsic_xcvr_fs_clk = {
 	.cbcr_reg = USB_HSIC_XCVR_FS_CBCR,
-	.parent = &usb_hsic_xcvr_fs_clk_src.c,
 	.has_sibling = 0,
 	.base = &virt_bases[GCC_BASE],
 	.c = {
+		.parent = &usb_hsic_xcvr_fs_clk_src.c,
 		.dbg_name = "gcc_usb_hsic_xcvr_fs_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(gcc_usb_hsic_xcvr_fs_clk.c),
@@ -1639,9 +1639,9 @@
 
 static struct branch_clk audio_core_lpaif_pcm_data_oe_clk = {
 	.cbcr_reg = AUDIO_CORE_LPAIF_PCM_DATA_OE_CBCR,
-	.parent = &audio_core_lpaif_pcmoe_clk_src.c,
 	.base = &virt_bases[LPASS_BASE],
 	.c = {
+		.parent = &audio_core_lpaif_pcmoe_clk_src.c,
 		.dbg_name = "audio_core_lpaif_pcm_data_oe_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(audio_core_lpaif_pcm_data_oe_clk.c),
@@ -1650,9 +1650,9 @@
 
 static struct branch_clk audio_core_slimbus_core_clk = {
 	.cbcr_reg = AUDIO_CORE_SLIMBUS_CORE_CBCR,
-	.parent = &audio_core_slimbus_core_clk_src.c,
 	.base = &virt_bases[LPASS_BASE],
 	.c = {
+		.parent = &audio_core_slimbus_core_clk_src.c,
 		.dbg_name = "audio_core_slimbus_core_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(audio_core_slimbus_core_clk.c),
@@ -1672,11 +1672,11 @@
 
 static struct branch_clk audio_core_lpaif_pri_ibit_clk = {
 	.cbcr_reg = AUDIO_CORE_LPAIF_PRI_IBIT_CBCR,
-	.parent = &audio_core_lpaif_pri_clk_src.c,
 	.has_sibling = 1,
 	.max_div = 15,
 	.base = &virt_bases[LPASS_BASE],
 	.c = {
+		.parent = &audio_core_lpaif_pri_clk_src.c,
 		.dbg_name = "audio_core_lpaif_pri_ibit_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(audio_core_lpaif_pri_ibit_clk.c),
@@ -1685,10 +1685,10 @@
 
 static struct branch_clk audio_core_lpaif_pri_osr_clk = {
 	.cbcr_reg = AUDIO_CORE_LPAIF_PRI_OSR_CBCR,
-	.parent = &audio_core_lpaif_pri_clk_src.c,
 	.has_sibling = 1,
 	.base = &virt_bases[LPASS_BASE],
 	.c = {
+		.parent = &audio_core_lpaif_pri_clk_src.c,
 		.dbg_name = "audio_core_lpaif_pri_osr_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(audio_core_lpaif_pri_osr_clk.c),
@@ -1708,10 +1708,10 @@
 
 static struct branch_clk audio_core_lpaif_pcm0_ibit_clk = {
 	.cbcr_reg = AUDIO_CORE_LPAIF_PCM0_IBIT_CBCR,
-	.parent = &audio_core_lpaif_pcm0_clk_src.c,
 	.has_sibling = 0,
 	.base = &virt_bases[LPASS_BASE],
 	.c = {
+		.parent = &audio_core_lpaif_pcm0_clk_src.c,
 		.dbg_name = "audio_core_lpaif_pcm0_ibit_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(audio_core_lpaif_pcm0_ibit_clk.c),
@@ -1731,11 +1731,11 @@
 
 static struct branch_clk audio_core_lpaif_sec_ibit_clk = {
 	.cbcr_reg = AUDIO_CORE_LPAIF_SEC_IBIT_CBCR,
-	.parent = &audio_core_lpaif_sec_clk_src.c,
 	.has_sibling = 1,
 	.max_div = 15,
 	.base = &virt_bases[LPASS_BASE],
 	.c = {
+		.parent = &audio_core_lpaif_sec_clk_src.c,
 		.dbg_name = "audio_core_lpaif_sec_ibit_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(audio_core_lpaif_sec_ibit_clk.c),
@@ -1744,10 +1744,10 @@
 
 static struct branch_clk audio_core_lpaif_sec_osr_clk = {
 	.cbcr_reg = AUDIO_CORE_LPAIF_SEC_OSR_CBCR,
-	.parent = &audio_core_lpaif_sec_clk_src.c,
 	.has_sibling = 1,
 	.base = &virt_bases[LPASS_BASE],
 	.c = {
+		.parent = &audio_core_lpaif_sec_clk_src.c,
 		.dbg_name = "audio_core_lpaif_sec_osr_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(audio_core_lpaif_sec_osr_clk.c),
@@ -1767,10 +1767,10 @@
 
 static struct branch_clk audio_core_lpaif_pcm1_ibit_clk = {
 	.cbcr_reg = AUDIO_CORE_LPAIF_PCM1_IBIT_CBCR,
-	.parent = &audio_core_lpaif_pcm1_clk_src.c,
 	.has_sibling = 0,
 	.base = &virt_bases[LPASS_BASE],
 	.c = {
+		.parent = &audio_core_lpaif_pcm1_clk_src.c,
 		.dbg_name = "audio_core_lpaif_pcm1_ibit_clk",
 		.ops = &clk_ops_branch,
 		CLK_INIT(audio_core_lpaif_pcm1_ibit_clk.c),
@@ -2027,13 +2027,13 @@
 
 	CLK_LOOKUP("dma_bam_pclk", gcc_bam_dma_ahb_clk.c, "msm_sps"),
 	CLK_LOOKUP("iface_clk", gcc_blsp1_ahb_clk.c, "msm_serial_hsl.0"),
-	CLK_LOOKUP("iface_clk", gcc_blsp1_ahb_clk.c, "spi_qsd.1"),
+	CLK_LOOKUP("iface_clk", gcc_blsp1_ahb_clk.c, "f9924000.spi"),
 	CLK_LOOKUP("iface_clk", gcc_blsp1_ahb_clk.c, "f9925000.i2c"),
 	CLK_LOOKUP("iface_clk", gcc_blsp1_ahb_clk.c, ""),
 	CLK_LOOKUP("core_clk", gcc_blsp1_qup1_i2c_apps_clk.c, ""),
-	CLK_LOOKUP("core_clk", gcc_blsp1_qup1_spi_apps_clk.c, "spi_qsd.1"),
+	CLK_LOOKUP("core_clk", gcc_blsp1_qup1_spi_apps_clk.c, ""),
 	CLK_LOOKUP("core_clk", gcc_blsp1_qup2_i2c_apps_clk.c, ""),
-	CLK_LOOKUP("core_clk", gcc_blsp1_qup2_spi_apps_clk.c, ""),
+	CLK_LOOKUP("core_clk", gcc_blsp1_qup2_spi_apps_clk.c, "f9924000.spi"),
 	CLK_LOOKUP("core_clk", gcc_blsp1_qup3_i2c_apps_clk.c, "f9925000.i2c"),
 	CLK_LOOKUP("core_clk", gcc_blsp1_qup3_spi_apps_clk.c, ""),
 	CLK_LOOKUP("core_clk", gcc_blsp1_qup4_i2c_apps_clk.c, ""),
@@ -2058,6 +2058,7 @@
 	CLK_LOOKUP("core_clk", gcc_gp2_clk.c, ""),
 	CLK_LOOKUP("core_clk", gcc_gp3_clk.c, ""),
 
+	CLK_LOOKUP("iface_clk", gcc_prng_ahb_clk.c, "f9bff000.qcom,msm-rng"),
 	CLK_LOOKUP("core_src_clk", ipa_clk_src.c, "fd4c0000.qcom,ipa"),
 	CLK_LOOKUP("core_clk", gcc_ipa_clk.c, "fd4c0000.qcom,ipa"),
 	CLK_LOOKUP("bus_clk",  gcc_sys_noc_ipa_axi_clk.c, "fd4c0000.qcom,ipa"),
@@ -2085,10 +2086,15 @@
 	/* LPASS clocks */
 	CLK_LOOKUP("core_clk", audio_core_slimbus_core_clk.c, "fe12f000.slim"),
 	CLK_LOOKUP("iface_clk", audio_core_slimbus_lfabif_clk.c, ""),
-	CLK_LOOKUP("core_clk", audio_core_lpaif_pri_clk_src.c, ""),
-	CLK_LOOKUP("osr_clk", audio_core_lpaif_pri_osr_clk.c, ""),
-	CLK_LOOKUP("ebit_clk", audio_core_lpaif_pri_ebit_clk.c, ""),
-	CLK_LOOKUP("ibit_clk", audio_core_lpaif_pri_ibit_clk.c, ""),
+
+	CLK_LOOKUP("core_clk", audio_core_lpaif_pri_clk_src.c,
+		   "msm-dai-q6-mi2s.0"),
+	CLK_LOOKUP("osr_clk", audio_core_lpaif_pri_osr_clk.c,
+		   "msm-dai-q6-mi2s.0"),
+	CLK_LOOKUP("ebit_clk", audio_core_lpaif_pri_ebit_clk.c,
+		   "msm-dai-q6-mi2s.0"),
+	CLK_LOOKUP("ibit_clk", audio_core_lpaif_pri_ibit_clk.c,
+		   "msm-dai-q6-mi2s.0"),
 	CLK_LOOKUP("core_clk", audio_core_lpaif_sec_clk_src.c, ""),
 	CLK_LOOKUP("osr_clk", audio_core_lpaif_sec_osr_clk.c, ""),
 	CLK_LOOKUP("ebit_clk", audio_core_lpaif_sec_ebit_clk.c, ""),
diff --git a/arch/arm/mach-msm/clock-local.c b/arch/arm/mach-msm/clock-local.c
index c43ca46..4432795 100644
--- a/arch/arm/mach-msm/clock-local.c
+++ b/arch/arm/mach-msm/clock-local.c
@@ -527,6 +527,7 @@
 	 * is called to make sure the MNCNTR_EN bit is set correctly.
 	 */
 	rcg->current_freq = nf;
+	c->parent = nf->src_clk;
 
 	/* Enable any clocks that were disabled. */
 	if (!rcg->bank_info) {
@@ -583,11 +584,6 @@
 	return (rcg->freq_tbl + n)->freq_hz;
 }
 
-static struct clk *rcg_clk_get_parent(struct clk *c)
-{
-	return to_rcg_clk(c)->current_freq->src_clk;
-}
-
 /* Disable hw clock gating if not set at boot */
 enum handoff branch_handoff(struct branch *b, struct clk *c)
 {
@@ -644,6 +640,7 @@
 		return HANDOFF_UNKNOWN_RATE;
 
 	rcg->current_freq = freq;
+	c->parent = freq->src_clk;
 	c->rate = freq->freq_hz;
 
 	return HANDOFF_ENABLED_CLK;
@@ -683,11 +680,6 @@
 	spin_unlock_irqrestore(&local_clock_reg_lock, flags);
 }
 
-static struct clk *branch_clk_get_parent(struct clk *c)
-{
-	return to_branch_clk(c)->parent;
-}
-
 static int branch_clk_is_enabled(struct clk *c)
 {
 	return to_branch_clk(c)->enabled;
@@ -834,7 +826,6 @@
 	.in_hwcg_mode = branch_clk_in_hwcg_mode,
 	.is_enabled = branch_clk_is_enabled,
 	.reset = branch_clk_reset,
-	.get_parent = branch_clk_get_parent,
 	.handoff = branch_clk_handoff,
 	.set_flags = branch_clk_set_flags,
 };
@@ -843,7 +834,6 @@
 	.prepare = branch_clk_enable,
 	.unprepare = branch_clk_disable,
 	.is_enabled = branch_clk_is_enabled,
-	.get_parent = branch_clk_get_parent,
 	.handoff = branch_clk_handoff,
 };
 
@@ -870,7 +860,6 @@
 	.is_enabled = rcg_clk_is_enabled,
 	.round_rate = rcg_clk_round_rate,
 	.reset = rcg_clk_reset,
-	.get_parent = rcg_clk_get_parent,
 	.set_flags = rcg_clk_set_flags,
 };
 
diff --git a/arch/arm/mach-msm/clock-local.h b/arch/arm/mach-msm/clock-local.h
index fca6486..ff6dc69 100644
--- a/arch/arm/mach-msm/clock-local.h
+++ b/arch/arm/mach-msm/clock-local.h
@@ -235,7 +235,6 @@
  * struct branch_clk - branch
  * @enabled: true if clock is on, false otherwise
  * @b: branch
- * @parent: clock source
  * @c: clock
  *
  * An on/off switch with a rate derived from the parent.
@@ -243,7 +242,6 @@
 struct branch_clk {
 	bool enabled;
 	struct branch b;
-	struct clk *parent;
 	struct clk c;
 };
 
diff --git a/arch/arm/mach-msm/clock-local2.c b/arch/arm/mach-msm/clock-local2.c
index b9c3036..5923951 100644
--- a/arch/arm/mach-msm/clock-local2.c
+++ b/arch/arm/mach-msm/clock-local2.c
@@ -206,6 +206,7 @@
 		clk_unprepare(cf->src_clk);
 
 	rcg->current_freq = nf;
+	c->parent = nf->src_clk;
 
 	return 0;
 }
@@ -234,11 +235,6 @@
 	return (rcg->freq_tbl + n)->freq_hz;
 }
 
-static struct clk *rcg_clk_get_parent(struct clk *c)
-{
-	return to_rcg_clk(c)->current_freq->src_clk;
-}
-
 static enum handoff _rcg_clk_handoff(struct rcg_clk *rcg, int has_mnd)
 {
 	u32 n_regval = 0, m_regval = 0, d_regval = 0;
@@ -306,6 +302,7 @@
 		return HANDOFF_UNKNOWN_RATE;
 
 	rcg->current_freq = freq;
+	rcg->c.parent = freq->src_clk;
 	rcg->c.rate = freq->freq_hz;
 
 	return HANDOFF_ENABLED_CLK;
@@ -431,7 +428,7 @@
 		return branch_cdiv_set_rate(branch, rate);
 
 	if (!branch->has_sibling)
-		return clk_set_rate(branch->parent, rate);
+		return clk_set_rate(c->parent, rate);
 
 	return -EPERM;
 }
@@ -444,7 +441,7 @@
 		return rate <= (branch->max_div) ? rate : -EPERM;
 
 	if (!branch->has_sibling)
-		return clk_round_rate(branch->parent, rate);
+		return clk_round_rate(c->parent, rate);
 
 	return -EPERM;
 }
@@ -457,16 +454,11 @@
 		return branch->c.rate;
 
 	if (!branch->has_sibling)
-		return clk_get_rate(branch->parent);
+		return clk_get_rate(c->parent);
 
 	return 0;
 }
 
-static struct clk *branch_clk_get_parent(struct clk *c)
-{
-	return to_branch_clk(c)->parent;
-}
-
 static int branch_clk_list_rate(struct clk *c, unsigned n)
 {
 	struct branch_clk *branch = to_branch_clk(c);
@@ -474,8 +466,8 @@
 	if (branch->has_sibling == 1)
 		return -ENXIO;
 
-	if (branch->parent && branch->parent->ops->list_rate)
-		return branch->parent->ops->list_rate(branch->parent, n);
+	if (c->parent && c->parent->ops->list_rate)
+		return c->parent->ops->list_rate(c->parent, n);
 	else
 		return -ENXIO;
 }
@@ -489,9 +481,9 @@
 	if ((cbcr_regval & CBCR_BRANCH_OFF_BIT))
 		return HANDOFF_DISABLED_CLK;
 
-	if (branch->parent) {
-		if (branch->parent->ops->handoff)
-			return branch->parent->ops->handoff(branch->parent);
+	if (c->parent) {
+		if (c->parent->ops->handoff)
+			return c->parent->ops->handoff(c->parent);
 	}
 
 	return HANDOFF_ENABLED_CLK;
@@ -603,7 +595,6 @@
 	.set_rate = rcg_clk_set_rate,
 	.list_rate = rcg_clk_list_rate,
 	.round_rate = rcg_clk_round_rate,
-	.get_parent = rcg_clk_get_parent,
 	.handoff = rcg_clk_handoff,
 };
 
@@ -612,7 +603,6 @@
 	.set_rate = rcg_clk_set_rate,
 	.list_rate = rcg_clk_list_rate,
 	.round_rate = rcg_clk_round_rate,
-	.get_parent = rcg_clk_get_parent,
 	.handoff = rcg_mnd_clk_handoff,
 };
 
@@ -624,7 +614,6 @@
 	.list_rate = branch_clk_list_rate,
 	.round_rate = branch_clk_round_rate,
 	.reset = branch_clk_reset,
-	.get_parent = branch_clk_get_parent,
 	.handoff = branch_clk_handoff,
 };
 
diff --git a/arch/arm/mach-msm/clock-local2.h b/arch/arm/mach-msm/clock-local2.h
index 101dc2d..a6d2ed6 100644
--- a/arch/arm/mach-msm/clock-local2.h
+++ b/arch/arm/mach-msm/clock-local2.h
@@ -87,7 +87,6 @@
 /**
  * struct branch_clk - branch clock
  * @set_rate: Set the frequency of this branch clock.
- * @parent: clock source
  * @c: clk
  * @cbcr_reg: branch control register
  * @bcr_reg: block reset register
@@ -99,7 +98,6 @@
  */
 struct branch_clk {
 	void   (*set_rate)(struct branch_clk *, struct clk_freq_tbl *);
-	struct clk *parent;
 	struct clk c;
 	const u32 cbcr_reg;
 	const u32 bcr_reg;
diff --git a/arch/arm/mach-msm/clock-pll.c b/arch/arm/mach-msm/clock-pll.c
index 240f4e4..8e11d37 100644
--- a/arch/arm/mach-msm/clock-pll.c
+++ b/arch/arm/mach-msm/clock-pll.c
@@ -98,11 +98,6 @@
 	spin_unlock_irqrestore(&pll_reg_lock, flags);
 }
 
-static struct clk *pll_vote_clk_get_parent(struct clk *c)
-{
-	return to_pll_vote_clk(c)->parent;
-}
-
 static int pll_vote_clk_is_enabled(struct clk *c)
 {
 	struct pll_vote_clk *pllv = to_pll_vote_clk(c);
@@ -122,7 +117,6 @@
 	.enable = pll_vote_clk_enable,
 	.disable = pll_vote_clk_disable,
 	.is_enabled = pll_vote_clk_is_enabled,
-	.get_parent = pll_vote_clk_get_parent,
 	.handoff = pll_vote_clk_handoff,
 };
 
@@ -229,11 +223,6 @@
 	return HANDOFF_DISABLED_CLK;
 }
 
-static struct clk *local_pll_clk_get_parent(struct clk *c)
-{
-	return to_pll_clk(c)->parent;
-}
-
 static int local_pll_clk_set_rate(struct clk *c, unsigned long rate)
 {
 	struct pll_freq_tbl *nf;
@@ -346,7 +335,6 @@
 	.disable = local_pll_clk_disable,
 	.set_rate = local_pll_clk_set_rate,
 	.handoff = local_pll_clk_handoff,
-	.get_parent = local_pll_clk_get_parent,
 };
 
 struct pll_rate {
@@ -537,7 +525,6 @@
 	.enable = pll_acpu_vote_clk_enable,
 	.disable = pll_acpu_vote_clk_disable,
 	.is_enabled = pll_vote_clk_is_enabled,
-	.get_parent = pll_vote_clk_get_parent,
 };
 
 static void __init __set_fsm_mode(void __iomem *mode_reg,
diff --git a/arch/arm/mach-msm/clock-pll.h b/arch/arm/mach-msm/clock-pll.h
index 33b35a8..cb334d7 100644
--- a/arch/arm/mach-msm/clock-pll.h
+++ b/arch/arm/mach-msm/clock-pll.h
@@ -35,7 +35,6 @@
  * any HW voting
  * @id: PLL ID
  * @mode_reg: enable register
- * @parent: clock source
  * @c: clock
  */
 struct pll_shared_clk {
@@ -104,7 +103,6 @@
  * @en_mask: ORed with @en_reg to enable the clock
  * @status_mask: ANDed with @status_reg to determine if PLL is active.
  * @status_reg: status register
- * @parent: clock source
  * @c: clock
  */
 struct pll_vote_clk {
@@ -115,7 +113,6 @@
 	void __iomem *const status_reg;
 	const u32 status_mask;
 
-	struct clk *parent;
 	struct clk c;
 	void *const __iomem *base;
 };
@@ -144,7 +141,6 @@
  * @status_reg: status register, contains the lock detection bit
  * @masks: masks used for settings in config_reg
  * @freq_tbl: pll freq table
- * @parent: clock source
  * @c: clk
  * @base: pointer to base address of ioremapped registers.
  */
@@ -159,7 +155,6 @@
 	struct pll_config_masks masks;
 	struct pll_freq_tbl *freq_tbl;
 
-	struct clk *parent;
 	struct clk c;
 	void *const __iomem *base;
 };
diff --git a/arch/arm/mach-msm/clock-voter.c b/arch/arm/mach-msm/clock-voter.c
index fa170bf4..7421ba6 100644
--- a/arch/arm/mach-msm/clock-voter.c
+++ b/arch/arm/mach-msm/clock-voter.c
@@ -43,7 +43,7 @@
 	mutex_lock(&voter_clk_lock);
 
 	if (v->enabled) {
-		struct clk *parent = v->parent;
+		struct clk *parent = clk->parent;
 
 		/*
 		 * Get the aggregate rate without this clock's vote and update
@@ -79,7 +79,7 @@
 	struct clk_voter *v = to_clk_voter(clk);
 
 	mutex_lock(&voter_clk_lock);
-	parent = v->parent;
+	parent = clk->parent;
 
 	/*
 	 * Increase the rate if this clock is voting for a higher rate
@@ -105,7 +105,7 @@
 	struct clk_voter *v = to_clk_voter(clk);
 
 	mutex_lock(&voter_clk_lock);
-	parent = v->parent;
+	parent = clk->parent;
 
 	/*
 	 * Decrease the rate if this clock was the only one voting for
@@ -129,14 +129,7 @@
 
 static long voter_clk_round_rate(struct clk *clk, unsigned long rate)
 {
-	struct clk_voter *v = to_clk_voter(clk);
-	return clk_round_rate(v->parent, rate);
-}
-
-static struct clk *voter_clk_get_parent(struct clk *clk)
-{
-	struct clk_voter *v = to_clk_voter(clk);
-	return v->parent;
+	return clk_round_rate(clk->parent, rate);
 }
 
 static bool voter_clk_is_local(struct clk *clk)
@@ -159,7 +152,6 @@
 	.set_rate = voter_clk_set_rate,
 	.is_enabled = voter_clk_is_enabled,
 	.round_rate = voter_clk_round_rate,
-	.get_parent = voter_clk_get_parent,
 	.is_local = voter_clk_is_local,
 	.handoff = voter_clk_handoff,
 };
diff --git a/arch/arm/mach-msm/clock-voter.h b/arch/arm/mach-msm/clock-voter.h
index 82c071b..eb55a12 100644
--- a/arch/arm/mach-msm/clock-voter.h
+++ b/arch/arm/mach-msm/clock-voter.h
@@ -21,7 +21,6 @@
 
 struct clk_voter {
 	bool enabled;
-	struct clk *parent;
 	struct clk c;
 };
 
@@ -32,8 +31,8 @@
 
 #define DEFINE_CLK_VOTER(clk_name, _parent, _default_rate) \
 	struct clk_voter clk_name = { \
-		.parent = _parent, \
 		.c = { \
+			.parent = _parent, \
 			.dbg_name = #clk_name, \
 			.ops = &clk_ops_voter, \
 			.rate = _default_rate, \
diff --git a/arch/arm/mach-msm/clock.c b/arch/arm/mach-msm/clock.c
index e9dd974..c2bf5ba 100644
--- a/arch/arm/mach-msm/clock.c
+++ b/arch/arm/mach-msm/clock.c
@@ -163,7 +163,7 @@
 
 	mutex_lock(&clk->prepare_lock);
 	if (clk->prepare_count == 0) {
-		parent = clk_get_parent(clk);
+		parent = clk->parent;
 
 		ret = clk_prepare(parent);
 		if (ret)
@@ -213,7 +213,7 @@
 	WARN(!clk->prepare_count,
 			"%s: Don't call enable on unprepared clocks\n", name);
 	if (clk->count == 0) {
-		parent = clk_get_parent(clk);
+		parent = clk->parent;
 
 		ret = clk_enable(parent);
 		if (ret)
@@ -258,7 +258,7 @@
 	if (WARN(clk->count == 0, "%s is unbalanced", name))
 		goto out;
 	if (clk->count == 1) {
-		struct clk *parent = clk_get_parent(clk);
+		struct clk *parent = clk->parent;
 
 		trace_clock_disable(name, 0, smp_processor_id());
 		if (clk->ops->disable)
@@ -283,7 +283,7 @@
 	if (WARN(!clk->prepare_count, "%s is unbalanced (prepare)", name))
 		goto out;
 	if (clk->prepare_count == 1) {
-		struct clk *parent = clk_get_parent(clk);
+		struct clk *parent = clk->parent;
 
 		WARN(clk->count,
 			"%s: Don't call unprepare when the clock is enabled\n",
@@ -411,10 +411,7 @@
 	if (IS_ERR_OR_NULL(clk))
 		return NULL;
 
-	if (!clk->ops->get_parent)
-		return NULL;
-
-	return clk->ops->get_parent(clk);
+	return clk->parent;
 }
 EXPORT_SYMBOL(clk_get_parent);
 
@@ -438,7 +435,7 @@
 
 	for (n = 0; n < num_clocks; n++) {
 		clk = clock_tbl[n].clk;
-		parent = clk_get_parent(clk);
+		parent = clk->parent;
 		if (parent && list_empty(&clk->siblings))
 			list_add(&clk->siblings, &parent->children);
 	}
@@ -492,11 +489,11 @@
 
 	/*
 	 * Handoff functions for children must be called before their parents'
-	 * so that the correct parent is returned by the clk_get_parent() below.
+	 * so that the correct parent is available below.
 	 */
 	ret = clk->ops->handoff(clk);
 	if (ret == HANDOFF_ENABLED_CLK) {
-		ret = __handoff_clk(clk_get_parent(clk));
+		ret = __handoff_clk(clk->parent);
 		if (ret == HANDOFF_ENABLED_CLK) {
 			h = kmalloc(sizeof(*h), GFP_KERNEL);
 			if (!h) {
diff --git a/arch/arm/mach-msm/devices-8064.c b/arch/arm/mach-msm/devices-8064.c
index abc0e6a..f559629 100644
--- a/arch/arm/mach-msm/devices-8064.c
+++ b/arch/arm/mach-msm/devices-8064.c
@@ -2726,18 +2726,27 @@
 	.resource	= i2s_mdm_resources,
 };
 
-static struct msm_dcvs_freq_entry apq8064_freq[] = {
-	{ 384000, 900,  0, 0, 0},
-	{ 594000, 950,  0, 0, 0},
-	{ 702000, 975,  0, 0, 0},
-	{1026000, 1075, 0, 0, 0},
-	{1242000, 1150, 0, 100, 100},
-	{1458000, 1188, 0, 100, 100},
-	{1512000, 1200, 1, 100, 100},
+static struct msm_dcvs_sync_rule apq8064_dcvs_sync_rules[] = {
+	{1026000,	400000},
+	{384000,	200000},
+	{0,		128000},
+};
+
+static struct msm_dcvs_platform_data apq8064_dcvs_data = {
+	.sync_rules	= apq8064_dcvs_sync_rules,
+	.num_sync_rules = ARRAY_SIZE(apq8064_dcvs_sync_rules),
+	.gpu_max_nom_khz = 320000,
+};
+
+struct platform_device apq8064_dcvs_device = {
+	.name		= "dcvs",
+	.id		= -1,
+	.dev		= {
+		.platform_data = &apq8064_dcvs_data,
+	},
 };
 
 static struct msm_dcvs_core_info apq8064_core_info = {
-	.freq_tbl		= &apq8064_freq[0],
 	.num_cores		= 4,
 	.sensors		= (int[]){7, 8, 9, 10},
 	.thermal_poll_ms	= 60000,
@@ -2772,7 +2781,7 @@
 	},
 	.power_param		= {
 		.current_temp	= 25,
-		.num_freq	= ARRAY_SIZE(apq8064_freq),
+		.num_freq	= 0, /* set at runtime */
 	}
 };
 
diff --git a/arch/arm/mach-msm/devices-8930.c b/arch/arm/mach-msm/devices-8930.c
index c3be6ce..0faf500 100644
--- a/arch/arm/mach-msm/devices-8930.c
+++ b/arch/arm/mach-msm/devices-8930.c
@@ -669,6 +669,18 @@
 	.id		= -1,
 };
 
+static struct acpuclk_platform_data acpuclk_8930ab_pdata = {
+	.uses_pm8917 = false,
+};
+
+struct platform_device msm8930ab_device_acpuclk = {
+	.name		= "acpuclk-8930ab",
+	.id		= -1,
+	.dev = {
+		.platform_data = &acpuclk_8930ab_pdata,
+	},
+};
+
 static struct fs_driver_data gfx3d_fs_data = {
 	.clks = (struct fs_clk_data[]){
 		{ .name = "core_clk", .reset_rate = 27000000 },
diff --git a/arch/arm/mach-msm/devices-9615.c b/arch/arm/mach-msm/devices-9615.c
index 1ba408f..e55e9a7 100644
--- a/arch/arm/mach-msm/devices-9615.c
+++ b/arch/arm/mach-msm/devices-9615.c
@@ -562,6 +562,21 @@
 	.id	= -1,
 };
 
+struct platform_device msm_cpudai_incall_music_rx = {
+	.name   = "msm-dai-q6",
+	.id     = 0x8005,
+};
+
+struct platform_device msm_cpudai_incall_record_rx = {
+	.name   = "msm-dai-q6",
+	.id     = 0x8004,
+};
+
+struct platform_device msm_cpudai_incall_record_tx = {
+	.name   = "msm-dai-q6",
+	.id     = 0x8003,
+};
+
 struct platform_device msm_i2s_cpudai0 = {
 	.name   = "msm-dai-q6",
 	.id     = PRIMARY_I2S_RX,
@@ -584,6 +599,10 @@
 	.name	= "msm-voip-dsp",
 	.id	= -1,
 };
+struct platform_device msm_cpudai_stub = {
+	.name = "msm-dai-stub",
+	.id = -1,
+};
 struct platform_device msm_dtmf = {
 	.name	= "msm-pcm-dtmf",
 	.id	= -1,
diff --git a/arch/arm/mach-msm/devices-iommu.c b/arch/arm/mach-msm/devices-iommu.c
index 091a8e8..983b13e 100644
--- a/arch/arm/mach-msm/devices-iommu.c
+++ b/arch/arm/mach-msm/devices-iommu.c
@@ -360,25 +360,25 @@
 static struct msm_iommu_dev gfx3d_iommu = {
 	.name = "gfx3d",
 	.ncb = 3,
-	.ttbr_split = 1,
+	.ttbr_split = 0,
 };
 
 static struct msm_iommu_dev gfx3d1_iommu = {
 	.name = "gfx3d1",
 	.ncb = 3,
-	.ttbr_split = 1,
+	.ttbr_split = 0,
 };
 
 static struct msm_iommu_dev gfx2d0_iommu = {
 	.name = "gfx2d0",
 	.ncb = 2,
-	.ttbr_split = 1,
+	.ttbr_split = 0,
 };
 
 static struct msm_iommu_dev gfx2d1_iommu = {
 	.name = "gfx2d1",
 	.ncb = 2,
-	.ttbr_split = 1,
+	.ttbr_split = 0,
 };
 
 static struct msm_iommu_dev vcap_iommu = {
diff --git a/arch/arm/mach-msm/devices-msm7x27a.c b/arch/arm/mach-msm/devices-msm7x27a.c
index 5296048..b4ef76d2 100644
--- a/arch/arm/mach-msm/devices-msm7x27a.c
+++ b/arch/arm/mach-msm/devices-msm7x27a.c
@@ -1917,8 +1917,6 @@
 	else if (msm8625_cpu_id() == MSM8625)
 		msm_cpr_pdata.max_freq = 1008000;
 
-	msm_cpr_clk_enable();
-
 	platform_device_register(&msm8625_vp_device);
 	platform_device_register(&msm8625_device_cpr);
 }
diff --git a/arch/arm/mach-msm/devices.h b/arch/arm/mach-msm/devices.h
index cd99628..bd5a20f 100644
--- a/arch/arm/mach-msm/devices.h
+++ b/arch/arm/mach-msm/devices.h
@@ -250,6 +250,8 @@
 extern struct platform_device msm_i2s_cpudai1;
 extern struct platform_device msm_i2s_cpudai4;
 extern struct platform_device msm_i2s_cpudai5;
+extern struct platform_device msm_cpudai_stub;
+
 extern struct platform_device msm_pil_q6v3;
 extern struct platform_device msm_pil_modem;
 extern struct platform_device msm_pil_tzapps;
@@ -408,6 +410,7 @@
 extern struct platform_device *msm_8974_stub_regulator_devices[];
 extern int msm_8974_stub_regulator_devices_len;
 
+extern struct platform_device apq8064_dcvs_device;
 extern struct platform_device apq8064_msm_gov_device;
 
 extern struct platform_device msm_bus_8930_apps_fabric;
@@ -452,6 +455,7 @@
 extern struct platform_device msm8x60_device_acpuclk;
 extern struct platform_device msm8930_device_acpuclk;
 extern struct platform_device msm8930aa_device_acpuclk;
+extern struct platform_device msm8930ab_device_acpuclk;
 extern struct platform_device msm8960_device_acpuclk;
 extern struct platform_device msm8960ab_device_acpuclk;
 extern struct platform_device msm9615_device_acpuclk;
diff --git a/arch/arm/mach-msm/include/mach/board.h b/arch/arm/mach-msm/include/mach/board.h
index 9c5e52c..cbe2040 100644
--- a/arch/arm/mach-msm/include/mach/board.h
+++ b/arch/arm/mach-msm/include/mach/board.h
@@ -465,6 +465,7 @@
 	char dlane_swap;
 	void (*dsi_pwm_cfg)(void);
 	char enable_wled_bl_ctrl;
+	void (*gpio_set_backlight)(int bl_level);
 };
 
 struct lvds_panel_platform_data {
diff --git a/arch/arm/mach-msm/include/mach/camera.h b/arch/arm/mach-msm/include/mach/camera.h
index 14fd092..3dfa659c 100644
--- a/arch/arm/mach-msm/include/mach/camera.h
+++ b/arch/arm/mach-msm/include/mach/camera.h
@@ -605,6 +605,7 @@
 	S_DEFAULT,
 	S_LIVESHOT,
 	S_DUAL,
+	S_ADV_VIDEO,
 	S_EXIT
 };
 
diff --git a/arch/arm/mach-msm/include/mach/clk-provider.h b/arch/arm/mach-msm/include/mach/clk-provider.h
index d47e88e..0f2feaa 100644
--- a/arch/arm/mach-msm/include/mach/clk-provider.h
+++ b/arch/arm/mach-msm/include/mach/clk-provider.h
@@ -103,6 +103,7 @@
  * @depends: non-direct parent of clock to enable when this clock is enabled
  * @vdd_class: voltage scaling requirement class
  * @fmax: maximum frequency in Hz supported at each voltage level
+ * @parent: the current source of this clock
  */
 struct clk {
 	uint32_t flags;
@@ -113,6 +114,7 @@
 	unsigned long *fmax;
 	int num_fmax;
 	unsigned long rate;
+	struct clk *parent;
 
 	struct list_head children;
 	struct list_head siblings;
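
Reviewer note: the new @parent field above is the crux of this series. The parent pointer moves out of each clock subtype (branch_clk, clk_voter, pll_vote_clk, pll_clk) into the common struct clk, so static clock definitions set .parent inside the embedded .c member and clk_get_parent() reduces to a field read, letting the per-ops get_parent callbacks be deleted. A minimal sketch of the resulting shape, not part of the patch (the clock names and register macro below are hypothetical):

	/* Illustrative only -- mirrors the pattern applied throughout this patch. */
	static struct branch_clk example_branch_clk = {
		.cbcr_reg = EXAMPLE_CBCR,		/* hypothetical register offset */
		.has_sibling = 0,
		.base = &virt_bases[GCC_BASE],
		.c = {
			.parent = &example_clk_src.c,	/* parent now lives in struct clk */
			.dbg_name = "example_branch_clk",
			.ops = &clk_ops_branch,
			CLK_INIT(example_branch_clk.c),
		},
	};

With the parent held in struct clk, the generic clk_get_parent() in clock.c (see the clock.c hunk below) simply returns clk->parent for every clock type.
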
diff --git a/arch/arm/mach-msm/include/mach/dma.h b/arch/arm/mach-msm/include/mach/dma.h
index 988b249..5fcf579 100644
--- a/arch/arm/mach-msm/include/mach/dma.h
+++ b/arch/arm/mach-msm/include/mach/dma.h
@@ -269,7 +269,7 @@
 #define DMOV8064_CE_OUT_CHAN       1
 #define DMOV8064_CE_OUT_CRCI       15
 
-#define DMOV8064_TSIF_CHAN         2
+#define DMOV8064_TSIF_CHAN         4
 #define DMOV8064_TSIF_CRCI         1
 
 /* channels for MPQ8064 */
diff --git a/arch/arm/mach-msm/include/mach/iommu.h b/arch/arm/mach-msm/include/mach/iommu.h
index 054e70c..57b4bd3 100644
--- a/arch/arm/mach-msm/include/mach/iommu.h
+++ b/arch/arm/mach-msm/include/mach/iommu.h
@@ -161,6 +161,12 @@
 }
 #endif
 
+/*
+ * Function to program the global registers of an IOMMU securely.
+ * This should only be called on IOMMUs for which kernel programming
+ * of global registers is not possible.
+ */
+int msm_iommu_sec_program_iommu(int sec_id);
 
 static inline int msm_soc_version_supports_iommu_v1(void)
 {
diff --git a/arch/arm/mach-msm/include/mach/ipa.h b/arch/arm/mach-msm/include/mach/ipa.h
new file mode 100644
index 0000000..dae6d3b
--- /dev/null
+++ b/arch/arm/mach-msm/include/mach/ipa.h
@@ -0,0 +1,732 @@
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _IPA_H_
+#define _IPA_H_
+
+#include <linux/msm_ipa.h>
+#include <linux/skbuff.h>
+#include <linux/types.h>
+#include <mach/sps.h>
+
+/**
+ * enum ipa_nat_en_type - NAT setting type in IPA end-point
+ */
+enum ipa_nat_en_type {
+	IPA_BYPASS_NAT,
+	IPA_SRC_NAT,
+	IPA_DST_NAT,
+};
+
+/**
+ * enum ipa_mode_type - mode setting type in IPA end-point
+ * @BASIC: basic mode
+ * @ENABLE_FRAMING_HDLC: not currently supported
+ * @ENABLE_DEFRAMING_HDLC: not currently supported
+ */
+enum ipa_mode_type {
+	IPA_BASIC,
+	IPA_ENABLE_FRAMING_HDLC,
+	IPA_ENABLE_DEFRAMING_HDLC,
+	IPA_DMA,
+};
+
+/**
+ *  enum ipa_aggr_en_type - aggregation setting type in IPA
+ *  end-point
+ */
+enum ipa_aggr_en_type {
+	IPA_BYPASS_AGGR,
+	IPA_ENABLE_AGGR,
+	IPA_ENABLE_DEAGGR,
+};
+
+/**
+ *  enum ipa_aggr_type - type of aggregation in IPA end-point
+ */
+enum ipa_aggr_type {
+	IPA_MBIM_16,
+	IPA_MBIM_32,
+	IPA_TLP,
+};
+
+/**
+ * enum ipa_aggr_mode - global aggregation mode
+ */
+enum ipa_aggr_mode {
+	IPA_MBIM,
+	IPA_QCNCM,
+};
+
+/**
+ * enum ipa_dp_evt_type - type of event client callback is
+ * invoked for on data path
+ * @IPA_RECEIVE: data is struct sk_buff
+ * @IPA_WRITE_DONE: data is struct sk_buff
+ */
+enum ipa_dp_evt_type {
+	IPA_RECEIVE,
+	IPA_WRITE_DONE,
+};
+
+/**
+ * struct ipa_ep_cfg_nat - NAT configuration in IPA end-point
+ * @nat_en:	This defines the default NAT mode for the pipe: in case of
+ *		filter miss - the default NAT mode defines the NATing operation
+ *		on the packet. Valid for Input Pipes only (IPA consumer)
+ */
+struct ipa_ep_cfg_nat {
+	enum ipa_nat_en_type nat_en;
+};
+
+/**
+ * struct ipa_ep_cfg_hdr - header configuration in IPA end-point
+ * @hdr_len:	Header length in bytes to be added/removed. Assuming header len
+ *		is constant per endpoint. Valid for both Input and Output Pipes
+ * @hdr_ofst_metadata_valid:	0: Metadata_Ofst  value is invalid, i.e., no
+ *				metadata within header.
+ *				1: Metadata_Ofst  value is valid, i.e., metadata
+ *				within header is in offset Metadata_Ofst Valid
+ *				for Input Pipes only (IPA Consumer) (for output
+ *				pipes, metadata already set within the header)
+ * @hdr_ofst_metadata:	Offset within header in which metadata resides.
+ *			Size of metadata - 4 bytes.
+ *			Example - Stream ID/SSID/mux ID.
+ *			Valid for Input Pipes only (IPA Consumer) (for output
+ *			pipes, metadata already set within the header)
+ * @hdr_additional_const_len:	Defines the constant length that should be added
+ *				to the payload length in order for IPA to update
+ *				correctly the length field within the header
+ *				(valid only in case Hdr_Ofst_Pkt_Size_Valid=1)
+ *				Valid for Output Pipes (IPA Producer)
+ * @hdr_ofst_pkt_size_valid:	0: Hdr_Ofst_Pkt_Size  value is invalid, i.e., no
+ *				length field within the inserted header
+ *				1: Hdr_Ofst_Pkt_Size  value is valid, i.e., a
+ *				packet length field resides within the header
+ *				Valid for Output Pipes (IPA Producer)
+ * @hdr_ofst_pkt_size:	Offset within header in which the packet size resides. Upon
+ *			Header Insertion, IPA will update this field within the
+ *			header with the packet length. The assumption is that the
+ *			header length field size is constant and is 2 bytes.
+ *			Valid for Output Pipes (IPA Producer)
+ * @hdr_a5_mux:	Determines whether A5 Mux header should be added to the packet.
+ *		This bit is valid only when Hdr_En=01 (Header Insertion)
+ *		SW should set this bit for IPA-to-A5 pipes.
+ *		0: Do not insert A5 Mux Header
+ *		1: Insert A5 Mux Header
+ *		Valid for Output Pipes (IPA Producer)
+ */
+struct ipa_ep_cfg_hdr {
+	u32 hdr_len;
+	u32 hdr_ofst_metadata_valid;
+	u32 hdr_ofst_metadata;
+	u32 hdr_additional_const_len;
+	u32 hdr_ofst_pkt_size_valid;
+	u32 hdr_ofst_pkt_size;
+	u32 hdr_a5_mux;
+};
+
+/**
+ * struct ipa_ep_cfg_mode - mode configuration in IPA end-point
+ * @mode:	Valid for Input Pipes only (IPA Consumer)
+ * @dst:	This parameter specifies the output pipe to which the packets
+ *		will be routed.
+ *		This parameter is valid for Mode=DMA and not valid for
+ *		Mode=Basic
+ *		Valid for Input Pipes only (IPA Consumer)
+ */
+struct ipa_ep_cfg_mode {
+	enum ipa_mode_type mode;
+	enum ipa_client_type dst;
+};
+
+/**
+ * struct ipa_ep_cfg_aggr - aggregation configuration in IPA end-point
+ * @aggr_en:	Valid for both Input and Output Pipes
+ * @aggr:	Valid for both Input and Output Pipes
+ * @aggr_byte_limit:	Limit of aggregated packet size in KB (<=32KB). When set
+ *			to 0, there is no size limitation on the aggregation.
+ *			When both Aggr_Byte_Limit and Aggr_Time_Limit are set
+ *			to 0, there is no aggregation; every packet is sent
+ *			independently according to the aggregation structure.
+ *			Valid for Output Pipes only (IPA Producer)
+ * @aggr_time_limit:	Timer to close an aggregated packet (<=32ms). When set to 0,
+ *			there is no time limitation on the aggregation. When
+ *			both Aggr_Byte_Limit and Aggr_Time_Limit are set to 0,
+ *			there is no aggregation; every packet is sent
+ *			independently according to the aggregation structure.
+ *			Valid for Output Pipes only (IPA Producer)
+ */
+struct ipa_ep_cfg_aggr {
+	enum ipa_aggr_en_type aggr_en;
+	enum ipa_aggr_type aggr;
+	u32 aggr_byte_limit;
+	u32 aggr_time_limit;
+};
+
+/**
+ * struct ipa_ep_cfg_route - route configuration in IPA end-point
+ * @rt_tbl_hdl:	Defines the default routing table index to be used when no
+ *		filter rule matches; valid for Input Pipes only (IPA
+ *		Consumer). Clients should set this to 0, which causes the default
+ *		v4 and v6 routes set up internally by the IPA driver to be used for
+ *		this end-point.
+ */
+struct ipa_ep_cfg_route {
+	u32 rt_tbl_hdl;
+};
+
+/**
+ * struct ipa_ep_cfg - configuration of IPA end-point
+ * @nat:	NAT parameters
+ * @hdr:	Header parameters
+ * @mode:	Mode parameters
+ * @aggr:	Aggregation parameters
+ * @route:	Routing parameters
+ */
+struct ipa_ep_cfg {
+	struct ipa_ep_cfg_nat nat;
+	struct ipa_ep_cfg_hdr hdr;
+	struct ipa_ep_cfg_mode mode;
+	struct ipa_ep_cfg_aggr aggr;
+	struct ipa_ep_cfg_route route;
+};
+
+/**
+ * struct ipa_connect_params - low-level client connect input parameters. Either
+ * the client allocates the data and desc FIFOs and passes them in @data/@desc, OR
+ * it specifies the sizes and pipe_mem preference and IPA does the allocation.
+ *
+ * @ipa_ep_cfg:	IPA EP configuration
+ * @client:	type of "client"
+ * @client_bam_hdl:	 client SPS handle
+ * @client_ep_idx:	 client PER EP index
+ * @priv:	callback cookie
+ * @notify:	callback
+ *		priv - callback cookie; evt - type of event; data - data relevant
+ *		to event. May not be valid. See event_type enum for valid
+ *		cases.
+ * @desc_fifo_sz:	size of desc FIFO
+ * @data_fifo_sz:	size of data FIFO
+ * @pipe_mem_preferred:	if true, try to alloc the FIFOs in pipe mem, fallback
+ *			to sys mem if pipe mem alloc fails
+ * @desc:	desc FIFO meta-data when client has allocated it
+ * @data:	data FIFO meta-data when client has allocated it
+ */
+struct ipa_connect_params {
+	struct ipa_ep_cfg ipa_ep_cfg;
+	enum ipa_client_type client;
+	u32 client_bam_hdl;
+	u32 client_ep_idx;
+	void *priv;
+	void (*notify)(void *priv, enum ipa_dp_evt_type evt,
+			unsigned long data);
+	u32 desc_fifo_sz;
+	u32 data_fifo_sz;
+	bool pipe_mem_preferred;
+	struct sps_mem_buffer desc;
+	struct sps_mem_buffer data;
+};
+
+/**
+ *  struct ipa_sps_params - SPS related output parameters resulting from
+ *  low/high level client connect
+ *  @ipa_bam_hdl:	IPA SPS handle
+ *  @ipa_ep_idx:	IPA PER EP index
+ *  @desc:	desc FIFO meta-data
+ *  @data:	data FIFO meta-data
+ */
+struct ipa_sps_params {
+	u32 ipa_bam_hdl;
+	u32 ipa_ep_idx;
+	struct sps_mem_buffer desc;
+	struct sps_mem_buffer data;
+};
+
+/**
+ * struct ipa_tx_intf - interface tx properties
+ * @num_props:	number of tx properties
+ * @prop:	the tx properties array
+ */
+struct ipa_tx_intf {
+	u32 num_props;
+	struct ipa_ioc_tx_intf_prop *prop;
+};
+
+/**
+ * struct ipa_rx_intf - interface rx properties
+ * @num_props:	number of rx properties
+ * @prop:	the rx properties array
+ */
+struct ipa_rx_intf {
+	u32 num_props;
+	struct ipa_ioc_rx_intf_prop *prop;
+};
+
+/**
+ * struct ipa_sys_connect_params - information needed to setup an IPA end-point
+ * in system-BAM mode
+ * @ipa_ep_cfg:	IPA EP configuration
+ * @client:	the type of client who "owns" the EP
+ * @desc_fifo_sz:	size of desc FIFO
+ * @priv:	callback cookie
+ * @notify:	callback
+ *		priv - callback cookie
+ *		evt - type of event
+ *		data - data relevant to event.  May not be valid. See event_type
+ *		enum for valid cases.
+ */
+struct ipa_sys_connect_params {
+	struct ipa_ep_cfg ipa_ep_cfg;
+	enum ipa_client_type client;
+	u32 desc_fifo_sz;
+	void *priv;
+	void (*notify)(void *priv,
+			enum ipa_dp_evt_type evt,
+			unsigned long data);
+};
+
+/**
+ * struct ipa_msg_meta_wrapper - message meta-data wrapper
+ * @meta:	the meta-data itself
+ * @link:	opaque to client
+ * @meta_wrapper_free:	function to free the metadata wrapper when IPA driver
+ *			is done with it
+ */
+struct ipa_msg_meta_wrapper {
+	struct ipa_msg_meta meta;
+	struct list_head link;
+	void (*meta_wrapper_free)(struct ipa_msg_meta_wrapper *buff);
+};
+
+/**
+ * struct ipa_tx_meta - meta-data for the TX packet
+ * @mbim_stream_id:	the stream ID used in NDP signature
+ * @mbim_stream_id_valid:	 is above field valid?
+ */
+struct ipa_tx_meta {
+	u8 mbim_stream_id;
+	bool mbim_stream_id_valid;
+};
+
+/**
+ * struct ipa_msg_wrapper - message wrapper
+ * @msg:	the message buffer itself, MUST exist after call returns, will
+ *		be freed by IPA driver when it is done with it
+ * @link:	opaque to client
+ * @msg_free:	function to free the message when IPA driver is done with it
+ * @msg_wrapper_free:	function to free the message wrapper when IPA driver is
+ *			done with it
+ */
+struct ipa_msg_wrapper {
+	void *msg;
+	struct list_head link;
+	void (*msg_free)(void *msg);
+	void (*msg_wrapper_free)(struct ipa_msg_wrapper *buff);
+};
+
+/**
+ * typedef ipa_pull_fn - callback function
+ * @buf - [in] the buffer to populate the message into
+ * @sz - [in] the size of the buffer
+ *
+ * Callback function registered by a kernel client with the IPA driver so that
+ * the IPA driver can pull messages from the kernel client asynchronously.
+ *
+ * Returns how many bytes were copied into the buffer, negative on failure.
+ */
+typedef int (*ipa_pull_fn)(void *buf, uint16_t sz);
+
+#ifdef CONFIG_IPA
+
+/*
+ * Connect / Disconnect
+ */
+int ipa_connect(const struct ipa_connect_params *in, struct ipa_sps_params *sps,
+		u32 *clnt_hdl);
+int ipa_disconnect(u32 clnt_hdl);
+
+/*
+ * Configuration
+ */
+int ipa_cfg_ep(u32 clnt_hdl, const struct ipa_ep_cfg *ipa_ep_cfg);
+
+int ipa_cfg_ep_nat(u32 clnt_hdl, const struct ipa_ep_cfg_nat *ipa_ep_cfg);
+
+int ipa_cfg_ep_hdr(u32 clnt_hdl, const struct ipa_ep_cfg_hdr *ipa_ep_cfg);
+
+int ipa_cfg_ep_mode(u32 clnt_hdl, const struct ipa_ep_cfg_mode *ipa_ep_cfg);
+
+int ipa_cfg_ep_aggr(u32 clnt_hdl, const struct ipa_ep_cfg_aggr *ipa_ep_cfg);
+
+int ipa_cfg_ep_route(u32 clnt_hdl, const struct ipa_ep_cfg_route *ipa_ep_cfg);
+
+/*
+ * Header removal / addition
+ */
+int ipa_add_hdr(struct ipa_ioc_add_hdr *hdrs);
+
+int ipa_del_hdr(struct ipa_ioc_del_hdr *hdls);
+
+int ipa_commit_hdr(void);
+
+int ipa_reset_hdr(void);
+
+int ipa_get_hdr(struct ipa_ioc_get_hdr *lookup);
+
+int ipa_put_hdr(u32 hdr_hdl);
+
+int ipa_copy_hdr(struct ipa_ioc_copy_hdr *copy);
+
+/*
+ * Routing
+ */
+int ipa_add_rt_rule(struct ipa_ioc_add_rt_rule *rules);
+
+int ipa_del_rt_rule(struct ipa_ioc_del_rt_rule *hdls);
+
+int ipa_commit_rt(enum ipa_ip_type ip);
+
+int ipa_reset_rt(enum ipa_ip_type ip);
+
+int ipa_get_rt_tbl(struct ipa_ioc_get_rt_tbl *lookup);
+
+int ipa_put_rt_tbl(u32 rt_tbl_hdl);
+
+/*
+ * Filtering
+ */
+int ipa_add_flt_rule(struct ipa_ioc_add_flt_rule *rules);
+
+int ipa_del_flt_rule(struct ipa_ioc_del_flt_rule *hdls);
+
+int ipa_commit_flt(enum ipa_ip_type ip);
+
+int ipa_reset_flt(enum ipa_ip_type ip);
+
+/*
+ * NAT
+ */
+int allocate_nat_device(struct ipa_ioc_nat_alloc_mem *mem);
+
+int ipa_nat_init_cmd(struct ipa_ioc_v4_nat_init *init);
+
+int ipa_nat_dma_cmd(struct ipa_ioc_nat_dma_cmd *dma);
+
+int ipa_nat_del_cmd(struct ipa_ioc_v4_nat_del *del);
+
+/*
+ * Aggregation
+ */
+int ipa_set_aggr_mode(enum ipa_aggr_mode mode);
+
+int ipa_set_qcncm_ndp_sig(char sig[3]);
+
+int ipa_set_single_ndp_per_mbim(bool enable);
+
+/*
+ * rmnet bridge
+ */
+int rmnet_bridge_init(void);
+
+int rmnet_bridge_disconnect(void);
+
+int rmnet_bridge_connect(u32 producer_hdl,
+			 u32 consumer_hdl,
+			 int wwan_logical_channel_id);
+
+/*
+ * Data path
+ */
+int ipa_tx_dp(enum ipa_client_type dst, struct sk_buff *skb,
+		struct ipa_tx_meta *metadata);
+
+/*
+ * System pipes
+ */
+int ipa_setup_sys_pipe(struct ipa_sys_connect_params *sys_in, u32 *clnt_hdl);
+
+int ipa_teardown_sys_pipe(u32 clnt_hdl);
+
+#else
+
+/*
+ * Connect / Disconnect
+ */
+static inline int ipa_connect(const struct ipa_connect_params *in,
+		struct ipa_sps_params *sps,	u32 *clnt_hdl)
+{
+	return -EPERM;
+}
+
+static inline int ipa_disconnect(u32 clnt_hdl)
+{
+	return -EPERM;
+}
+
+
+/*
+ * Configuration
+ */
+static inline int ipa_cfg_ep(u32 clnt_hdl,
+		const struct ipa_ep_cfg *ipa_ep_cfg)
+{
+	return -EPERM;
+}
+
+
+static inline int ipa_cfg_ep_nat(u32 clnt_hdl,
+		const struct ipa_ep_cfg_nat *ipa_ep_cfg)
+{
+	return -EPERM;
+}
+
+
+static inline int ipa_cfg_ep_hdr(u32 clnt_hdl,
+		const struct ipa_ep_cfg_hdr *ipa_ep_cfg)
+{
+	return -EPERM;
+}
+
+
+static inline int ipa_cfg_ep_mode(u32 clnt_hdl,
+		const struct ipa_ep_cfg_mode *ipa_ep_cfg)
+{
+	return -EPERM;
+}
+
+
+static inline int ipa_cfg_ep_aggr(u32 clnt_hdl,
+		const struct ipa_ep_cfg_aggr *ipa_ep_cfg)
+{
+	return -EPERM;
+}
+
+
+static inline int ipa_cfg_ep_route(u32 clnt_hdl,
+		const struct ipa_ep_cfg_route *ipa_ep_cfg)
+{
+	return -EPERM;
+}
+
+
+/*
+ * Header removal / addition
+ */
+static inline int ipa_add_hdr(struct ipa_ioc_add_hdr *hdrs)
+{
+	return -EPERM;
+}
+
+
+static inline int ipa_del_hdr(struct ipa_ioc_del_hdr *hdls)
+{
+	return -EPERM;
+}
+
+
+static inline int ipa_commit_hdr(void)
+{
+	return -EPERM;
+}
+
+
+static inline int ipa_reset_hdr(void)
+{
+	return -EPERM;
+}
+
+
+static inline int ipa_get_hdr(struct ipa_ioc_get_hdr *lookup)
+{
+	return -EPERM;
+}
+
+
+static inline int ipa_put_hdr(u32 hdr_hdl)
+{
+	return -EPERM;
+}
+
+
+static inline int ipa_copy_hdr(struct ipa_ioc_copy_hdr *copy)
+{
+	return -EPERM;
+}
+
+
+/*
+ * Routing
+ */
+static inline int ipa_add_rt_rule(struct ipa_ioc_add_rt_rule *rules)
+{
+	return -EPERM;
+}
+
+
+static inline int ipa_del_rt_rule(struct ipa_ioc_del_rt_rule *hdls)
+{
+	return -EPERM;
+}
+
+
+static inline int ipa_commit_rt(enum ipa_ip_type ip)
+{
+	return -EPERM;
+}
+
+
+static inline int ipa_reset_rt(enum ipa_ip_type ip)
+{
+	return -EPERM;
+}
+
+
+static inline int ipa_get_rt_tbl(struct ipa_ioc_get_rt_tbl *lookup)
+{
+	return -EPERM;
+}
+
+
+static inline int ipa_put_rt_tbl(u32 rt_tbl_hdl)
+{
+	return -EPERM;
+}
+
+
+/*
+ * Filtering
+ */
+static inline int ipa_add_flt_rule(struct ipa_ioc_add_flt_rule *rules)
+{
+	return -EPERM;
+}
+
+
+static inline int ipa_del_flt_rule(struct ipa_ioc_del_flt_rule *hdls)
+{
+	return -EPERM;
+}
+
+
+static inline int ipa_commit_flt(enum ipa_ip_type ip)
+{
+	return -EPERM;
+}
+
+
+static inline int ipa_reset_flt(enum ipa_ip_type ip)
+{
+	return -EPERM;
+}
+
+
+/*
+ * NAT
+ */
+static inline int allocate_nat_device(struct ipa_ioc_nat_alloc_mem *mem)
+{
+	return -EPERM;
+}
+
+
+static inline int ipa_nat_init_cmd(struct ipa_ioc_v4_nat_init *init)
+{
+	return -EPERM;
+}
+
+
+static inline int ipa_nat_dma_cmd(struct ipa_ioc_nat_dma_cmd *dma)
+{
+	return -EPERM;
+}
+
+
+static inline int ipa_nat_del_cmd(struct ipa_ioc_v4_nat_del *del)
+{
+	return -EPERM;
+}
+
+
+/*
+ * Aggregation
+ */
+static inline int ipa_set_aggr_mode(enum ipa_aggr_mode mode)
+{
+	return -EPERM;
+}
+
+
+static inline int ipa_set_qcncm_ndp_sig(char sig[3])
+{
+	return -EPERM;
+}
+
+
+static inline int ipa_set_single_ndp_per_mbim(bool enable)
+{
+	return -EPERM;
+}
+
+
+/*
+ * rmnet bridge
+ */
+static inline int rmnet_bridge_init(void)
+{
+	return -EPERM;
+}
+
+
+static inline int rmnet_bridge_disconnect(void)
+{
+	return -EPERM;
+}
+
+
+static inline int rmnet_bridge_connect(u32 producer_hdl,
+			 u32 consumer_hdl,
+			 int wwan_logical_channel_id)
+{
+	return -EPERM;
+}
+
+
+/*
+ * Data path
+ */
+static inline int ipa_tx_dp(enum ipa_client_type dst, struct sk_buff *skb,
+		struct ipa_tx_meta *metadata)
+{
+	return -EPERM;
+}
+
+
+/*
+ * System pipes
+ */
+static inline int ipa_setup_sys_pipe(struct ipa_sys_connect_params *sys_in,
+		u32 *clnt_hdl)
+{
+	return -EPERM;
+}
+
+
+static inline int ipa_teardown_sys_pipe(u32 clnt_hdl)
+{
+	return -EPERM;
+}
+
+
+#endif /* CONFIG_IPA*/
+
+#endif /* _IPA_H_ */
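
Reviewer note: a usage sketch for the connect path declared above, to make the end-point setup flow concrete. It is illustrative only and not part of the patch; the helper name, FIFO sizes and error handling are assumptions, and the actual enum ipa_client_type values live in <linux/msm_ipa.h> (not shown here), so the client type is passed in rather than named.

	#include <linux/string.h>
	#include <linux/printk.h>
	#include <mach/ipa.h>

	/* Hypothetical kernel client connecting one end-point through ipa_connect(). */
	static int example_ipa_ep_setup(enum ipa_client_type client, u32 bam_hdl,
					u32 ep_idx, u32 *clnt_hdl)
	{
		struct ipa_connect_params in;
		struct ipa_sps_params sps;
		int ret;

		memset(&in, 0, sizeof(in));
		in.client = client;
		in.client_bam_hdl = bam_hdl;
		in.client_ep_idx = ep_idx;
		in.desc_fifo_sz = 0x800;		/* sizes are illustrative */
		in.data_fifo_sz = 0x2000;
		in.pipe_mem_preferred = true;		/* fall back to sys mem if pipe mem fails */
		in.ipa_ep_cfg.mode.mode = IPA_BASIC;
		in.ipa_ep_cfg.nat.nat_en = IPA_BYPASS_NAT;

		ret = ipa_connect(&in, &sps, clnt_hdl);
		if (ret)
			return ret;			/* -EPERM when CONFIG_IPA is not set */

		pr_debug("ipa bam hdl 0x%x, ep idx %u\n", sps.ipa_bam_hdl, sps.ipa_ep_idx);
		return 0;
	}
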
diff --git a/arch/arm/mach-msm/include/mach/irqs-8226.h b/arch/arm/mach-msm/include/mach/irqs-8226.h
index 7e174b9..72602b1 100644
--- a/arch/arm/mach-msm/include/mach/irqs-8226.h
+++ b/arch/arm/mach-msm/include/mach/irqs-8226.h
@@ -32,7 +32,7 @@
 #define TLMM_MSM_SUMMARY_IRQ		(GIC_SPI_START + 208)
 
 #define NR_MSM_IRQS 256
-#define NR_GPIO_IRQS 146
+#define NR_GPIO_IRQS 117
 #define NR_QPNP_IRQS 32768 /* SPARSE_IRQ is required to support this */
 #define NR_BOARD_IRQS NR_QPNP_IRQS
 #define NR_TLMM_MSM_DIR_CONN_IRQ 8
diff --git a/arch/arm/mach-msm/include/mach/irqs-9625.h b/arch/arm/mach-msm/include/mach/irqs-9625.h
index f50606d..b1f65d1 100644
--- a/arch/arm/mach-msm/include/mach/irqs-9625.h
+++ b/arch/arm/mach-msm/include/mach/irqs-9625.h
@@ -21,6 +21,10 @@
  * 32+:   SPI (shared peripheral interrupts)
  */
 
+#define GIC_PPI_START 16
+
+#define INT_ARMQC_PERFMON		(GIC_PPI_START + 7)
+
 #define GIC_SPI_START 32
 
 #define APCC_QGICL2PERFMONIRPTREQ	(GIC_SPI_START + 1)
@@ -28,7 +32,7 @@
 #define TLMM_MSM_SUMMARY_IRQ		(GIC_SPI_START + 208)
 
 #define NR_MSM_IRQS 288
-#define NR_GPIO_IRQS 88
+#define NR_GPIO_IRQS 76
 #define NR_BOARD_IRQS 0
 #define NR_TLMM_MSM_DIR_CONN_IRQ 8 /*Need to Verify this Count*/
 #define NR_MSM_GPIOS NR_GPIO_IRQS
diff --git a/arch/arm/mach-msm/include/mach/memory.h b/arch/arm/mach-msm/include/mach/memory.h
index acfbe4a..d089924 100644
--- a/arch/arm/mach-msm/include/mach/memory.h
+++ b/arch/arm/mach-msm/include/mach/memory.h
@@ -98,26 +98,25 @@
 #define finish_arch_switch(prev)	do { store_ttbr0(); } while (0)
 #endif
 
+#define MAX_HOLE_ADDRESS    (PHYS_OFFSET + 0x10000000)
+extern unsigned long memory_hole_offset;
+extern unsigned long memory_hole_start;
+extern unsigned long memory_hole_end;
 #ifdef CONFIG_DONT_MAP_HOLE_AFTER_MEMBANK0
-extern unsigned long membank0_size;
-extern unsigned long membank1_start;
-void find_membank0_hole(void);
+void find_memory_hole(void);
 
-#define MEMBANK0_PHYS_OFFSET PHYS_OFFSET
-#define MEMBANK0_PAGE_OFFSET PAGE_OFFSET
-
-#define MEMBANK1_PHYS_OFFSET (membank1_start)
-#define MEMBANK1_PAGE_OFFSET (MEMBANK0_PAGE_OFFSET + (membank0_size))
+#define MEM_HOLE_END_PHYS_OFFSET (memory_hole_end)
+#define MEM_HOLE_PAGE_OFFSET (PAGE_OFFSET + memory_hole_offset)
 
 #define __phys_to_virt(phys)				\
-	((MEMBANK1_PHYS_OFFSET && ((phys) >= MEMBANK1_PHYS_OFFSET)) ?	\
-	(phys) - MEMBANK1_PHYS_OFFSET + MEMBANK1_PAGE_OFFSET :	\
-	(phys) - MEMBANK0_PHYS_OFFSET + MEMBANK0_PAGE_OFFSET)
+	((MEM_HOLE_END_PHYS_OFFSET && ((phys) >= MEM_HOLE_END_PHYS_OFFSET)) ? \
+	(phys) - MEM_HOLE_END_PHYS_OFFSET + MEM_HOLE_PAGE_OFFSET :	\
+	(phys) - PHYS_OFFSET + PAGE_OFFSET)
 
 #define __virt_to_phys(virt)				\
-	((MEMBANK1_PHYS_OFFSET && ((virt) >= MEMBANK1_PAGE_OFFSET)) ?	\
-	(virt) - MEMBANK1_PAGE_OFFSET + MEMBANK1_PHYS_OFFSET :	\
-	(virt) - MEMBANK0_PAGE_OFFSET + MEMBANK0_PHYS_OFFSET)
+	((MEM_HOLE_END_PHYS_OFFSET && ((virt) >= MEM_HOLE_PAGE_OFFSET)) ? \
+	(virt) - MEM_HOLE_PAGE_OFFSET + MEM_HOLE_END_PHYS_OFFSET :	\
+	(virt) - PAGE_OFFSET + PHYS_OFFSET)
 #endif
 
 /*
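
Reviewer note on the remapped __phys_to_virt()/__virt_to_phys() above: assuming find_memory_hole() sets memory_hole_offset to the offset of the hole start from PHYS_OFFSET, the virtual mapping stays contiguous across the hole. Worked example with illustrative values PHYS_OFFSET = 0x80000000, PAGE_OFFSET = 0xC0000000 and a hole spanning 0x88000000..0x8A000000 (so memory_hole_offset = 0x08000000): the last pre-hole physical address 0x87FFFFFF maps to 0x87FFFFFF - 0x80000000 + 0xC0000000 = 0xC7FFFFFF, while the first post-hole address 0x8A000000 maps to 0x8A000000 - 0x8A000000 + (0xC0000000 + 0x08000000) = 0xC8000000, i.e. the two ranges are virtually adjacent.
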
diff --git a/arch/arm/mach-msm/include/mach/msm_dcvs.h b/arch/arm/mach-msm/include/mach/msm_dcvs.h
index e81cee4..2ad7d22 100644
--- a/arch/arm/mach-msm/include/mach/msm_dcvs.h
+++ b/arch/arm/mach-msm/include/mach/msm_dcvs.h
@@ -36,12 +36,37 @@
 	MSM_DCVS_DISABLE_HIGH_LATENCY_MODES,
 };
 
+struct msm_dcvs_sync_rule {
+	unsigned long cpu_khz;
+	unsigned long gpu_floor_khz;
+};
+
+struct msm_dcvs_platform_data {
+	struct msm_dcvs_sync_rule *sync_rules;
+	unsigned num_sync_rules;
+	unsigned long gpu_max_nom_khz;
+};
+
 struct msm_gov_platform_data {
 	struct msm_dcvs_core_info *info;
 	int latency;
 };
 
 /**
+ * msm_dcvs_register_cpu_freq
+ * @freq: the frequency value to register
+ * @voltage: the operating voltage (in mV) associated with the above frequency
+ *
+ * Register a cpu frequency and its operating voltage with dcvs.
+ */
+#ifdef CONFIG_MSM_DCVS
+void msm_dcvs_register_cpu_freq(uint32_t freq, uint32_t voltage);
+#else
+static inline void msm_dcvs_register_cpu_freq(uint32_t freq, uint32_t voltage)
+{}
+#endif
+
+/**
  * msm_dcvs_idle
  * @dcvs_core_id: The id returned by msm_dcvs_register_core
  * @state: The enter/exit idle state the core is in
@@ -98,6 +123,7 @@
 	unsigned int (*get_frequency)(int type_core_num),
 	int (*idle_enable)(int type_core_num,
 				enum msm_core_control_event event),
+	int (*set_floor_frequency)(int type_core_num, unsigned int freq),
 	int sensor);
 
 /**
@@ -129,4 +155,23 @@
  * Update the frequency known to dcvs when the limits are changed.
  */
 extern void msm_dcvs_update_limits(int dcvs_core_id);
+
+/**
+ * msm_dcvs_apply_gpu_floor
+ * @cpu_freq: CPU frequency to compare to GPU sync rules
+ *
+ * Apply a GPU floor frequency if the corresponding CPU frequency,
+ * or the number of CPUs online, requires it.
+ */
+extern void msm_dcvs_apply_gpu_floor(unsigned long cpu_freq);
+
+/**
+ * msm_dcvs_update_algo_params
+ * @return:
+ *      0 on success, < 0 on error
+ *
+ * Updates the DCVS algorithm with parameters depending on the
+ * number of CPUs online.
+ */
+extern int msm_dcvs_update_algo_params(void);
 #endif
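
Reviewer note: the per-board frequency table is dropped from msm_dcvs_core_info (see the devices-8064.c hunk above, where num_freq is now 0 and marked "set at runtime"), so CPU frequencies are presumably registered with DCVS at runtime through msm_dcvs_register_cpu_freq() declared above. A hedged sketch of such a caller; the table type and names are illustrative, not from this patch:

	/* Hypothetical acpuclk-style driver registering its freq/voltage pairs. */
	struct example_cpu_freq {
		uint32_t khz;		/* CPU frequency in kHz */
		uint32_t vdd_mv;	/* operating voltage in mV */
	};

	static void example_register_cpu_freqs(const struct example_cpu_freq *tbl,
					       int count)
	{
		int i;

		for (i = 0; i < count; i++)
			msm_dcvs_register_cpu_freq(tbl[i].khz, tbl[i].vdd_mv);
	}
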
diff --git a/arch/arm/mach-msm/include/mach/msm_iomap-8226.h b/arch/arm/mach-msm/include/mach/msm_iomap-8226.h
index c03b513..ab43a8a 100644
--- a/arch/arm/mach-msm/include/mach/msm_iomap-8226.h
+++ b/arch/arm/mach-msm/include/mach/msm_iomap-8226.h
@@ -37,9 +37,12 @@
 #define MSM8226_TLMM_PHYS	0xFD510000
 #define MSM8226_TLMM_SIZE	SZ_16K
 
-#define MSM8226_IMEM_PHYS	0xFC42B000
+#define MSM8226_IMEM_PHYS	0xFE805000
 #define MSM8226_IMEM_SIZE	SZ_4K
 
+#define MSM8226_MPM2_PSHOLD_PHYS	0xFC4AB000
+#define MSM8226_MPM2_PSHOLD_SIZE	SZ_4K
+
 #ifdef CONFIG_DEBUG_MSM8226_UART
 #define MSM_DEBUG_UART_BASE	IOMEM(0xFA71E000)
 #define MSM_DEBUG_UART_PHYS	0xF991E000
diff --git a/arch/arm/mach-msm/include/mach/msm_memtypes.h b/arch/arm/mach-msm/include/mach/msm_memtypes.h
index 5ca5861..80e454a 100644
--- a/arch/arm/mach-msm/include/mach/msm_memtypes.h
+++ b/arch/arm/mach-msm/include/mach/msm_memtypes.h
@@ -68,6 +68,8 @@
 
 int __init dt_scan_for_memory_reserve(unsigned long node, const char *uname,
 					int depth, void *data);
-
+int __init dt_scan_for_memory_hole(unsigned long node, const char *uname,
+					int depth, void *data);
+void adjust_meminfo(unsigned long start, unsigned long size);
 unsigned long __init reserve_memory_for_fmem(unsigned long, unsigned long);
 #endif
diff --git a/arch/arm/mach-msm/include/mach/msm_spi.h b/arch/arm/mach-msm/include/mach/msm_spi.h
index 11d3014..ab5271f 100644
--- a/arch/arm/mach-msm/include/mach/msm_spi.h
+++ b/arch/arm/mach-msm/include/mach/msm_spi.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2008-2009, Code Aurora Forum. All rights reserved.
+/* Copyright (c) 2008-2009, 2012, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -20,6 +20,10 @@
 	void (*gpio_release)(void);
 	int (*dma_config)(void);
 	const char *rsl_id;
-	uint32_t pm_lat;
-	uint32_t infinite_mode;
+	u32  pm_lat;
+	u32  infinite_mode;
+	bool ver_reg_exists;
+	bool use_bam;
+	u32  bam_consumer_pipe_index;
+	u32  bam_producer_pipe_index;
 };
diff --git a/arch/arm/mach-msm/include/mach/ocmem.h b/arch/arm/mach-msm/include/mach/ocmem.h
index cb8aae0..6124cd6 100644
--- a/arch/arm/mach-msm/include/mach/ocmem.h
+++ b/arch/arm/mach-msm/include/mach/ocmem.h
@@ -22,7 +22,7 @@
 
 /* Maximum number of slots in DM */
 #define OCMEM_MAX_CHUNKS 32
-#define MIN_CHUNK_SIZE 128
+#define MIN_CHUNK_SIZE SZ_4K
 
 struct ocmem_notifier;
 
diff --git a/arch/arm/mach-msm/include/mach/qdsp6v2/audio_acdb.h b/arch/arm/mach-msm/include/mach/qdsp6v2/audio_acdb.h
index d34536d..88cb94a 100644
--- a/arch/arm/mach-msm/include/mach/qdsp6v2/audio_acdb.h
+++ b/arch/arm/mach-msm/include/mach/qdsp6v2/audio_acdb.h
@@ -14,7 +14,7 @@
 #define _AUDIO_ACDB_H
 
 #include <linux/msm_audio_acdb.h>
-#ifdef CONFIG_ARCH_MSM8974
+#if defined CONFIG_ARCH_MSM8974 || defined CONFIG_ARCH_MSM9625
 #include <sound/q6adm-v2.h>
 #else
 #include <sound/q6adm.h>
diff --git a/arch/arm/mach-msm/include/mach/socinfo.h b/arch/arm/mach-msm/include/mach/socinfo.h
index 34bdc79..0499a7a 100644
--- a/arch/arm/mach-msm/include/mach/socinfo.h
+++ b/arch/arm/mach-msm/include/mach/socinfo.h
@@ -111,6 +111,7 @@
 	MSM_CPU_8092,
 	MSM_CPU_8226,
 	MSM_CPU_8910,
+	MSM_CPU_8625Q,
 };
 
 enum pmic_model {
@@ -447,6 +448,18 @@
 #endif
 }
 
+static inline int cpu_is_msm8625q(void)
+{
+#ifdef CONFIG_ARCH_MSM8625
+	enum msm_cpu cpu = socinfo_get_msm_cpu();
+
+	BUG_ON(cpu == MSM_CPU_UNKNOWN);
+	return cpu == MSM_CPU_8625Q;
+#else
+	return 0;
+#endif
+}
+
 static inline int soc_class_is_msm8960(void)
 {
 	return cpu_is_msm8960() || cpu_is_msm8960ab();
diff --git a/arch/arm/mach-msm/include/mach/usb_bam.h b/arch/arm/mach-msm/include/mach/usb_bam.h
index 47313a7..5e1ef6f 100644
--- a/arch/arm/mach-msm/include/mach/usb_bam.h
+++ b/arch/arm/mach-msm/include/mach/usb_bam.h
@@ -13,6 +13,7 @@
 #ifndef _USB_BAM_H_
 #define _USB_BAM_H_
 #include "sps.h"
+#include <mach/ipa.h>
 
 /**
  * SPS Pipes direction.
@@ -27,6 +28,22 @@
 	PEER_PERIPHERAL_TO_USB,
 };
 
+struct usb_bam_connect_ipa_params {
+	u8 idx;
+	u32 *src_pipe;
+	u32 *dst_pipe;
+	enum usb_bam_pipe_dir dir;
+	/* client handle assigned by IPA to client */
+	u32 prod_clnt_hdl;
+	u32 cons_clnt_hdl;
+	/* params assigned by the CD */
+	enum ipa_client_type client;
+	struct ipa_ep_cfg ipa_ep_cfg;
+	void *priv;
+	void (*notify)(void *priv, enum ipa_dp_evt_type evt,
+			unsigned long data);
+};
+
 #ifdef CONFIG_USB_BAM
 /**
  * Connect USB-to-Periperal SPS connection.
@@ -47,6 +64,31 @@
 int usb_bam_connect(u8 idx, u32 *src_pipe_idx, u32 *dst_pipe_idx);
 
 /**
+ * Connect USB-to-IPA SPS connection.
+ *
+ * This function returns the allocated pipe numbers and client handles.
+ *
+ * @ipa_params - in/out parameters
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+int usb_bam_connect_ipa(struct usb_bam_connect_ipa_params *ipa_params);
+
+/**
+ * Disconnect USB-to-IPA SPS connection.
+ *
+ * @idx - Connection index.
+ *
+ * @ipa_params - in/out parameters
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+int usb_bam_disconnect_ipa(u8 idx,
+		struct usb_bam_connect_ipa_params *ipa_params);
+
+/**
  * Register a wakeup callback from peer BAM.
  *
  * @idx - Connection index.
@@ -96,6 +138,18 @@
 	return -ENODEV;
 }
 
+static inline int usb_bam_connect_ipa(
+			struct usb_bam_connect_ipa_params *ipa_params)
+{
+	return -ENODEV;
+}
+
+static inline int usb_bam_disconnect_ipa(u8 idx,
+			struct usb_bam_connect_ipa_params *ipa_params)
+{
+	return -ENODEV;
+}
+
 static inline int usb_bam_register_wake_cb(u8 idx,
 	int (*callback)(void *), void* param)
 {
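
Reviewer note: a sketch of how a gadget-side caller might use the new USB-to-IPA connect API above. Illustrative only and not part of the patch; the connection index, pipe variables and callback are hypothetical, and the client type comes from <linux/msm_ipa.h>:

	#include <linux/string.h>
	#include <mach/usb_bam.h>

	static u32 example_src_pipe;
	static u32 example_dst_pipe;

	/* Data-path callback; IPA_RECEIVE / IPA_WRITE_DONE handling would go here. */
	static void example_ipa_notify(void *priv, enum ipa_dp_evt_type evt,
				       unsigned long data)
	{
	}

	static int example_usb_to_ipa_connect(u8 idx, enum ipa_client_type client)
	{
		struct usb_bam_connect_ipa_params params;

		memset(&params, 0, sizeof(params));
		params.idx = idx;
		params.src_pipe = &example_src_pipe;
		params.dst_pipe = &example_dst_pipe;
		params.dir = USB_TO_PEER_PERIPHERAL;	/* USB producer -> IPA consumer */
		params.client = client;
		params.notify = example_ipa_notify;

		return usb_bam_connect_ipa(&params);	/* -ENODEV without CONFIG_USB_BAM */
	}
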
diff --git a/arch/arm/mach-msm/include/mach/usb_gadget_xport.h b/arch/arm/mach-msm/include/mach/usb_gadget_xport.h
index be11989..41dac62 100644
--- a/arch/arm/mach-msm/include/mach/usb_gadget_xport.h
+++ b/arch/arm/mach-msm/include/mach/usb_gadget_xport.h
@@ -21,12 +21,13 @@
 	USB_GADGET_XPORT_SMD,
 	USB_GADGET_XPORT_BAM,
 	USB_GADGET_XPORT_BAM2BAM,
+	USB_GADGET_XPORT_BAM2BAM_IPA,
 	USB_GADGET_XPORT_HSIC,
 	USB_GADGET_XPORT_HSUART,
 	USB_GADGET_XPORT_NONE,
 };
 
-#define XPORT_STR_LEN	10
+#define XPORT_STR_LEN	12
 
 static char *xport_to_str(enum transport_type t)
 {
@@ -41,6 +42,8 @@
 		return "BAM";
 	case USB_GADGET_XPORT_BAM2BAM:
 		return "BAM2BAM";
+	case USB_GADGET_XPORT_BAM2BAM_IPA:
+		return "BAM2BAM_IPA";
 	case USB_GADGET_XPORT_HSIC:
 		return "HSIC";
 	case USB_GADGET_XPORT_HSUART:
@@ -64,6 +67,8 @@
 		return USB_GADGET_XPORT_BAM;
 	if (!strncasecmp("BAM2BAM", name, XPORT_STR_LEN))
 		return USB_GADGET_XPORT_BAM2BAM;
+	if (!strncasecmp("BAM2BAM_IPA", name, XPORT_STR_LEN))
+		return USB_GADGET_XPORT_BAM2BAM_IPA;
 	if (!strncasecmp("HSIC", name, XPORT_STR_LEN))
 		return USB_GADGET_XPORT_HSIC;
 	if (!strncasecmp("HSUART", name, XPORT_STR_LEN))
diff --git a/arch/arm/mach-msm/include/mach/usbdiag.h b/arch/arm/mach-msm/include/mach/usbdiag.h
index d1e3605..4d0f63a 100644
--- a/arch/arm/mach-msm/include/mach/usbdiag.h
+++ b/arch/arm/mach-msm/include/mach/usbdiag.h
@@ -1,6 +1,6 @@
 /* include/asm-arm/arch-msm/usbdiag.h
  *
- * Copyright (c) 2008-2010, Code Aurora Forum. All rights reserved.
+ * Copyright (c) 2008-2010, 2012, The Linux Foundation. All rights reserved.
  *
  * All source code in this file is licensed under the following license except
  * where indicated.
@@ -23,6 +23,7 @@
 
 #define DIAG_LEGACY		"diag"
 #define DIAG_MDM		"diag_mdm"
+#define DIAG_QSC		"diag_qsc"
 
 #define USB_DIAG_CONNECT	0
 #define USB_DIAG_DISCONNECT	1
diff --git a/arch/arm/mach-msm/io.c b/arch/arm/mach-msm/io.c
index 52bb8ef..cd70ae9 100644
--- a/arch/arm/mach-msm/io.c
+++ b/arch/arm/mach-msm/io.c
@@ -519,6 +519,7 @@
 	MSM_CHIP_DEVICE(APCS_GCC, MSM8226),
 	MSM_CHIP_DEVICE(TLMM, MSM8226),
 	MSM_CHIP_DEVICE(IMEM, MSM8226),
+	MSM_CHIP_DEVICE(MPM2_PSHOLD, MSM8226),
 	{
 		.virtual =  (unsigned long) MSM_SHARED_RAM_BASE,
 		.length =   MSM_SHARED_RAM_SIZE,
diff --git a/arch/arm/mach-msm/krait-regulator.c b/arch/arm/mach-msm/krait-regulator.c
index 63ae4e6..f7b2b1e 100644
--- a/arch/arm/mach-msm/krait-regulator.c
+++ b/arch/arm/mach-msm/krait-regulator.c
@@ -159,6 +159,7 @@
 	int				retention_uV;
 	int				headroom_uV;
 	int				ldo_threshold_uV;
+	bool				online;
 };
 
 static u32 version;
@@ -309,6 +310,7 @@
 	}
 
 	setpoint = DIV_ROUND_UP(uV, LV_RANGE_STEP);
+
 	return msm_spm_apcs_set_vdd(setpoint);
 }
 
@@ -319,6 +321,8 @@
 	int rc = 0;
 
 	list_for_each_entry(kvreg, &pvreg->krait_power_vregs, link) {
+		if (!kvreg->online)
+			continue;
 		if (kvreg->uV > kvreg->ldo_threshold_uV
 			 || kvreg->uV > vmax - kvreg->headroom_uV) {
 			rc = switch_to_using_hs(kvreg);
@@ -482,6 +486,9 @@
 	struct pmic_gang_vreg *pvreg = from->pvreg;
 
 	list_for_each_entry(kvreg, &pvreg->krait_power_vregs, link) {
+		if (!kvreg->online)
+			continue;
+
 		v = kvreg->uV;
 
 		if (kvreg == from)
@@ -490,6 +497,7 @@
 		if (vmax < v)
 			vmax = v;
 	}
+
 	return vmax;
 }
 
@@ -499,14 +507,17 @@
 	struct krait_power_vreg *kvreg;
 	struct pmic_gang_vreg *pvreg = from->pvreg;
 
-	list_for_each_entry(kvreg, &pvreg->krait_power_vregs, link)
+	list_for_each_entry(kvreg, &pvreg->krait_power_vregs, link) {
+		if (!kvreg->online)
+			continue;
 		load_total += kvreg->load_uA;
+	}
 
 	return load_total;
 }
 
 #define ROUND_UP_VOLTAGE(v, res) (DIV_ROUND_UP(v, res) * res)
-static int krait_power_set_voltage(struct regulator_dev *rdev,
+static int _set_voltage(struct regulator_dev *rdev,
 			int min_uV, int max_uV, unsigned *selector)
 {
 	struct krait_power_vreg *kvreg = rdev_get_drvdata(rdev);
@@ -514,6 +525,31 @@
 	int rc;
 	int vmax;
 
+	vmax = get_vmax(kvreg, min_uV);
+
+	/* round up the pmic voltage as per its resolution */
+	vmax = ROUND_UP_VOLTAGE(vmax, LV_RANGE_STEP);
+
+	rc = pmic_gang_set_voltage(kvreg, vmax);
+	if (rc < 0) {
+		dev_err(&rdev->dev, "%s failed set voltage (%d, %d) rc = %d\n",
+				kvreg->name, min_uV, max_uV, rc);
+		goto out;
+	}
+
+	pvreg->pmic_vmax_uV = vmax;
+
+out:
+	return rc;
+}
+
+static int krait_power_set_voltage(struct regulator_dev *rdev,
+			int min_uV, int max_uV, unsigned *selector)
+{
+	struct krait_power_vreg *kvreg = rdev_get_drvdata(rdev);
+	struct pmic_gang_vreg *pvreg = kvreg->pvreg;
+	int rc;
+
 	/*
 	 * if the voltage requested is below LDO_THRESHOLD this cpu could
 	 * switch to LDO mode. Hence round the voltage as per the LDO
@@ -526,49 +562,29 @@
 	}
 
 	mutex_lock(&pvreg->krait_power_vregs_lock);
-
-	vmax = get_vmax(kvreg, min_uV);
-
-	/* round up the pmic voltage as per its resolution */
-	vmax = ROUND_UP_VOLTAGE(vmax, LV_RANGE_STEP);
-
-	/*
-	 * Assign the voltage before updating the gang voltage as we iterate
-	 * over all the core voltages and choose HS or LDO for each of them
-	 */
 	kvreg->uV = min_uV;
 
-	rc = pmic_gang_set_voltage(kvreg, vmax);
-	if (rc < 0) {
-		dev_err(&rdev->dev, "%s failed set voltage (%d, %d) rc = %d\n",
-				kvreg->name, min_uV, max_uV, rc);
-		goto out;
+	if (!kvreg->online) {
+		mutex_unlock(&pvreg->krait_power_vregs_lock);
+		return 0;
 	}
 
-	pvreg->pmic_vmax_uV = vmax;
-
-out:
+	rc = _set_voltage(rdev, min_uV, max_uV, selector);
 	mutex_unlock(&pvreg->krait_power_vregs_lock);
+
 	return rc;
 }
 
 #define PMIC_FTS_MODE_PFM	0x00
 #define PMIC_FTS_MODE_PWM	0x80
 #define PFM_LOAD_UA		500000
-static unsigned int krait_power_get_optimum_mode(struct regulator_dev *rdev,
+static unsigned int _get_optimum_mode(struct regulator_dev *rdev,
 			int input_uV, int output_uV, int load_uA)
 {
 	struct krait_power_vreg *kvreg = rdev_get_drvdata(rdev);
 	struct pmic_gang_vreg *pvreg = kvreg->pvreg;
 	int rc;
 	int load_total_uA;
-	int reg_mode = -EINVAL;
-
-	mutex_lock(&pvreg->krait_power_vregs_lock);
-
-	reg_mode = kvreg->mode;
-
-	kvreg->load_uA = load_uA;
 
 	load_total_uA = get_total_load(kvreg);
 
@@ -584,8 +600,7 @@
 				pvreg->pfm_mode = true;
 			}
 		}
-		mutex_unlock(&pvreg->krait_power_vregs_lock);
-		return reg_mode;
+		return kvreg->mode;
 	}
 
 	if (pvreg->pfm_mode) {
@@ -608,8 +623,27 @@
 	}
 
 out:
+	return kvreg->mode;
+}
+
+static unsigned int krait_power_get_optimum_mode(struct regulator_dev *rdev,
+			int input_uV, int output_uV, int load_uA)
+{
+	struct krait_power_vreg *kvreg = rdev_get_drvdata(rdev);
+	struct pmic_gang_vreg *pvreg = kvreg->pvreg;
+	int rc;
+
+	mutex_lock(&pvreg->krait_power_vregs_lock);
+	kvreg->load_uA = load_uA;
+	if (!kvreg->online) {
+		mutex_unlock(&pvreg->krait_power_vregs_lock);
+		return kvreg->mode;
+	}
+
+	rc = _get_optimum_mode(rdev, input_uV, output_uV, load_uA);
 	mutex_unlock(&pvreg->krait_power_vregs_lock);
-	return reg_mode;
+
+	return rc;
 }
 
 static int krait_power_set_mode(struct regulator_dev *rdev, unsigned int mode)
@@ -624,12 +658,62 @@
 	return kvreg->mode;
 }
 
+static int krait_power_is_enabled(struct regulator_dev *rdev)
+{
+	struct krait_power_vreg *kvreg = rdev_get_drvdata(rdev);
+
+	return kvreg->online;
+}
+
+static int krait_power_enable(struct regulator_dev *rdev)
+{
+	struct krait_power_vreg *kvreg = rdev_get_drvdata(rdev);
+	struct pmic_gang_vreg *pvreg = kvreg->pvreg;
+	int rc;
+
+	mutex_lock(&pvreg->krait_power_vregs_lock);
+	kvreg->online = true;
+	rc = _get_optimum_mode(rdev, kvreg->uV, kvreg->uV,
+							kvreg->load_uA);
+	if (rc < 0)
+		goto en_err;
+	rc = _set_voltage(rdev, kvreg->uV,
+					rdev->constraints->max_uV, NULL);
+en_err:
+	mutex_unlock(&pvreg->krait_power_vregs_lock);
+	return rc;
+}
+
+static int krait_power_disable(struct regulator_dev *rdev)
+{
+	struct krait_power_vreg *kvreg = rdev_get_drvdata(rdev);
+	struct pmic_gang_vreg *pvreg = kvreg->pvreg;
+	int rc;
+
+	mutex_lock(&pvreg->krait_power_vregs_lock);
+	kvreg->online = false;
+
+	rc = _get_optimum_mode(rdev, kvreg->uV, kvreg->uV,
+							kvreg->load_uA);
+	if (rc < 0)
+		goto dis_err;
+
+	rc = _set_voltage(rdev, kvreg->uV,
+					rdev->constraints->max_uV, NULL);
+dis_err:
+	mutex_unlock(&pvreg->krait_power_vregs_lock);
+	return rc;
+}
+
 static struct regulator_ops krait_power_ops = {
 	.get_voltage		= krait_power_get_voltage,
 	.set_voltage		= krait_power_set_voltage,
 	.get_optimum_mode	= krait_power_get_optimum_mode,
 	.set_mode		= krait_power_set_mode,
 	.get_mode		= krait_power_get_mode,
+	.enable			= krait_power_enable,
+	.disable		= krait_power_disable,
+	.is_enabled		= krait_power_is_enabled,
 };
 
 static void kvreg_hw_init(struct krait_power_vreg *kvreg)
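With the new enable/disable/is_enabled ops, set_voltage on an offline krait regulator only caches the requested voltage and returns 0; the enable op then applies the cached value and re-evaluates the HS/LDO choice. A hypothetical consumer sequence (the device pointer and supply name are assumptions for illustration):

	struct regulator *krait0;
	int ret;

	krait0 = regulator_get(dev, "krait0");	/* supply name is illustrative */
	if (IS_ERR(krait0))
		return PTR_ERR(krait0);

	/* cached only, since the regulator is still offline */
	ret = regulator_set_voltage(krait0, 1050000, 1150000);
	if (!ret)
		ret = regulator_enable(krait0);	/* marks it online and applies the voltage */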
diff --git a/arch/arm/mach-msm/memory.c b/arch/arm/mach-msm/memory.c
index 3fe65b8..9cc2a9d 100644
--- a/arch/arm/mach-msm/memory.c
+++ b/arch/arm/mach-msm/memory.c
@@ -27,6 +27,7 @@
 #include <asm/setup.h>
 #include <asm/mach-types.h>
 #include <mach/msm_memtypes.h>
+#include <mach/memory.h>
 #include <linux/hardirq.h>
 #if defined(CONFIG_MSM_NPA_REMOTE)
 #include "npa_remote.h"
@@ -365,7 +366,7 @@
 	return ret;
 }
 
-static int check_for_compat(unsigned long node)
+static int __init check_for_compat(unsigned long node)
 {
 	char **start = __compat_exports_start;
 
@@ -454,6 +455,79 @@
 	return 0;
 }
 
+/* This function scans the device tree to populate the memory hole table */
+int __init dt_scan_for_memory_hole(unsigned long node, const char *uname,
+		int depth, void *data)
+{
+	unsigned int *memory_remove_prop;
+	unsigned long memory_remove_prop_length;
+	unsigned long hole_start;
+	unsigned long hole_size;
+
+	memory_remove_prop = of_get_flat_dt_prop(node,
+						"qcom,memblock-remove",
+						&memory_remove_prop_length);
+
+	if (memory_remove_prop) {
+		if (!check_for_compat(node))
+			goto out;
+	} else {
+		goto out;
+	}
+
+	if (memory_remove_prop) {
+		if (memory_remove_prop_length != (2*sizeof(unsigned int))) {
+			WARN(1, "Memory remove malformed\n");
+			goto out;
+		}
+
+		hole_start = be32_to_cpu(memory_remove_prop[0]);
+		hole_size = be32_to_cpu(memory_remove_prop[1]);
+
+		if (hole_start + hole_size <= MAX_HOLE_ADDRESS) {
+			if (memory_hole_start == 0 && memory_hole_end == 0) {
+				memory_hole_start = hole_start;
+				memory_hole_end = hole_start + hole_size;
+			} else if ((memory_hole_end - memory_hole_start)
+							<= hole_size) {
+				memory_hole_start = hole_start;
+				memory_hole_end = hole_start + hole_size;
+			}
+		}
+		adjust_meminfo(hole_start, hole_size);
+	}
+
+out:
+	return 0;
+}
+
+/*
+ * Split the memory bank to reflect the hole, if present,
+ * using the start and end of the memory hole.
+ */
+void adjust_meminfo(unsigned long start, unsigned long size)
+{
+	int i, j;
+
+	for (i = 0, j = 0; i < meminfo.nr_banks; i++) {
+		struct membank *bank = &meminfo.bank[j];
+		*bank = meminfo.bank[i];
+
+		if (((start + size) <= (bank->start + bank->size)) &&
+			(start >= bank->start)) {
+			memmove(bank + 1, bank,
+				(meminfo.nr_banks - i) * sizeof(*bank));
+			meminfo.nr_banks++;
+			i++;
+			bank[1].size -= (start + size);
+			bank[1].start = (start + size);
+			bank[1].highmem = 0;
+			j++;
+			bank->size = start - bank->start;
+		}
+		j++;
+	}
+}
 unsigned long get_ddr_size(void)
 {
 	unsigned int i;
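As a worked example of the bank split performed by adjust_meminfo() above (assuming a single bank that starts at physical address 0, which is the layout this arithmetic is written for): a 1 GB bank with an 8 MB hole at the 256 MB mark becomes a 256 MB low bank plus a high bank starting just past the hole.

	#include <stdio.h>

	int main(void)
	{
		unsigned long bank_start = 0x00000000, bank_size = 0x40000000; /* 1 GB */
		unsigned long hole_start = 0x10000000, hole_size = 0x00800000; /* 8 MB */

		unsigned long low_size   = hole_start - bank_start;              /* 0x10000000 */
		unsigned long high_start = hole_start + hole_size;               /* 0x10800000 */
		unsigned long high_size  = bank_size - (hole_start + hole_size); /* 0x2f800000 */

		printf("low  bank: [%#lx, +%#lx)\n", bank_start, low_size);
		printf("high bank: [%#lx, +%#lx)\n", high_start, high_size);
		return 0;
	}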
diff --git a/arch/arm/mach-msm/mpm-8625.c b/arch/arm/mach-msm/mpm-8625.c
index c70ff5c..aaac476 100644
--- a/arch/arm/mach-msm/mpm-8625.c
+++ b/arch/arm/mach-msm/mpm-8625.c
@@ -101,6 +101,7 @@
 
 static uint16_t msm_bypassed_apps_irqs[] = {
 	MSM8625_INT_CPR_IRQ0,
+	MSM8625_INT_L2CC_INTR,
 };
 
 /* Check IRQ falls into bypassed list are not */
diff --git a/arch/arm/mach-msm/msm_bus/msm_bus_bimc.c b/arch/arm/mach-msm/msm_bus/msm_bus_bimc.c
index e0ab983..ea17efe 100644
--- a/arch/arm/mach-msm/msm_bus/msm_bus_bimc.c
+++ b/arch/arm/mach-msm/msm_bus/msm_bus_bimc.c
@@ -485,7 +485,7 @@
 };
 
 #define M_PRIOLVL_OVERRIDE_ADDR(b, n) \
-	(M_REG_BASE(b) + (0x4000 * (n)) + 0x00000220)
+	(M_REG_BASE(b) + (0x4000 * (n)) + 0x00000230)
 enum bimc_m_priolvl_override {
 	M_PRIOLVL_OVERRIDE_RMSK			= 0x301,
 	M_PRIOLVL_OVERRIDE_BMSK			= 0x300,
@@ -495,10 +495,10 @@
 };
 
 #define M_RD_CMD_OVERRIDE_ADDR(b, n) \
-	(M_REG_BASE(b) + (0x4000 * (n)) + 0x00000230)
+	(M_REG_BASE(b) + (0x4000 * (n)) + 0x00000240)
 enum bimc_m_read_command_override {
-	M_RD_CMD_OVERRIDE_RMSK			= 0x37f3f,
-	M_RD_CMD_OVERRIDE_AREQPRIO_BMSK		= 0x300000,
+	M_RD_CMD_OVERRIDE_RMSK			= 0x3071f7f,
+	M_RD_CMD_OVERRIDE_AREQPRIO_BMSK		= 0x3000000,
 	M_RD_CMD_OVERRIDE_AREQPRIO_SHFT		= 0x18,
 	M_RD_CMD_OVERRIDE_AMEMTYPE_BMSK		= 0x70000,
 	M_RD_CMD_OVERRIDE_AMEMTYPE_SHFT		= 0x10,
@@ -529,13 +529,15 @@
 };
 
 #define M_WR_CMD_OVERRIDE_ADDR(b, n) \
-	(M_REG_BASE(b) + (0x4000 * (n)) + 0x00000240)
+	(M_REG_BASE(b) + (0x4000 * (n)) + 0x00000250)
 enum bimc_m_write_command_override {
-	M_WR_CMD_OVERRIDE_RMSK			= 0x37f3f,
-	M_WR_CMD_OVERRIDE_AREQPRIO_BMSK		= 0x30000,
-	M_WR_CMD_OVERRIDE_AREQPRIO_SHFT		= 0x10,
-	M_WR_CMD_OVERRIDE_AMEMTYPE_BMSK		= 0x7000,
-	M_WR_CMD_OVERRIDE_AMEMTYPE_SHFT		= 0xc,
+	M_WR_CMD_OVERRIDE_RMSK			= 0x3071f7f,
+	M_WR_CMD_OVERRIDE_AREQPRIO_BMSK		= 0x3000000,
+	M_WR_CMD_OVERRIDE_AREQPRIO_SHFT		= 0x18,
+	M_WR_CMD_OVERRIDE_AMEMTYPE_BMSK		= 0x70000,
+	M_WR_CMD_OVERRIDE_AMEMTYPE_SHFT		= 0x10,
+	M_WR_CMD_OVERRIDE_ATRANSIENT_BMSK	= 0x1000,
+	M_WR_CMD_OVERRIDE_ATRANSIENT_SHFT	= 0xc,
 	M_WR_CMD_OVERRIDE_ASHARED_BMSK		= 0x800,
 	M_WR_CMD_OVERRIDE_ASHARED_SHFT		= 0xb,
 	M_WR_CMD_OVERRIDE_AREDIRECT_BMSK		= 0x400,
@@ -544,8 +546,10 @@
 	M_WR_CMD_OVERRIDE_AOOO_SHFT			= 0x9,
 	M_WR_CMD_OVERRIDE_AINNERSHARED_BMSK		= 0x100,
 	M_WR_CMD_OVERRIDE_AINNERSHARED_SHFT		= 0x8,
-	M_WR_CMD_OVERRIDE_OVERRIDE_AREQPRIO_BMSK	= 0x20,
-	M_WR_CMD_OVERRIDE_OVERRIDE_AREQPRIO_SHFT	= 0x5,
+	M_WR_CMD_OVERRIDE_OVERRIDE_AREQPRIO_BMSK	= 0x40,
+	M_WR_CMD_OVERRIDE_OVERRIDE_AREQPRIO_SHFT	= 0x6,
+	M_WR_CMD_OVERRIDE_OVERRIDE_ATRANSIENT_BMSK	= 0x20,
+	M_WR_CMD_OVERRIDE_OVERRIDE_ATRANSIENT_SHFT	= 0x5,
 	M_WR_CMD_OVERRIDE_OVERRIDE_AMEMTYPE_BMSK	= 0x10,
 	M_WR_CMD_OVERRIDE_OVERRIDE_AMEMTYPE_SHFT	= 0x4,
 	M_WR_CMD_OVERRIDE_OVERRIDE_ASHARED_BMSK	= 0x8,
@@ -1454,7 +1458,7 @@
 		 * boundary in future
 		 */
 		wmb();
-		set_qos_mode(binfo->base, mas_index, 1, 1, 1);
+		set_qos_mode(binfo->base, mas_index, 0, 1, 1);
 		break;
 
 	case BIMC_QOS_MODE_BYPASS:
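The enum changes above only move register offsets and redraw field masks; programming such a field is the usual read-modify-write against the matching *_BMSK/*_SHFT pair. A generic sketch of that pattern (not code from this patch):

	static void bimc_set_field(void __iomem *reg, u32 bmsk, u32 shft, u32 val)
	{
		u32 tmp = readl_relaxed(reg);

		tmp &= ~bmsk;			/* clear the field */
		tmp |= (val << shft) & bmsk;	/* insert the new value */
		writel_relaxed(tmp, reg);
	}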
diff --git a/arch/arm/mach-msm/msm_bus/msm_bus_board_8974.c b/arch/arm/mach-msm/msm_bus/msm_bus_board_8974.c
index f0f5cd8..5c20a4e 100644
--- a/arch/arm/mach-msm/msm_bus/msm_bus_board_8974.c
+++ b/arch/arm/mach-msm/msm_bus/msm_bus_board_8974.c
@@ -806,7 +806,7 @@
 		.tier = tier2,
 		.num_tiers = ARRAY_SIZE(tier2),
 		.hw_sel = MSM_BUS_NOC,
-		.perm_mode = NOC_QOS_MODES_ALL_PERM,
+		.perm_mode = NOC_QOS_PERM_MODE_BYPASS,
 		.mode = NOC_QOS_MODE_BYPASS,
 		.ws = 10000,
 		.qport = qports_oxili,
@@ -819,7 +819,7 @@
 		.tier = tier2,
 		.num_tiers = ARRAY_SIZE(tier2),
 		.hw_sel = MSM_BUS_NOC,
-		.perm_mode = NOC_QOS_MODES_ALL_PERM,
+		.perm_mode = NOC_QOS_PERM_MODE_BYPASS,
 		.mode = NOC_QOS_MODE_BYPASS,
 		.qport = qports_gemini,
 		.ws = 10000,
@@ -832,7 +832,7 @@
 		.tier = tier2,
 		.num_tiers = ARRAY_SIZE(tier2),
 		.hw_sel = MSM_BUS_NOC,
-		.perm_mode = NOC_QOS_MODES_ALL_PERM,
+		.perm_mode = NOC_QOS_PERM_MODE_BYPASS,
 		.mode = NOC_QOS_MODE_BYPASS,
 		.qport = qports_mdp,
 		.ws = 10000,
@@ -845,7 +845,7 @@
 		.tier = tier2,
 		.num_tiers = ARRAY_SIZE(tier2),
 		.hw_sel = MSM_BUS_NOC,
-		.perm_mode = NOC_QOS_MODES_ALL_PERM,
+		.perm_mode = NOC_QOS_PERM_MODE_BYPASS,
 		.mode = NOC_QOS_MODE_BYPASS,
 		.ws = 10000,
 		.qport = qports_venus_p0,
@@ -858,7 +858,7 @@
 		.tier = tier2,
 		.num_tiers = ARRAY_SIZE(tier2),
 		.hw_sel = MSM_BUS_NOC,
-		.perm_mode = NOC_QOS_MODES_ALL_PERM,
+		.perm_mode = NOC_QOS_PERM_MODE_BYPASS,
 		.mode = NOC_QOS_MODE_BYPASS,
 		.ws = 10000,
 		.qport = qports_venus_p1,
@@ -871,7 +871,7 @@
 		.tier = tier2,
 		.num_tiers = ARRAY_SIZE(tier2),
 		.hw_sel = MSM_BUS_NOC,
-		.perm_mode = NOC_QOS_MODES_ALL_PERM,
+		.perm_mode = NOC_QOS_PERM_MODE_BYPASS,
 		.mode = NOC_QOS_MODE_BYPASS,
 		.ws = 10000,
 		.qport = qports_vfe,
@@ -1049,9 +1049,8 @@
 		.qport = qports_kmpss,
 		.ws = 10000,
 		.mas_hw_id = MAS_APPSS_PROC,
-		.prio_lvl = 0,
-		.prio_rd = 2,
-		.prio_wr = 2,
+		.prio_rd = 1,
+		.prio_wr = 1,
 	},
 	{
 		.id = MSM_BUS_MASTER_AMPSS_M1,
@@ -1064,6 +1063,8 @@
 		.qport = qports_kmpss,
 		.ws = 10000,
 		.mas_hw_id = MAS_APPSS_PROC,
+		.prio_rd = 1,
+		.prio_wr = 1,
 	},
 	{
 		.id = MSM_BUS_MASTER_MSS_PROC,
diff --git a/arch/arm/mach-msm/msm_bus/msm_bus_noc.c b/arch/arm/mach-msm/msm_bus/msm_bus_noc.c
index fb2e5da..9e89256 100644
--- a/arch/arm/mach-msm/msm_bus/msm_bus_noc.c
+++ b/arch/arm/mach-msm/msm_bus/msm_bus_noc.c
@@ -362,16 +362,18 @@
 	}
 
 	for (i = 0; i < info->node_info->num_mports; i++) {
-		if (info->node_info->mode != NOC_QOS_MODE_BYPASS)
+		if (info->node_info->mode != NOC_QOS_MODE_BYPASS) {
 			noc_set_qos_priority(ninfo, info->node_info->qport[i],
 				prio);
 
-		if (info->node_info->mode != NOC_QOS_MODE_FIXED) {
-			struct msm_bus_noc_qos_bw qbw;
-			qbw.ws = info->node_info->ws;
-			qbw.bw = 0;
-			msm_bus_noc_set_qos_bw(ninfo, info->node_info->qport[i],
-				info->node_info->perm_mode, &qbw);
+			if (info->node_info->mode != NOC_QOS_MODE_FIXED) {
+				struct msm_bus_noc_qos_bw qbw;
+				qbw.ws = info->node_info->ws;
+				qbw.bw = 0;
+				msm_bus_noc_set_qos_bw(ninfo, info->node_info->
+					qport[i], info->node_info->perm_mode,
+					&qbw);
+			}
 		}
 
 		noc_set_qos_mode(ninfo, info->node_info->qport[i], info->
diff --git a/arch/arm/mach-msm/msm_cpr.c b/arch/arm/mach-msm/msm_cpr.c
index b68a8db..c7a8b98 100644
--- a/arch/arm/mach-msm/msm_cpr.c
+++ b/arch/arm/mach-msm/msm_cpr.c
@@ -906,6 +906,14 @@
 		return -ENOMEM;
 	}
 
+	/* enable clk for cpr */
+	if (!pdata->clk_enable) {
+		pr_err("CPR: Invalid clk_enable hook\n");
+		return -EFAULT;
+	}
+
+	pdata->clk_enable();
+
 	/* Initialize platform_data */
 	cpr->config = pdata;
 
diff --git a/arch/arm/mach-msm/msm_dcvs.c b/arch/arm/mach-msm/msm_dcvs.c
index c1c05af..41afd24 100644
--- a/arch/arm/mach-msm/msm_dcvs.c
+++ b/arch/arm/mach-msm/msm_dcvs.c
@@ -23,6 +23,7 @@
 #include <linux/stringify.h>
 #include <linux/debugfs.h>
 #include <linux/msm_tsens.h>
+#include <linux/platform_device.h>
 #include <asm/atomic.h>
 #include <asm/page.h>
 #include <mach/msm_dcvs.h>
@@ -33,6 +34,8 @@
 #define __info(f, ...) pr_info("MSM_DCVS: %s: " f, __func__, __VA_ARGS__)
 #define MAX_PENDING	(5)
 
+#define CORE_FLAG_TEMP_UPDATE		0x1
+
 struct core_attribs {
 	struct kobj_attribute freq_change_us;
 
@@ -123,12 +126,14 @@
 	unsigned int (*get_frequency)(int type_core_num);
 	int (*idle_enable)(int type_core_num,
 			enum msm_core_control_event event);
+	int (*set_floor_frequency)(int type_core_num, unsigned int freq);
 
 	spinlock_t	pending_freq_lock;
 	int pending_freq;
 
 	struct hrtimer	slack_timer;
 	struct delayed_work	temperature_work;
+	int flags;
 };
 
 static int msm_dcvs_enabled = 1;
@@ -140,6 +145,13 @@
 
 static struct kobject *cores_kobj;
 
+#define DCVS_MAX_NUM_FREQS 15
+static struct msm_dcvs_freq_entry cpu_freq_tbl[DCVS_MAX_NUM_FREQS];
+static unsigned num_cpu_freqs;
+static struct msm_dcvs_platform_data *dcvs_pdata;
+
+static DEFINE_MUTEX(gpu_floor_mutex);
+
 static void force_stop_slack_timer(struct dcvs_core *core)
 {
 	unsigned long flags;
@@ -246,6 +258,51 @@
 	spin_unlock_irqrestore(&core->idle_state_change_lock, flags2);
 }
 
+void msm_dcvs_apply_gpu_floor(unsigned long cpu_freq)
+{
+	static unsigned long curr_cpu0_freq;
+	unsigned long gpu_floor_freq = 0;
+	struct dcvs_core *gpu;
+	int i;
+
+	if (!dcvs_pdata)
+		return;
+
+	mutex_lock(&gpu_floor_mutex);
+
+	if (cpu_freq)
+		curr_cpu0_freq = cpu_freq;
+
+	for (i = 0; i < dcvs_pdata->num_sync_rules; i++)
+		if (curr_cpu0_freq > dcvs_pdata->sync_rules[i].cpu_khz) {
+			gpu_floor_freq =
+				dcvs_pdata->sync_rules[i].gpu_floor_khz;
+			break;
+		}
+
+	if (num_online_cpus() > 1)
+		gpu_floor_freq = max(gpu_floor_freq,
+				     dcvs_pdata->gpu_max_nom_khz);
+
+	if (!gpu_floor_freq) {
+		mutex_unlock(&gpu_floor_mutex);
+		return;
+	}
+
+	for (i = GPU_OFFSET; i < CORES_MAX; i++) {
+		gpu = &core_list[i];
+		if (gpu->dcvs_core_id == -1)
+			continue;
+
+		if (gpu->pending_freq != STOP_FREQ_CHANGE &&
+		    gpu->set_floor_frequency)
+			gpu->set_floor_frequency(gpu->type_core_num,
+						 gpu_floor_freq);
+	}
+
+	mutex_unlock(&gpu_floor_mutex);
+}
+
 static int __msm_dcvs_change_freq(struct dcvs_core *core)
 {
 	int ret = 0;
@@ -256,27 +313,26 @@
 	uint32_t ret1 = 0;
 
 	spin_lock_irqsave(&core->pending_freq_lock, flags);
+	if (core->pending_freq == STOP_FREQ_CHANGE)
+		goto out;
 repeat:
 	BUG_ON(!core->pending_freq);
-	if (core->pending_freq == STOP_FREQ_CHANGE)
-		BUG();
 
 	requested_freq = core->pending_freq;
 	time_start = core->time_start;
 	core->time_start = ns_to_ktime(0);
 
-	if (requested_freq < 0) {
-		requested_freq = -1 * requested_freq;
-		core->pending_freq = STOP_FREQ_CHANGE;
-	} else {
-		core->pending_freq = NO_OUTSTANDING_FREQ_CHANGE;
-	}
+	core->pending_freq = NO_OUTSTANDING_FREQ_CHANGE;
 
 	if (requested_freq == core->actual_freq)
 		goto out;
 
 	spin_unlock_irqrestore(&core->pending_freq_lock, flags);
 
+	if (core->type == MSM_DCVS_CORE_TYPE_CPU &&
+	    core->type_core_num == 0)
+		msm_dcvs_apply_gpu_floor(requested_freq);
+
 	/**
 	 * Call the frequency sink driver to change the frequency
 	 * We will need to get back the actual frequency in KHz and
@@ -353,6 +409,9 @@
 	unsigned long temp = 0;
 	int interval_ms;
 
+	if (!(core->flags & CORE_FLAG_TEMP_UPDATE))
+		return;
+
 	tsens_dev.sensor_num = core->sensor;
 	ret = tsens_get_temp(&tsens_dev, &temp);
 	if (!temp) {
@@ -412,10 +471,7 @@
 	}
 
 	if (new_freq == STOP_FREQ_CHANGE) {
-		if (core->pending_freq == NO_OUTSTANDING_FREQ_CHANGE)
-			core->pending_freq = STOP_FREQ_CHANGE;
-		else if (core->pending_freq > 0)
-			core->pending_freq = -1 * core->pending_freq;
+		core->pending_freq = STOP_FREQ_CHANGE;
 		return;
 	}
 
@@ -491,6 +547,36 @@
 	return HRTIMER_NORESTART;
 }
 
+int msm_dcvs_update_algo_params(void)
+{
+	static struct msm_dcvs_algo_param curr_params;
+	static DEFINE_MUTEX(param_update_mutex);
+	struct msm_dcvs_algo_param *new_params;
+	int cpu, ret = 0;
+
+	mutex_lock(&param_update_mutex);
+	new_params = &core_list[CPU_OFFSET + num_online_cpus() - 1].algo_param;
+
+	if (memcmp(&curr_params, new_params,
+		   sizeof(struct msm_dcvs_algo_param))) {
+		for_each_possible_cpu(cpu) {
+			ret = msm_dcvs_scm_set_algo_params(CPU_OFFSET + cpu,
+							   new_params);
+			if (ret) {
+				pr_err("scm set algo params failed on cpu %d, ret %d\n",
+				       cpu, ret);
+				mutex_unlock(&param_update_mutex);
+				return ret;
+			}
+		}
+		memcpy(&curr_params, new_params,
+		       sizeof(struct msm_dcvs_algo_param));
+	}
+
+	mutex_unlock(&param_update_mutex);
+	return ret;
+}
+
 /* Helper functions and macros for sysfs nodes for a core */
 #define CORE_FROM_ATTRIBS(attr, name) \
 	container_of(container_of(attr, struct core_attribs, name), \
@@ -545,12 +631,9 @@
 	} else { \
 		uint32_t old_val = core->algo_param._name; \
 		core->algo_param._name = val; \
-		ret = msm_dcvs_scm_set_algo_params(core->dcvs_core_id, \
-				&core->algo_param); \
+		ret = msm_dcvs_update_algo_params(); \
 		if (ret) { \
 			core->algo_param._name = old_val; \
-			__err("Error(%d) in setting %d for algo param %s\n",\
-					ret, val, __stringify(_name)); \
 		} \
 	} \
 	return count; \
@@ -825,6 +908,37 @@
 	return &core_list[offset];
 }
 
+void msm_dcvs_register_cpu_freq(uint32_t freq, uint32_t voltage)
+{
+	BUG_ON(freq == 0 || voltage == 0 ||
+	       num_cpu_freqs == DCVS_MAX_NUM_FREQS);
+
+	cpu_freq_tbl[num_cpu_freqs].freq = freq;
+	cpu_freq_tbl[num_cpu_freqs].voltage = voltage;
+
+	num_cpu_freqs++;
+}
+
+static void update_cpu_dcvs_params(struct msm_dcvs_core_info *info)
+{
+	int i;
+
+	BUG_ON(num_cpu_freqs == 0);
+
+	info->freq_tbl = cpu_freq_tbl;
+	info->power_param.num_freq = num_cpu_freqs;
+
+	if (!dcvs_pdata || dcvs_pdata->num_sync_rules == 0)
+		return;
+
+	/* the first sync rule shows what the turbo frequencies are -
+	 * these frequencies need energy offsets set */
+	for (i = 0; i < DCVS_MAX_NUM_FREQS && cpu_freq_tbl[i].freq != 0; i++)
+		if (cpu_freq_tbl[i].freq > dcvs_pdata->sync_rules[0].cpu_khz) {
+			cpu_freq_tbl[i].active_energy_offset = 100;
+			cpu_freq_tbl[i].leakage_energy_offset = 100;
+		}
+}
 
 int msm_dcvs_register_core(
 	enum msm_dcvs_core_type type,
@@ -834,6 +948,7 @@
 	unsigned int (*get_frequency)(int type_core_num),
 	int (*idle_enable)(int type_core_num,
 					enum msm_core_control_event event),
+	int (*set_floor_frequency)(int type_core_num, unsigned int freq),
 	int sensor)
 {
 	int ret = -EINVAL;
@@ -857,9 +972,12 @@
 	core->set_frequency = set_frequency;
 	core->get_frequency = get_frequency;
 	core->idle_enable = idle_enable;
-	core->pending_freq = STOP_FREQ_CHANGE;
+	core->set_floor_frequency = set_floor_frequency;
 
 	core->info = info;
+	if (type == MSM_DCVS_CORE_TYPE_CPU)
+		update_cpu_dcvs_params(info);
+
 	memcpy(&core->algo_param, &info->algo_param,
 			sizeof(struct msm_dcvs_algo_param));
 
@@ -913,10 +1031,6 @@
 	core->task = kthread_run(msm_dcvs_do_freq, (void *)core,
 			"msm_dcvs/%d", core->dcvs_core_id);
 	ret = core->dcvs_core_id;
-
-	INIT_DELAYED_WORK(&core->temperature_work, msm_dcvs_report_temp_work);
-	schedule_delayed_work(&core->temperature_work,
-			msecs_to_jiffies(info->thermal_poll_ms));
 	return ret;
 bail:
 	core->dcvs_core_id = -1;
@@ -985,6 +1099,10 @@
 	}
 	force_start_slack_timer(core, timer_interval_us);
 
+	core->flags |= CORE_FLAG_TEMP_UPDATE;
+	INIT_DELAYED_WORK(&core->temperature_work, msm_dcvs_report_temp_work);
+	schedule_delayed_work(&core->temperature_work,
+			      msecs_to_jiffies(core->info->thermal_poll_ms));
 
 	core->idle_enable(core->type_core_num, MSM_DCVS_ENABLE_IDLE_PULSE);
 	return 0;
@@ -1011,16 +1129,27 @@
 		return ret;
 	}
 
+	core->flags &= ~CORE_FLAG_TEMP_UPDATE;
+	cancel_delayed_work(&core->temperature_work);
+
 	core->idle_enable(core->type_core_num, MSM_DCVS_DISABLE_IDLE_PULSE);
 	/* Notify TZ to stop receiving idle info for the core */
 	ret = msm_dcvs_scm_event(core->dcvs_core_id, MSM_DCVS_SCM_DCVS_ENABLE,
 				0, core->actual_freq, &freq, &ret1);
 	core->idle_enable(core->type_core_num,
 			MSM_DCVS_ENABLE_HIGH_LATENCY_MODES);
+
+	if (core->type == MSM_DCVS_CORE_TYPE_GPU)
+		mutex_lock(&gpu_floor_mutex);
+
 	spin_lock_irqsave(&core->pending_freq_lock, flags);
 	/* flush out all the pending freq changes */
 	request_freq_change(core, STOP_FREQ_CHANGE);
 	spin_unlock_irqrestore(&core->pending_freq_lock, flags);
+
+	if (core->type == MSM_DCVS_CORE_TYPE_GPU)
+		mutex_unlock(&gpu_floor_mutex);
+
 	force_stop_slack_timer(core);
 
 	return 0;
@@ -1109,11 +1238,29 @@
 }
 late_initcall(msm_dcvs_late_init);
 
+static int __devinit dcvs_probe(struct platform_device *pdev)
+{
+	if (pdev->dev.platform_data)
+		dcvs_pdata = pdev->dev.platform_data;
+
+	return 0;
+}
+
+static struct platform_driver dcvs_driver = {
+	.probe = dcvs_probe,
+	.driver = {
+		.name = "dcvs",
+		.owner = THIS_MODULE,
+	},
+};
+
 static int __init msm_dcvs_early_init(void)
 {
 	int ret = 0;
 	int i;
 
+	platform_driver_register(&dcvs_driver);
+
 	if (!msm_dcvs_enabled) {
 		__info("Not enabled (%d)\n", msm_dcvs_enabled);
 		return 0;
@@ -1127,8 +1274,10 @@
 		goto done;
 	}
 
-	for (i = 0; i < CORES_MAX; i++)
+	for (i = 0; i < CORES_MAX; i++) {
 		core_list[i].dcvs_core_id = -1;
+		core_list[i].pending_freq = STOP_FREQ_CHANGE;
+	}
 done:
 	return ret;
 }
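msm_dcvs_apply_gpu_floor() walks dcvs_pdata->sync_rules in order and picks the first entry whose cpu_khz the current CPU0 frequency exceeds, which implies the table is sorted by descending cpu_khz. A hypothetical board-file table (the element struct name msm_dcvs_sync_rule and all values are assumptions; only the field names come from the code above):

	static struct msm_dcvs_sync_rule example_sync_rules[] = {
		{ .cpu_khz = 1512000, .gpu_floor_khz = 400000 },
		{ .cpu_khz = 1134000, .gpu_floor_khz = 320000 },
		{ .cpu_khz =  702000, .gpu_floor_khz = 200000 },
	};

	static struct msm_dcvs_platform_data example_dcvs_pdata = {
		.sync_rules      = example_sync_rules,
		.num_sync_rules  = ARRAY_SIZE(example_sync_rules),
		.gpu_max_nom_khz = 320000,
	};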
diff --git a/arch/arm/mach-msm/msm_mpdecision.c b/arch/arm/mach-msm/msm_mpdecision.c
index a65b3ee..94b546a 100644
--- a/arch/arm/mach-msm/msm_mpdecision.c
+++ b/arch/arm/mach-msm/msm_mpdecision.c
@@ -123,6 +123,7 @@
 
 static unsigned long last_nr;
 static int num_present_hundreds;
+static ktime_t last_down_time;
 
 #define RQ_AVG_INSIGNIFICANT_BITS	3
 static bool ok_to_update_tz(int nr, int last_nr)
@@ -380,15 +381,18 @@
 			}
 		}
 
-		for_each_possible_cpu(cpu) {
-			if (!(atomic_read(&msm_mpd.algo_cpu_mask) & (1 << cpu))
-				&& cpu_online(cpu)) {
-				bring_down_cpu(cpu);
-				if (!cpu_online(cpu))
-					goto restart;
-			}
-		}
+		if (ktime_to_ns(ktime_sub(ktime_get(), last_down_time)) >
+		    100 * NSEC_PER_MSEC)
+			for_each_possible_cpu(cpu)
+				if (!(atomic_read(&msm_mpd.algo_cpu_mask) &
+				      (1 << cpu)) && cpu_online(cpu)) {
+					bring_down_cpu(cpu);
+					last_down_time = ktime_get();
+					break;
+				}
 		msm_mpd.hpupdate = HPUPDATE_WAITING;
+		msm_dcvs_apply_gpu_floor(0);
+		msm_dcvs_update_algo_params();
 	}
 
 	return 0;
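The hotplug-down path is now limited to removing one core per pass and at most once every 100 ms; the ktime pattern used for that rate limit, shown in isolation:

	static ktime_t last_down_time;

	static bool down_allowed(void)
	{
		/* permit at most one hotplug-down every 100 ms */
		if (ktime_to_ns(ktime_sub(ktime_get(), last_down_time)) <
		    100 * NSEC_PER_MSEC)
			return false;

		last_down_time = ktime_get();
		return true;
	}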
diff --git a/arch/arm/mach-msm/no-pm.c b/arch/arm/mach-msm/no-pm.c
index d38b416..d460c70 100644
--- a/arch/arm/mach-msm/no-pm.c
+++ b/arch/arm/mach-msm/no-pm.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
+ * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -12,13 +12,15 @@
  */
 
 #include <linux/module.h>
-
+#include <asm/proc-fns.h>
 #include <mach/cpuidle.h>
 #include "idle.h"
 #include "pm.h"
 
 void arch_idle(void)
-{ }
+{
+	cpu_do_idle();
+}
 
 void msm_pm_set_platform_data(struct msm_pm_platform_data *data, int count)
 { }
diff --git a/arch/arm/mach-msm/ocmem.c b/arch/arm/mach-msm/ocmem.c
index 7829d8d..34fd8d2 100644
--- a/arch/arm/mach-msm/ocmem.c
+++ b/arch/arm/mach-msm/ocmem.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -395,7 +395,8 @@
 	pr_debug("ocmem: Disabled br clock\n");
 }
 
-static struct ocmem_plat_data *parse_dt_config(struct platform_device *pdev)
+static struct ocmem_plat_data * __devinit parse_dt_config
+						(struct platform_device *pdev)
 {
 	struct device   *dev = &pdev->dev;
 	struct device_node *node = pdev->dev.of_node;
diff --git a/arch/arm/mach-msm/ocmem_api.c b/arch/arm/mach-msm/ocmem_api.c
index 689e015..8b56775 100644
--- a/arch/arm/mach-msm/ocmem_api.c
+++ b/arch/arm/mach-msm/ocmem_api.c
@@ -304,6 +304,7 @@
 
 	for (i = 0; i < list->num_chunks; i++) {
 		if (!chunks[i].ddr_paddr ||
+			!IS_ALIGNED(chunks[i].ddr_paddr, MIN_CHUNK_SIZE) ||
 			chunks[i].size < MIN_CHUNK_SIZE ||
 			!IS_ALIGNED(chunks[i].size, MIN_CHUNK_SIZE)) {
 			pr_err("Invalid ocmem chunk at index %d (p: %lx, size %lx)\n",
diff --git a/arch/arm/mach-msm/pil-pronto.c b/arch/arm/mach-msm/pil-pronto.c
index 49fe182..6e8d127 100644
--- a/arch/arm/mach-msm/pil-pronto.c
+++ b/arch/arm/mach-msm/pil-pronto.c
@@ -31,6 +31,7 @@
 
 #include "peripheral-loader.h"
 #include "scm-pas.h"
+#include "ramdump.h"
 
 #define PRONTO_PMU_COMMON_GDSCR				0x24
 #define PRONTO_PMU_COMMON_GDSCR_SW_COLLAPSE		BIT(0)
@@ -81,6 +82,7 @@
 	bool crash;
 	struct delayed_work cancel_vote_work;
 	int irq;
+	struct ramdump_device *ramdump_dev;
 };
 
 static int pil_pronto_make_proxy_vote(struct pil_desc *pil)
@@ -301,6 +303,7 @@
 		return IRQ_HANDLED;
 	}
 
+	disable_irq_nosync(drv->irq);
 	drv->restart_inprogress = true;
 	restart_wcnss(drv);
 
@@ -322,7 +325,6 @@
 	pil_shutdown(&drv->desc);
 	flush_delayed_work(&drv->cancel_vote_work);
 	wcnss_flush_delayed_boot_votes();
-	disable_irq_nosync(drv->irq);
 
 	return 0;
 }
@@ -359,9 +361,19 @@
 		smsm_change_state(SMSM_APPS_STATE, SMSM_RESET, SMSM_RESET);
 }
 
-static int wcnss_ramdump(int enable, const struct subsys_desc *crashed_subsys)
+static struct ramdump_segment pronto_segments[] = {
+	{ 0x0D200000, 0x0D980000 - 0x0D200000 }
+};
+
+static int wcnss_ramdump(int enable, const struct subsys_desc *subsys)
 {
-	return 0;
+	struct pronto_data *drv = subsys_to_drv(subsys);
+
+	if (enable)
+		return do_ramdump(drv->ramdump_dev, pronto_segments,
+				ARRAY_SIZE(pronto_segments));
+	else
+		return 0;
 }
 
 static int __devinit pil_pronto_probe(struct platform_device *pdev)
@@ -464,6 +476,12 @@
 	if (ret < 0)
 		goto err_irq;
 
+	drv->ramdump_dev = create_ramdump_device("pronto", &pdev->dev);
+	if (!drv->ramdump_dev) {
+		ret = -ENOMEM;
+		goto err_irq;
+	}
+
 	/* Initialize common_ss GDSCR to wait 4 cycles between states */
 	regval = readl_relaxed(drv->base + PRONTO_PMU_COMMON_GDSCR)
 		& PRONTO_PMU_COMMON_GDSCR_SW_COLLAPSE;
@@ -489,6 +507,7 @@
 	smsm_state_cb_deregister(SMSM_WCNSS_STATE, SMSM_RESET,
 					smsm_state_cb_hdlr, drv);
 	pil_desc_release(&drv->desc);
+	destroy_ramdump_device(drv->ramdump_dev);
 	return 0;
 }
 
diff --git a/arch/arm/mach-msm/pil-q6v5-mss.c b/arch/arm/mach-msm/pil-q6v5-mss.c
index 7652d74..07cbe19 100644
--- a/arch/arm/mach-msm/pil-q6v5-mss.c
+++ b/arch/arm/mach-msm/pil-q6v5-mss.c
@@ -423,7 +423,7 @@
 	struct mba_data *drv = subsys_to_drv(subsys);
 
 	if (!drv->is_loadable)
-		return -ENODEV;
+		return 0;
 	/* MBA doesn't support shutdown */
 	pil_shutdown(&drv->q6->desc);
 	return 0;
@@ -435,7 +435,7 @@
 	int ret;
 
 	if (!drv->is_loadable)
-		return -ENODEV;
+		return 0;
 	/*
 	 * At this time, the modem is shutdown. Therefore this function cannot
 	 * run concurrently with either the watchdog bite error handler or the
@@ -527,7 +527,7 @@
 	struct mba_data *drv = subsys_to_drv(desc);
 
 	if (!drv->is_loadable)
-		return -ENODEV;
+		return 0;
 
 	ret = pil_boot(&drv->q6->desc);
 	if (ret)
@@ -729,8 +729,8 @@
 		return -ENOMEM;
 	platform_set_drvdata(pdev, drv);
 
-	of_property_read_u32(pdev->dev.of_node, "qcom,is_loadable",
-				&drv->is_loadable);
+	drv->is_loadable = of_property_read_bool(pdev->dev.of_node,
+							"qcom,is-loadable");
 	if (drv->is_loadable) {
 		ret = pil_mss_loadable_init(drv, pdev);
 		if (ret)
diff --git a/arch/arm/mach-msm/platsmp-8625.c b/arch/arm/mach-msm/platsmp-8625.c
index 3b31b9f..0e75cae 100644
--- a/arch/arm/mach-msm/platsmp-8625.c
+++ b/arch/arm/mach-msm/platsmp-8625.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -121,10 +121,10 @@
 	write_pen_release(-1);
 
 	/* clear the IPC pending SPI */
-	if (power_collapsed) {
+	if (per_cpu(power_collapsed, cpu)) {
 		raise_clear_spi(cpu, false);
 		clear_pending_spi(cpu_data[cpu].ipc_irq);
-		power_collapsed = 0;
+		per_cpu(power_collapsed, cpu) = 0;
 	}
 
 	/*
@@ -216,7 +216,7 @@
 	 * GDFS which needs to be brought out by raising an SPI.
 	 */
 
-	if (power_collapsed) {
+	if (per_cpu(power_collapsed, cpu)) {
 		gic_configure_and_raise(cpu_data[cpu].ipc_irq, cpu);
 		raise_clear_spi(cpu, true);
 	} else {
diff --git a/arch/arm/mach-msm/pm.h b/arch/arm/mach-msm/pm.h
index faefe34..bd61feb 100644
--- a/arch/arm/mach-msm/pm.h
+++ b/arch/arm/mach-msm/pm.h
@@ -1,7 +1,7 @@
 /* arch/arm/mach-msm/pm.h
  *
  * Copyright (C) 2007 Google, Inc.
- * Copyright (c) 2009-2012, Code Aurora Forum. All rights reserved.
+ * Copyright (c) 2009-2012, The Linux Foundation. All rights reserved.
  * Author: San Mehat <san@android.com>
  *
  * This software is licensed under the terms of the GNU General Public
@@ -27,7 +27,7 @@
 #define msm_secondary_startup NULL
 #endif
 
-extern int power_collapsed;
+DECLARE_PER_CPU(int,  power_collapsed);
 
 struct msm_pm_irq_calls {
 	unsigned int (*irq_pending)(void);
diff --git a/arch/arm/mach-msm/pm2.c b/arch/arm/mach-msm/pm2.c
index ae2a4bc..96c1218 100644
--- a/arch/arm/mach-msm/pm2.c
+++ b/arch/arm/mach-msm/pm2.c
@@ -3,7 +3,7 @@
  * MSM Power Management Routines
  *
  * Copyright (C) 2007 Google, Inc.
- * Copyright (c) 2008-2012 Code Aurora Forum. All rights reserved.
+ * Copyright (c) 2008-2012 The Linux Foundation. All rights reserved.
  *
  * This software is licensed under the terms of the GNU General Public
  * License version 2, as published by the Free Software Foundation, and
@@ -75,8 +75,9 @@
 	MSM_PM_DEBUG_HOTPLUG = BIT(7),
 };
 
+DEFINE_PER_CPU(int, power_collapsed);
+
 static int msm_pm_debug_mask;
-int power_collapsed;
 module_param_named(
 	debug_mask, msm_pm_debug_mask, int, S_IRUGO | S_IWUSR | S_IWGRP
 );
@@ -565,7 +566,7 @@
 		__raw_writel(0, APPS_PWRDOWN);
 		mb();
 
-		if (power_collapsed) {
+		if (per_cpu(power_collapsed, 1)) {
 			/*
 			 * enable the SCU while coming out of power
 			 * collapse.
@@ -983,6 +984,7 @@
 	 * path by reading the MPA5_GDFS_CNT_VAL register.
 	 */
 	if (cpu_is_msm8625()) {
+		int cpu;
 		/*
 		 * on system reset, default value of MPA5_GDFS_CNT_VAL
 		 * is = 0x0, later modem reprogram this value to
@@ -997,7 +999,11 @@
 		/* 8x25Q */
 		if (SOCINFO_VERSION_MAJOR(socinfo_get_version()) >= 3) {
 			if (val != 0x000F0002) {
-				power_collapsed = 1;
+				for_each_possible_cpu(cpu) {
+					if (!cpu)
+						continue;
+					per_cpu(power_collapsed, cpu) = 1;
+				}
 				/*
 				 * override DBGNOPOWERDN and program the GDFS
 				 * count val
@@ -1008,7 +1014,11 @@
 				modem_early_exit = 1;
 		} else {
 			if (val != 0x00030002) {
-				power_collapsed = 1;
+				for_each_possible_cpu(cpu) {
+					if (!cpu)
+						continue;
+					per_cpu(power_collapsed, cpu) = 1;
+				}
 				/*
 				 * override DBGNOPOWERDN and program the GDFS
 				 * count val
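power_collapsed changes from one global flag to a per-CPU flag so each secondary core tracks its own GDFS power-collapse state; the declare/define/iterate pattern is the standard per-CPU one (a condensed sketch, not a verbatim copy of the hunks above):

	/* in the header */
	DECLARE_PER_CPU(int, power_collapsed);

	/* in exactly one translation unit */
	DEFINE_PER_CPU(int, power_collapsed);

	static void mark_secondaries_collapsed(void)
	{
		int cpu;

		for_each_possible_cpu(cpu) {
			if (!cpu)
				continue;	/* CPU0 is skipped, as in pm2.c */
			per_cpu(power_collapsed, cpu) = 1;
		}
	}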
diff --git a/arch/arm/mach-msm/pmu.c b/arch/arm/mach-msm/pmu.c
index c426ff9..f0b83f9 100644
--- a/arch/arm/mach-msm/pmu.c
+++ b/arch/arm/mach-msm/pmu.c
@@ -16,8 +16,20 @@
 #include <mach/irqs.h>
 #include <mach/socinfo.h>
 
+/*
+ * If a GIC is present, then all IRQs < 32 are PPIs and can only be
+ * requested and freed using the percpu IRQ API.
+ * If a VIC is present, then only the traditional request/free API works.
+ *
+ * All MPCore parts have a GIC. The Cortex-A5 may or may not be MPCore, but
+ * it still has a GIC, except for the 7x27a, which is an A5 and yet has a VIC.
+ * So if the chip is A5 but does not have a GIC, default to the traditional
+ * IRQ {request, free}_irq API.
+ */
+
 #if defined(CONFIG_ARCH_MSM_KRAITMP) || defined(CONFIG_ARCH_MSM_SCORPIONMP) \
-	|| defined(CONFIG_ARCH_MSM8625)
+	|| defined(CONFIG_ARCH_MSM8625) || \
+	(defined(CONFIG_ARCH_MSM_CORTEX_A5) && !defined(CONFIG_MSM_VIC))
 static DEFINE_PER_CPU(u32, pmu_irq_cookie);
 
 static void enable_irq_callback(void *info)
@@ -141,8 +153,10 @@
 	 * handlers to call the percpu API.
 	 * Defaults to unicore API {request,free}_irq().
 	 * See arch/arm/kernel/perf_event.c
+	 * See Comment above on the A5 and MSM_VIC.
 	 */
-#if defined(CONFIG_ARCH_MSM_KRAITMP) || defined(CONFIG_ARCH_MSM_SCORPIONMP)
+#if defined(CONFIG_ARCH_MSM_KRAITMP) || defined(CONFIG_ARCH_MSM_SCORPIONMP) \
+	|| (defined(CONFIG_ARCH_MSM_CORTEX_A5) && !defined(CONFIG_MSM_VIC))
 	cpu_pmu_device.dev.platform_data = &multicore_data;
 #endif
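As the new comment explains, PPIs (IRQs below 32 behind a GIC) must go through the per-CPU IRQ API, while a VIC-based part such as the 7x27a keeps the classic request_irq(). A hedged sketch of the two request paths (the helper and its bool parameter are illustrative):

	static DEFINE_PER_CPU(u32, pmu_irq_cookie);

	static int grab_pmu_irq(unsigned int irq, irq_handler_t handler,
				bool gic_ppi)
	{
		if (gic_ppi)
			/* PPI: one line mirrored on every core */
			return request_percpu_irq(irq, handler, "arm-pmu",
						  &pmu_irq_cookie);

		/* VIC or SPI case: ordinary request */
		return request_irq(irq, handler, IRQF_NOBALANCING, "arm-pmu",
				   NULL);
	}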
 
diff --git a/arch/arm/mach-msm/qdsp5/audio_acdb.c b/arch/arm/mach-msm/qdsp5/audio_acdb.c
index 16f23f4..d7a4607 100644
--- a/arch/arm/mach-msm/qdsp5/audio_acdb.c
+++ b/arch/arm/mach-msm/qdsp5/audio_acdb.c
@@ -2575,6 +2575,7 @@
 
 	memset(&acdb_data, 0, sizeof(acdb_data));
 	spin_lock_init(&acdb_data.dsp_lock);
+	init_waitqueue_head(&acdb_data.wait);
 	acdb_data.cb_thread_task = kthread_run(acdb_calibrate_device,
 		NULL, "acdb_cb_thread");
 
@@ -2590,7 +2591,6 @@
 		MM_ERR("RTC ACDB=>INIT Failure\n");
 
 #endif
-	init_waitqueue_head(&acdb_data.wait);
 
 	return misc_register(&acdb_misc);
 err:
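Moving init_waitqueue_head() ahead of kthread_run() closes an init-ordering race: the acdb callback thread can start sleeping on acdb_data.wait before probe reaches the old initialization point. The general shape of the rule, as a minimal sketch with illustrative names:

	static wait_queue_head_t example_wq;
	static struct task_struct *waiter;

	static int waiter_fn(void *unused)
	{
		/* safe only because example_wq was initialized before kthread_run() */
		wait_event_interruptible(example_wq, kthread_should_stop());
		return 0;
	}

	static int example_init(void)
	{
		init_waitqueue_head(&example_wq);			/* first: init */
		waiter = kthread_run(waiter_fn, NULL, "example_waiter");/* then: start */
		return IS_ERR(waiter) ? PTR_ERR(waiter) : 0;
	}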
diff --git a/arch/arm/mach-msm/qdsp6v2/adsp-loader.c b/arch/arm/mach-msm/qdsp6v2/adsp-loader.c
index c28e403..02dbece 100644
--- a/arch/arm/mach-msm/qdsp6v2/adsp-loader.c
+++ b/arch/arm/mach-msm/qdsp6v2/adsp-loader.c
@@ -19,6 +19,7 @@
 #include <linux/platform_device.h>
 #include <mach/subsystem_restart.h>
 #include <mach/qdsp6v2/apr.h>
+#include <linux/of_device.h>
 
 #define Q6_PIL_GET_DELAY_MS 100
 
@@ -30,25 +31,41 @@
 {
 	struct adsp_loader_private *priv;
 	int rc = 0;
+	const char *adsp_dt = "qcom,adsp-state";
+	u32 adsp_state;
 
-	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
-	if (!priv)
-		return -ENOMEM;
-
-	platform_set_drvdata(pdev, priv);
-
-	priv->pil_h = subsystem_get("adsp");
-	if (IS_ERR(priv->pil_h)) {
-		pr_err("%s: pil get adsp failed, error:%d\n", __func__, rc);
-		devm_kfree(&pdev->dev, priv);
-		goto fail;
+	rc = of_property_read_u32(pdev->dev.of_node, adsp_dt, &adsp_state);
+	if (rc) {
+		dev_err(&pdev->dev,
+			"%s: ADSP state = %x\n", __func__, adsp_state);
+		return rc;
 	}
 
-	/* Query the DSP to check if resources are available */
-	msleep(Q6_PIL_GET_DELAY_MS);
+	if (adsp_state == APR_SUBSYS_DOWN) {
+		priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+		if (!priv)
+			return -ENOMEM;
 
-	/* Set the state of the ADSP in APR driver */
-	apr_set_q6_state(APR_SUBSYS_LOADED);
+		platform_set_drvdata(pdev, priv);
+
+		priv->pil_h = subsystem_get("adsp");
+		if (IS_ERR(priv->pil_h)) {
+			pr_err("%s: pil get adsp failed, error:%d\n",
+				__func__, rc);
+			devm_kfree(&pdev->dev, priv);
+			goto fail;
+		}
+
+		/* Query the DSP to check if resources are available */
+		msleep(Q6_PIL_GET_DELAY_MS);
+
+		/* Set the state of the ADSP in APR driver */
+		apr_set_q6_state(APR_SUBSYS_LOADED);
+	} else if (adsp_state == APR_SUBSYS_LOADED) {
+		dev_dbg(&pdev->dev,
+			"%s:MDM9x25 ADSP state = %x\n", __func__, adsp_state);
+		apr_set_q6_state(APR_SUBSYS_LOADED);
+	}
 
 	/* Query for MMPM API */
 
@@ -62,7 +79,8 @@
 	struct adsp_loader_private *priv;
 
 	priv = platform_get_drvdata(pdev);
-	subsystem_put(priv->pil_h);
+	if (priv != NULL)
+		subsystem_put(priv->pil_h);
 	pr_info("%s: Q6/ADSP image is unloaded\n", __func__);
 
 	return 0;
diff --git a/arch/arm/mach-msm/qdsp6v2/q6audio_common.h b/arch/arm/mach-msm/qdsp6v2/q6audio_common.h
index e4291e7..3bc8454 100644
--- a/arch/arm/mach-msm/qdsp6v2/q6audio_common.h
+++ b/arch/arm/mach-msm/qdsp6v2/q6audio_common.h
@@ -15,7 +15,7 @@
 #ifndef __Q6_AUDIO_COMMON_H__
 #define __Q6_AUDIO_COMMON_H__
 
-#ifdef CONFIG_ARCH_MSM8974
+#if defined(CONFIG_ARCH_MSM8974) || defined(CONFIG_ARCH_MSM9625)
 #include <sound/apr_audio-v2.h>
 #include <sound/q6asm-v2.h>
 #else
diff --git a/arch/arm/mach-msm/qdsp6v2/rtac_v2.c b/arch/arm/mach-msm/qdsp6v2/rtac_v2.c
index 2d0607c..409d796 100644
--- a/arch/arm/mach-msm/qdsp6v2/rtac_v2.c
+++ b/arch/arm/mach-msm/qdsp6v2/rtac_v2.c
@@ -24,6 +24,7 @@
 #include <mach/qdsp6v2/rtac.h>
 #include "q6audio_common.h"
 #include <sound/q6afe-v2.h>
+#include <sound/apr_audio-v2.h>
 
 #ifndef CONFIG_RTAC
 
@@ -45,10 +46,6 @@
 
 #else
 
-#define VOICE_CMD_SET_PARAM		0x00011006
-#define VOICE_CMD_GET_PARAM		0x00011007
-#define VOICE_EVT_GET_PARAM_ACK		0x00011008
-
 /* Max size of payload (buf size - apr header) */
 #define MAX_PAYLOAD_SIZE		4076
 #define RTAC_MAX_ACTIVE_DEVICES		4
@@ -353,7 +350,7 @@
 	return;
 }
 
-static int get_voice_index(u32 cvs_handle)
+static int get_voice_index_cvs(u32 cvs_handle)
 {
 	u32 i;
 
@@ -367,6 +364,32 @@
 	return 0;
 }
 
+static int get_voice_index_cvp(u32 cvp_handle)
+{
+	u32 i;
+
+	for (i = 0; i < rtac_voice_data.num_of_voice_combos; i++) {
+		if (rtac_voice_data.voice[i].cvp_handle == cvp_handle)
+			return i;
+	}
+
+	pr_err("%s: No voice index for CVP handle %d found returning 0\n",
+	       __func__, cvp_handle);
+	return 0;
+}
+
+static int get_voice_index(u32 mode, u32 handle)
+{
+	if (mode == RTAC_CVP)
+		return get_voice_index_cvp(handle);
+	if (mode == RTAC_CVS)
+		return get_voice_index_cvs(handle);
+
+	pr_err("%s: Invalid mode %d, returning 0\n",
+	       __func__, mode);
+	return 0;
+}
+
 
 /* ADM APR */
 void rtac_set_adm_handle(void *handle)
@@ -402,6 +425,7 @@
 		if (payload_size > rtac_adm_user_buf_size) {
 			pr_err("%s: Buffer set not big enough for returned data, buf size = %d, ret data = %d\n",
 			 __func__, rtac_adm_user_buf_size, payload_size);
+			rtac_adm_payload_size = 0;
 			goto done;
 		}
 		memcpy(rtac_adm_buffer + sizeof(u32), payload, payload_size);
@@ -470,6 +494,7 @@
 
 	/* Set globals for copy of returned payload */
 	rtac_adm_user_buf_size = count;
+
 	/* Copy buffer to in-band payload */
 	if (copy_from_user(rtac_adm_buffer + sizeof(adm_params),
 			buf + 3 * sizeof(u32), payload_size)) {
@@ -572,6 +597,7 @@
 		if (payload_size > rtac_asm_user_buf_size) {
 			pr_err("%s: Buffer set not big enough for returned data, buf size = %d, ret data = %d\n",
 			 __func__, rtac_asm_user_buf_size, payload_size);
+			rtac_asm_payload_size = 0;
 			goto done;
 		}
 		memcpy(rtac_asm_buffer + sizeof(u32), payload, payload_size);
@@ -619,6 +645,7 @@
 			__func__);
 		goto done;
 	}
+
 	if (session_id > (SESSION_MAX + 1)) {
 		pr_err("%s: Invalid Session = %d\n", __func__, session_id);
 		goto done;
@@ -739,6 +766,7 @@
 		if (payload_size > rtac_voice_user_buf_size) {
 			pr_err("%s: Buffer set not big enough for returned data, buf size = %d, ret data = %d\n",
 			 __func__, rtac_voice_user_buf_size, payload_size);
+			rtac_voice_payload_size = 0;
 			goto done;
 		}
 		memcpy(rtac_voice_buffer + sizeof(u32), payload, payload_size);
@@ -753,7 +781,7 @@
 	u32	count = 0;
 	u32	bytes_returned = 0;
 	u32	payload_size;
-	u16	dest_port;
+	u32	dest_port;
 	struct apr_hdr		voice_params;
 	pr_debug("%s\n", __func__);
 
@@ -818,10 +846,10 @@
 	voice_params.src_svc = 0;
 	voice_params.src_domain = APR_DOMAIN_APPS;
 	voice_params.src_port = voice_session_id[
-					get_voice_index(dest_port)];
+					get_voice_index(mode, dest_port)];
 	voice_params.dest_svc = 0;
 	voice_params.dest_domain = APR_DOMAIN_MODEM;
-	voice_params.dest_port = dest_port;
+	voice_params.dest_port = (u16)dest_port;
 	voice_params.token = 0;
 	voice_params.opcode = opcode;
 
diff --git a/arch/arm/mach-msm/rpm-regulator-smd.c b/arch/arm/mach-msm/rpm-regulator-smd.c
index d1c61fe..bb33283 100644
--- a/arch/arm/mach-msm/rpm-regulator-smd.c
+++ b/arch/arm/mach-msm/rpm-regulator-smd.c
@@ -659,19 +659,6 @@
 	return uV;
 }
 
-static int rpm_vreg_list_voltage(struct regulator_dev *rdev, unsigned selector)
-{
-	struct rpm_regulator *reg = rdev_get_drvdata(rdev);
-	int uV = 0;
-
-	if (selector == 0)
-		uV = reg->min_uV;
-	else if (selector == 1)
-		uV = reg->max_uV;
-
-	return uV;
-}
-
 static int rpm_vreg_set_voltage_corner(struct regulator_dev *rdev, int min_uV,
 				int max_uV, unsigned *selector)
 {
@@ -1030,7 +1017,6 @@
 	.is_enabled		= rpm_vreg_is_enabled,
 	.set_voltage		= rpm_vreg_set_voltage,
 	.get_voltage		= rpm_vreg_get_voltage,
-	.list_voltage		= rpm_vreg_list_voltage,
 	.set_mode		= rpm_vreg_set_mode,
 	.get_mode		= rpm_vreg_get_mode,
 	.get_optimum_mode	= rpm_vreg_get_optimum_mode,
@@ -1043,7 +1029,6 @@
 	.is_enabled		= rpm_vreg_is_enabled,
 	.set_voltage		= rpm_vreg_set_voltage_corner,
 	.get_voltage		= rpm_vreg_get_voltage_corner,
-	.list_voltage		= rpm_vreg_list_voltage,
 	.set_mode		= rpm_vreg_set_mode,
 	.get_mode		= rpm_vreg_get_mode,
 	.get_optimum_mode	= rpm_vreg_get_optimum_mode,
@@ -1056,7 +1041,6 @@
 	.is_enabled		= rpm_vreg_is_enabled,
 	.set_voltage		= rpm_vreg_set_voltage,
 	.get_voltage		= rpm_vreg_get_voltage,
-	.list_voltage		= rpm_vreg_list_voltage,
 	.set_mode		= rpm_vreg_set_mode,
 	.get_mode		= rpm_vreg_get_mode,
 	.get_optimum_mode	= rpm_vreg_get_optimum_mode,
@@ -1069,7 +1053,6 @@
 	.is_enabled		= rpm_vreg_is_enabled,
 	.set_voltage		= rpm_vreg_set_voltage_corner,
 	.get_voltage		= rpm_vreg_get_voltage_corner,
-	.list_voltage		= rpm_vreg_list_voltage,
 	.set_mode		= rpm_vreg_set_mode,
 	.get_mode		= rpm_vreg_get_mode,
 	.get_optimum_mode	= rpm_vreg_get_optimum_mode,
@@ -1089,7 +1072,6 @@
 	.is_enabled		= rpm_vreg_is_enabled,
 	.set_voltage		= rpm_vreg_set_voltage,
 	.get_voltage		= rpm_vreg_get_voltage,
-	.list_voltage		= rpm_vreg_list_voltage,
 	.enable_time		= rpm_vreg_enable_time,
 };
 
diff --git a/arch/arm/mach-msm/rpm-smd.c b/arch/arm/mach-msm/rpm-smd.c
index 6d1d038..a59b338 100644
--- a/arch/arm/mach-msm/rpm-smd.c
+++ b/arch/arm/mach-msm/rpm-smd.c
@@ -36,7 +36,8 @@
 #include <mach/msm_smd.h>
 #include <mach/rpm-smd.h>
 #include "rpm-notifier.h"
-
+#define CREATE_TRACE_POINTS
+#include "trace_rpm_smd.h"
 /* Debug Definitions */
 
 enum {
@@ -151,6 +152,8 @@
 
 LIST_HEAD(msm_rpm_ack_list);
 
+static DECLARE_COMPLETION(data_ready);
+
 static void msm_rpm_notify_sleep_chain(struct rpm_message_header *hdr,
 		struct msm_rpm_kvp_data *kvp)
 {
@@ -225,9 +228,6 @@
 	memcpy(handle->kvp[i].value, data, size);
 	handle->kvp[i].valid = true;
 
-	if (handle->msg_hdr.set == MSM_RPM_CTX_SLEEP_SET)
-		msm_rpm_notify_sleep_chain(&handle->msg_hdr, &handle->kvp[i]);
-
 	return 0;
 
 }
@@ -339,7 +339,7 @@
 
 	switch (event) {
 	case SMD_EVENT_DATA:
-		queue_work(msm_rpm_smd_wq, &pdata->work);
+		complete(&data_ready);
 		break;
 	case SMD_EVENT_OPEN:
 		complete(&pdata->smd_open);
@@ -529,17 +529,19 @@
 	int errno;
 	char buf[MAX_ERR_BUFFER_SIZE] = {0};
 
-	if (!spin_trylock(&msm_rpm_data.smd_lock_read))
-		return;
-	while (smd_is_pkt_avail(msm_rpm_data.ch_info)) {
-		if (msm_rpm_read_smd_data(buf)) {
-			break;
+	while (1) {
+		wait_for_completion(&data_ready);
+
+		spin_lock(&msm_rpm_data.smd_lock_read);
+		while (smd_is_pkt_avail(msm_rpm_data.ch_info)) {
+			if (msm_rpm_read_smd_data(buf))
+				break;
+			msg_id = msm_rpm_get_msg_id_from_ack(buf);
+			errno = msm_rpm_get_error_from_ack(buf);
+			msm_rpm_process_ack(msg_id, errno);
 		}
-		msg_id = msm_rpm_get_msg_id_from_ack(buf);
-		errno = msm_rpm_get_error_from_ack(buf);
-		msm_rpm_process_ack(msg_id, errno);
+		spin_unlock(&msm_rpm_data.smd_lock_read);
 	}
-	spin_unlock(&msm_rpm_data.smd_lock_read);
 }
 
 #define DEBUG_PRINT_BUFFER_SIZE 512
@@ -731,6 +733,11 @@
 
 		memcpy(tmpbuff, cdata->kvp[i].value, cdata->kvp[i].nbytes);
 		tmpbuff += cdata->kvp[i].nbytes;
+
+		if (cdata->msg_hdr.set == MSM_RPM_CTX_SLEEP_SET)
+			msm_rpm_notify_sleep_chain(&cdata->msg_hdr,
+					&cdata->kvp[i]);
+
 	}
 
 	if (msm_rpm_debug_mask
@@ -773,6 +780,10 @@
 	spin_unlock_irqrestore(&msm_rpm_data.smd_lock_write, flags);
 
 	if (ret == msg_size) {
+		trace_rpm_send_message(noirq, cdata->msg_hdr.set,
+				cdata->msg_hdr.resource_type,
+				cdata->msg_hdr.resource_id,
+				cdata->msg_hdr.msg_id);
 		for (i = 0; (i < cdata->write_idx); i++)
 			cdata->kvp[i].valid = false;
 		cdata->msg_hdr.data_len = 0;
@@ -828,6 +839,8 @@
 		return 0;
 
 	wait_for_completion(&elem->ack);
+	trace_rpm_ack_recd(0, msg_id);
+
 	msm_rpm_free_list_entry(elem);
 	return elem->errno;
 }
@@ -880,9 +893,14 @@
 	}
 
 	rc = elem->errno;
+	trace_rpm_ack_recd(1, msg_id);
+
 	msm_rpm_free_list_entry(elem);
 wait_ack_cleanup:
 	spin_unlock_irqrestore(&msm_rpm_data.smd_lock_read, flags);
+
+	if (smd_is_pkt_avail(msm_rpm_data.ch_info))
+		complete(&data_ready);
 	return rc;
 }
 EXPORT_SYMBOL(msm_rpm_wait_for_ack_noirq);
@@ -1004,6 +1022,7 @@
 		msm_rpm_smd_wq = create_singlethread_workqueue("rpm-smd");
 		if (!msm_rpm_smd_wq)
 			return -EINVAL;
+		queue_work(msm_rpm_smd_wq, &msm_rpm_data.work);
 	}
 
 	of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);
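The receive side switches from queueing a work item per SMD event to a single long-running worker: the notify callback only completes data_ready, and the worker loops forever draining packets under the read lock. The producer/consumer shape, stripped down (names illustrative):

	static DECLARE_COMPLETION(data_ready);

	/* notify callback: producer side, may run in atomic context */
	static void on_smd_data_event(void)
	{
		complete(&data_ready);
	}

	/* the one work item queued at probe time: consumer side */
	static void rx_worker(struct work_struct *work)
	{
		while (1) {
			wait_for_completion(&data_ready);
			/* drain every pending packet, then sleep again */
		}
	}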
diff --git a/arch/arm/mach-msm/saw-regulator.c b/arch/arm/mach-msm/saw-regulator.c
index 6762648..0a81a33 100644
--- a/arch/arm/mach-msm/saw-regulator.c
+++ b/arch/arm/mach-msm/saw-regulator.c
@@ -54,11 +54,17 @@
 	struct regulator_dev		*rdev;
 	char				*name;
 	int				uV;
+	int				last_set_uV;
+	unsigned			vlevel;
+	bool				online;
 };
 
 /* Minimum core operating voltage */
 #define MIN_CORE_VOLTAGE		950000
 
+/* Specifies an uninitialized voltage */
+#define INVALID_VOLTAGE			-1
+
 /* Specifies the PMIC internal slew rate in uV/us. */
 #define REGULATOR_SLEW_RATE		1250
 
@@ -69,12 +75,32 @@
 	return vreg->uV;
 }
 
+static int _set_voltage(struct regulator_dev *rdev)
+{
+	struct saw_vreg *vreg = rdev_get_drvdata(rdev);
+	int rc;
+
+	rc = msm_spm_set_vdd(rdev_get_id(rdev), vreg->vlevel);
+	if (!rc) {
+		if (vreg->uV > vreg->last_set_uV) {
+			/* Wait for voltage to stabilize. */
+			udelay((vreg->uV - vreg->last_set_uV) /
+						REGULATOR_SLEW_RATE);
+		}
+		vreg->last_set_uV = vreg->uV;
+	} else {
+		pr_err("%s: msm_spm_set_vdd failed %d\n", vreg->name, rc);
+		vreg->uV = vreg->last_set_uV;
+	}
+
+	return rc;
+}
+
 static int saw_set_voltage(struct regulator_dev *rdev, int min_uV, int max_uV,
 			   unsigned *selector)
 {
 	struct saw_vreg *vreg = rdev_get_drvdata(rdev);
 	int uV = min_uV;
-	int rc;
 	u8 vprog, band;
 
 	if (uV < FTSMPS_BAND1_UV_MIN && max_uV >= FTSMPS_BAND1_UV_MIN)
@@ -119,23 +145,51 @@
 		return -EINVAL;
 	}
 
-	rc = msm_spm_set_vdd(rdev_get_id(rdev), band | vprog);
-	if (!rc) {
-		if (uV > vreg->uV) {
-			/* Wait for voltage to stabalize. */
-			udelay((uV - vreg->uV) / REGULATOR_SLEW_RATE);
-		}
-		vreg->uV = uV;
-	} else {
-		pr_err("%s: msm_spm_set_vdd failed %d\n", vreg->name, rc);
-	}
+	vreg->vlevel = band | vprog;
+	vreg->uV = uV;
+
+	if (!vreg->online)
+		return 0;
+
+	return _set_voltage(rdev);
+}
+
+static int saw_enable(struct regulator_dev *rdev)
+{
+	struct saw_vreg *vreg = rdev_get_drvdata(rdev);
+	int rc = 0;
+
+	if (vreg->uV != vreg->last_set_uV)
+		rc = _set_voltage(rdev);
+
+	if (!rc)
+		vreg->online = true;
 
 	return rc;
 }
 
+static int saw_disable(struct regulator_dev *rdev)
+{
+	struct saw_vreg *vreg = rdev_get_drvdata(rdev);
+
+	vreg->online = false;
+
+	return 0;
+}
+
+static int saw_is_enabled(struct regulator_dev *rdev)
+{
+	struct saw_vreg *vreg = rdev_get_drvdata(rdev);
+
+	return vreg->online;
+}
+
 static struct regulator_ops saw_ops = {
 	.get_voltage = saw_get_voltage,
 	.set_voltage = saw_set_voltage,
+	.enable	     = saw_enable,
+	.disable     = saw_disable,
+	.is_enabled  = saw_is_enabled,
 };
 
 static int __devinit saw_probe(struct platform_device *pdev)
@@ -168,12 +222,13 @@
 		goto free_vreg;
 	}
 
-	vreg->desc.name  = vreg->name;
-	vreg->desc.id    = pdev->id;
-	vreg->desc.ops   = &saw_ops;
-	vreg->desc.type  = REGULATOR_VOLTAGE;
-	vreg->desc.owner = THIS_MODULE;
-	vreg->uV	 = MIN_CORE_VOLTAGE;
+	vreg->desc.name	  = vreg->name;
+	vreg->desc.id	  = pdev->id;
+	vreg->desc.ops	  = &saw_ops;
+	vreg->desc.type	  = REGULATOR_VOLTAGE;
+	vreg->desc.owner  = THIS_MODULE;
+	vreg->uV	  = INVALID_VOLTAGE;
+	vreg->last_set_uV = MIN_CORE_VOLTAGE;
 
 	vreg->rdev = regulator_register(&vreg->desc, &pdev->dev,
 							init_data, vreg, NULL);
@@ -233,5 +288,4 @@
 
 MODULE_LICENSE("GPL v2");
 MODULE_DESCRIPTION("SAW regulator driver");
-MODULE_VERSION("1.0");
 MODULE_ALIAS("platform:saw-regulator");
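With the 1250 uV/us REGULATOR_SLEW_RATE above, raising the rail from the 950000 uV minimum to 1050000 uV stalls in udelay() for (1050000 - 950000) / 1250 = 80 us. A standalone check of that arithmetic:

	#include <stdio.h>

	#define REGULATOR_SLEW_RATE	1250	/* uV per us, from the driver above */

	int main(void)
	{
		int last_uV = 950000, new_uV = 1050000;
		int delay_us = (new_uV - last_uV) / REGULATOR_SLEW_RATE;

		printf("udelay(%d)\n", delay_us);	/* prints udelay(80) */
		return 0;
	}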
diff --git a/arch/arm/mach-msm/scm.c b/arch/arm/mach-msm/scm.c
index 6013efc..f4dae89 100644
--- a/arch/arm/mach-msm/scm.c
+++ b/arch/arm/mach-msm/scm.c
@@ -204,10 +204,13 @@
 	return ret;
 }
 
-static u32 cacheline_size;
-
 static void scm_inv_range(unsigned long start, unsigned long end)
 {
+	u32 cacheline_size, ctr;
+
+	asm volatile("mrc p15, 0, %0, c0, c0, 1" : "=r" (ctr));
+	cacheline_size = 4 << ((ctr >> 16) & 0xf);
+
 	start = round_down(start, cacheline_size);
 	end = round_up(end, cacheline_size);
 	outer_inv_range(start, end);
@@ -444,13 +447,3 @@
 }
 EXPORT_SYMBOL(scm_get_feat_version);
 
-static int scm_init(void)
-{
-	u32 ctr;
-
-	asm volatile("mrc p15, 0, %0, c0, c0, 1" : "=r" (ctr));
-	cacheline_size =  4 << ((ctr >> 16) & 0xf);
-
-	return 0;
-}
-early_initcall(scm_init);
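Deriving the cache-line size at call time uses the ARM Cache Type Register: bits [19:16] (DminLine) hold log2 of the minimum D-cache line length in words, so 4 << DminLine gives the size in bytes. For DminLine = 3 that is a 32-byte line; a quick check with an assumed CTR value:

	#include <assert.h>

	int main(void)
	{
		unsigned int ctr  = 0x83338003;		/* illustrative CTR, DminLine = 3 */
		unsigned int line = 4 << ((ctr >> 16) & 0xf);

		assert(line == 32);			/* 32-byte minimum D-cache line */
		return 0;
	}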
diff --git a/arch/arm/mach-msm/smd.c b/arch/arm/mach-msm/smd.c
index b1dd1db..7427899 100644
--- a/arch/arm/mach-msm/smd.c
+++ b/arch/arm/mach-msm/smd.c
@@ -36,6 +36,8 @@
 #include <linux/notifier.h>
 #include <linux/sort.h>
 #include <linux/suspend.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
 #include <mach/msm_smd.h>
 #include <mach/msm_iomap.h>
 #include <mach/system.h>
@@ -3502,6 +3504,295 @@
 	return err_ret;
 }
 
+static int __devinit parse_smd_devicetree(struct device_node *node,
+						void *irq_out_base)
+{
+	uint32_t edge;
+	char *key;
+	int ret;
+	uint32_t irq_offset;
+	uint32_t irq_bitmask;
+	uint32_t irq_line;
+	unsigned long irq_flags = IRQF_TRIGGER_RISING;
+	const char *pilstr;
+	struct interrupt_config_item *private_irq;
+
+	key = "qcom,smd-edge";
+	ret = of_property_read_u32(node, key, &edge);
+	if (ret)
+		goto missing_key;
+	SMD_DBG("%s: %s = %d", __func__, key, edge);
+
+	key = "qcom,smd-irq-offset";
+	ret = of_property_read_u32(node, key, &irq_offset);
+	if (ret)
+		goto missing_key;
+	SMD_DBG("%s: %s = %x", __func__, key, irq_offset);
+
+	key = "qcom,smd-irq-bitmask";
+	ret = of_property_read_u32(node, key, &irq_bitmask);
+	if (ret)
+		goto missing_key;
+	SMD_DBG("%s: %s = %x", __func__, key, irq_bitmask);
+
+	key = "interrupts";
+	irq_line = irq_of_parse_and_map(node, 0);
+	if (!irq_line)
+		goto missing_key;
+	SMD_DBG("%s: %s = %d", __func__, key, irq_line);
+
+	key = "qcom,pil-string";
+	pilstr = of_get_property(node, key, NULL);
+	if (pilstr)
+		SMD_DBG("%s: %s = %s", __func__, key, pilstr);
+
+	key = "qcom,irq-no-suspend";
+	ret = of_property_read_bool(node, key);
+	if (ret)
+		irq_flags |= IRQF_NO_SUSPEND;
+
+	private_irq = &private_intr_config[edge_to_pids[edge].remote_pid].smd;
+	private_irq->out_bit_pos = irq_bitmask;
+	private_irq->out_offset = irq_offset;
+	private_irq->out_base = irq_out_base;
+	private_irq->irq_id = irq_line;
+
+	ret = request_irq(irq_line,
+				private_irq->irq_handler,
+				irq_flags,
+				"smd_dev",
+				NULL);
+	if (ret < 0) {
+		pr_err("%s: request_irq() failed on %d\n", __func__, irq_line);
+		return ret;
+	} else {
+		ret = enable_irq_wake(irq_line);
+		if (ret < 0)
+			pr_err("%s: enable_irq_wake() failed on %d\n", __func__,
+					irq_line);
+	}
+
+	if (pilstr)
+		strlcpy(edge_to_pids[edge].subsys_name, pilstr,
+						SMD_MAX_CH_NAME_LEN);
+
+	return 0;
+
+missing_key:
+	pr_err("%s: missing key: %s", __func__, key);
+	return -ENODEV;
+}
+
+static int __devinit parse_smsm_devicetree(struct device_node *node,
+						void *irq_out_base)
+{
+	uint32_t edge;
+	char *key;
+	int ret;
+	uint32_t irq_offset;
+	uint32_t irq_bitmask;
+	uint32_t irq_line;
+	struct interrupt_config_item *private_irq;
+
+	key = "qcom,smsm-edge";
+	ret = of_property_read_u32(node, key, &edge);
+	if (ret)
+		goto missing_key;
+	SMD_DBG("%s: %s = %d", __func__, key, edge);
+
+	key = "qcom,smsm-irq-offset";
+	ret = of_property_read_u32(node, key, &irq_offset);
+	if (ret)
+		goto missing_key;
+	SMD_DBG("%s: %s = %x", __func__, key, irq_offset);
+
+	key = "qcom,smsm-irq-bitmask";
+	ret = of_property_read_u32(node, key, &irq_bitmask);
+	if (ret)
+		goto missing_key;
+	SMD_DBG("%s: %s = %x", __func__, key, irq_bitmask);
+
+	key = "interrupts";
+	irq_line = irq_of_parse_and_map(node, 0);
+	if (!irq_line)
+		goto missing_key;
+	SMD_DBG("%s: %s = %d", __func__, key, irq_line);
+
+	private_irq = &private_intr_config[edge_to_pids[edge].remote_pid].smsm;
+	private_irq->out_bit_pos = irq_bitmask;
+	private_irq->out_offset = irq_offset;
+	private_irq->out_base = irq_out_base;
+	private_irq->irq_id = irq_line;
+
+	ret = request_irq(irq_line,
+				private_irq->irq_handler,
+				IRQF_TRIGGER_RISING,
+				"smsm_dev",
+				NULL);
+	if (ret < 0) {
+		pr_err("%s: request_irq() failed on %d\n", __func__, irq_line);
+		return ret;
+	} else {
+		ret = enable_irq_wake(irq_line);
+		if (ret < 0)
+			pr_err("%s: enable_irq_wake() failed on %d\n", __func__,
+					irq_line);
+	}
+
+	return 0;
+
+missing_key:
+	pr_err("%s: missing key: %s", __func__, key);
+	return -ENODEV;
+}
+
+static void __devinit unparse_smd_devicetree(struct device_node *node)
+{
+	uint32_t irq_line;
+
+	irq_line = irq_of_parse_and_map(node, 0);
+
+	free_irq(irq_line, NULL);
+}
+
+static void __devinit unparse_smsm_devicetree(struct device_node *node)
+{
+	uint32_t irq_line;
+
+	irq_line = irq_of_parse_and_map(node, 0);
+
+	free_irq(irq_line, NULL);
+}
+
+static int __devinit smd_core_devicetree_init(struct platform_device *pdev)
+{
+	char *key;
+	struct resource *r;
+	void *irq_out_base;
+	void *aux_mem_base;
+	uint32_t aux_mem_size;
+	int temp_string_size = 11; /* max 3 digit count */
+	char temp_string[temp_string_size];
+	int count;
+	struct device_node *node;
+	int ret;
+	const char *compatible;
+	int subnode_num = 0;
+
+	disable_smsm_reset_handshake = 1;
+
+	key = "irq-reg-base";
+	r = platform_get_resource_byname(pdev, IORESOURCE_MEM, key);
+	if (!r) {
+		pr_err("%s: missing '%s'\n", __func__, key);
+		return -ENODEV;
+	}
+	irq_out_base = (void *)(r->start);
+	SMD_DBG("%s: %s = %p", __func__, key, irq_out_base);
+
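+	/* First pass: count the optional aux-memN resources to size smem_areas */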
+	count = 1;
+	while (1) {
+		scnprintf(temp_string, temp_string_size, "aux-mem%d", count);
+		r = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+								temp_string);
+		if (!r)
+			break;
+
+		++num_smem_areas;
+		++count;
+		if (count > 999) {
+			pr_err("%s: max num aux mem regions reached\n",
+								__func__);
+			break;
+		}
+	}
+
+	if (num_smem_areas) {
+		smem_areas = kmalloc(sizeof(struct smem_area) * num_smem_areas,
+					GFP_KERNEL);
+		if (!smem_areas) {
+			pr_err("%s: smem areas kmalloc failed\n", __func__);
+			num_smem_areas = 0;
+			return -ENOMEM;
+		}
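+		/* Second pass: record and ioremap each auxiliary smem region */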
+		count = 1;
+		while (1) {
+			scnprintf(temp_string, temp_string_size, "aux-mem%d",
+									count);
+			r = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+								temp_string);
+			if (!r)
+				break;
+			aux_mem_base = (void *)(r->start);
+			aux_mem_size = (uint32_t)(resource_size(r));
+			SMD_DBG("%s: %s = %p %x", __func__, temp_string,
+					aux_mem_base, aux_mem_size);
+			smem_areas[count - 1].phys_addr = aux_mem_base;
+			smem_areas[count - 1].size = aux_mem_size;
+			smem_areas[count - 1].virt_addr = ioremap_nocache(
+				(unsigned long)(smem_areas[count-1].phys_addr),
+				smem_areas[count - 1].size);
+			if (!smem_areas[count - 1].virt_addr) {
+				pr_err("%s: ioremap_nocache() of addr:%p size: %x\n",
+					__func__,
+					smem_areas[count - 1].phys_addr,
+					smem_areas[count - 1].size);
+				ret = -ENOMEM;
+				goto free_smem_areas;
+			}
+
+			++count;
+			if (count > 999) {
+				pr_err("%s: max num aux mem regions reached\n",
+								__func__);
+				break;
+			}
+		}
+		sort(smem_areas, num_smem_areas,
+				sizeof(struct smem_area),
+				sort_cmp_func, NULL);
+	}
+
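+	/* Parse each child node as either an SMD or an SMSM edge */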
+	for_each_child_of_node(pdev->dev.of_node, node) {
+		compatible = of_get_property(node, "compatible", NULL);
+		if (!strcmp(compatible, "qcom,smd")) {
+			ret = parse_smd_devicetree(node, irq_out_base);
+			if (ret)
+				goto rollback_subnodes;
+		} else if (!strcmp(compatible, "qcom,smsm")) {
+			ret = parse_smsm_devicetree(node, irq_out_base);
+			if (ret)
+				goto rollback_subnodes;
+		} else {
+			pr_err("%s: invalid child node named: %s\n", __func__,
+							compatible);
+			ret = -ENODEV;
+			goto rollback_subnodes;
+		}
+		++subnode_num;
+	}
+
+	return 0;
+
+rollback_subnodes:
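+	/* Free the interrupts requested for subnodes parsed before the failure */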
+	count = 0;
+	for_each_child_of_node(pdev->dev.of_node, node) {
+		if (count >= subnode_num)
+			break;
+		++count;
+		compatible = of_get_property(node, "compatible", NULL);
+		if (!strcmp(compatible, "qcom,smd"))
+			unparse_smd_devicetree(node);
+		else
+			unparse_smsm_devicetree(node);
+	}
+free_smem_areas:
+	num_smem_areas = 0;
+	kfree(smem_areas);
+	smem_areas = NULL;
+	return ret;
+}
+
 static int __devinit msm_smd_probe(struct platform_device *pdev)
 {
 	int ret;
@@ -3522,8 +3813,12 @@
 
 	if (pdev) {
 		if (pdev->dev.of_node) {
-			pr_err("SMD: Device tree not currently supported\n");
-			return -ENODEV;
+			ret = smd_core_devicetree_init(pdev);
+			if (ret) {
+				pr_err("%s: device tree init failed\n",
+								__func__);
+				return ret;
+			}
 		} else if (pdev->dev.platform_data) {
 			ret = smd_core_platform_init(pdev);
 			if (ret) {
@@ -3600,11 +3895,17 @@
 }
 late_initcall(modem_restart_late_init);
 
+static struct of_device_id msm_smem_match_table[] = {
+	{ .compatible = "qcom,smem" },
+	{},
+};
+
 static struct platform_driver msm_smd_driver = {
 	.probe = msm_smd_probe,
 	.driver = {
 		.name = MODULE_NAME,
 		.owner = THIS_MODULE,
+		.of_match_table = msm_smem_match_table,
 	},
 };
 
diff --git a/arch/arm/mach-msm/smd_rpcrouter.c b/arch/arm/mach-msm/smd_rpcrouter.c
index 1bea82a..ff68d81 100644
--- a/arch/arm/mach-msm/smd_rpcrouter.c
+++ b/arch/arm/mach-msm/smd_rpcrouter.c
@@ -545,12 +545,13 @@
 			D("%s: registering device %x\n",
 			  __func__, board_info->dev->prog);
 			list_del(&board_info->list);
+			spin_unlock_irqrestore(&rpc_board_dev_list_lock, flags);
 			rc = platform_device_register(&board_info->dev->pdev);
 			if (rc)
 				pr_err("%s: board dev register failed %d\n",
 				       __func__, rc);
 			kfree(board_info);
-			break;
+			return;
 		}
 	}
 	spin_unlock_irqrestore(&rpc_board_dev_list_lock, flags);
diff --git a/arch/arm/mach-msm/socinfo.c b/arch/arm/mach-msm/socinfo.c
index 6cb9339..2743547 100644
--- a/arch/arm/mach-msm/socinfo.c
+++ b/arch/arm/mach-msm/socinfo.c
@@ -303,7 +303,13 @@
 	[154] = MSM_CPU_8930AB,
 	[155] = MSM_CPU_8930AB,
 	[156] = MSM_CPU_8930AB,
-	[157] = MSM_CPU_8930AB
+	[157] = MSM_CPU_8930AB,
+
+	/* 8625Q IDs */
+	[168] = MSM_CPU_8625Q,
+	[169] = MSM_CPU_8625Q,
+	[170] = MSM_CPU_8625Q,
 
 	/* Uninitialized IDs are not known to run Linux.
 	   MSM_CPU_UNKNOWN is set to 0 to ensure these IDs are
diff --git a/arch/arm/mach-msm/timer.c b/arch/arm/mach-msm/timer.c
index b61604a..3af066d 100644
--- a/arch/arm/mach-msm/timer.c
+++ b/arch/arm/mach-msm/timer.c
@@ -23,12 +23,14 @@
 #include <linux/delay.h>
 #include <linux/io.h>
 #include <linux/percpu.h>
+#include <linux/mm.h>
 
 #include <asm/localtimer.h>
 #include <asm/mach/time.h>
 #include <asm/hardware/gic.h>
 #include <asm/sched_clock.h>
 #include <asm/smp_plat.h>
+#include <asm/user_accessible_timer.h>
 #include <mach/msm_iomap.h>
 #include <mach/irqs.h>
 #include <mach/socinfo.h>
@@ -1161,6 +1163,16 @@
 	}
 	msm_sched_clock_init();
 
+	if (use_user_accessible_timers()) {
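+		/* Record the GPT counter offset for the user-accessible timer page */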
+		if (cpu_is_msm8960() || cpu_is_msm8930() || cpu_is_apq8064()) {
+			struct msm_clock *gtclock = &msm_clocks[MSM_CLOCK_GPT];
+			void __iomem *addr = gtclock->regbase +
+				TIMER_COUNT_VAL + global_timer_offset;
+			setup_user_timer_offset(virt_to_phys(addr) & 0xfff);
+			set_user_accessible_timer_flag(true);
+		}
+	}
+
 #ifdef ARCH_HAS_READ_CURRENT_TIMER
 	if (is_smp()) {
 		__raw_writel(1,
diff --git a/arch/arm/mach-msm/timer_page.c b/arch/arm/mach-msm/timer_page.c
new file mode 100644
index 0000000..24d2a35
--- /dev/null
+++ b/arch/arm/mach-msm/timer_page.c
@@ -0,0 +1,36 @@
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/mm.h>
+#include <linux/export.h>
+#include <asm/user_accessible_timer.h>
+#include "mach/socinfo.h"
+#include "mach/msm_iomap.h"
+
+#include "timer.h"
+
+inline int get_timer_page_address(void)
+{
+	if (!use_user_accessible_timers())
+		return ARM_USER_ACCESSIBLE_TIMERS_INVALID_PAGE;
+
+	if (cpu_is_msm8960())
+		return MSM8960_TMR0_PHYS;
+	else if (cpu_is_msm8930())
+		return MSM8930_TMR0_PHYS;
+	else if (cpu_is_apq8064())
+		return APQ8064_TMR0_PHYS;
+	else
+		return ARM_USER_ACCESSIBLE_TIMERS_INVALID_PAGE;
+}
+EXPORT_SYMBOL(get_timer_page_address);
+
diff --git a/arch/arm/mach-msm/trace_rpm_smd.h b/arch/arm/mach-msm/trace_rpm_smd.h
new file mode 100644
index 0000000..eff4860
--- /dev/null
+++ b/arch/arm/mach-msm/trace_rpm_smd.h
@@ -0,0 +1,79 @@
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM rpm_smd
+
+#if !defined(_TRACE_RPM_SMD_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_RPM_SMD_H
+
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(rpm_ack_recd,
+
+	TP_PROTO(unsigned int irq, unsigned int msg_id),
+
+	TP_ARGS(irq, msg_id),
+
+	TP_STRUCT__entry(
+		__field(int, irq)
+		__field(int, msg_id)
+	),
+
+	TP_fast_assign(
+		__entry->irq = irq;
+		__entry->msg_id = msg_id;
+	),
+
+	TP_printk("ctx:%s id:%d",
+		__entry->irq ? "noslp" : "sleep",
+		__entry->msg_id)
+);
+
+TRACE_EVENT(rpm_send_message,
+
+	TP_PROTO(unsigned int irq, unsigned int set, unsigned int rsc_type,
+		unsigned int rsc_id, unsigned int msg_id),
+
+	TP_ARGS(irq, set, rsc_type, rsc_id, msg_id),
+
+	TP_STRUCT__entry(
+		__field(u32, irq)
+		__field(u32, set)
+		__field(u32, rsc_type)
+		__field(u32, rsc_id)
+		__field(u32, msg_id)
+		__array(char, name, 5)
+	),
+
+	TP_fast_assign(
+		__entry->irq	= irq;
+		__entry->name[4] = 0;
+		__entry->set = set;
+		__entry->rsc_type = rsc_type;
+		__entry->rsc_id = rsc_id;
+		__entry->msg_id = msg_id;
+		memcpy(__entry->name, &rsc_type, sizeof(uint32_t));
+
+	),
+
+	TP_printk("ctx:%s set:%s rsc_type:0x%08x(%s), rsc_id:0x%08x, id:%d",
+			__entry->irq ? "noslp" : "sleep",
+			__entry->set ? "slp" : "act",
+			__entry->rsc_type, __entry->name,
+			__entry->rsc_id, __entry->msg_id)
+);
+#endif
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE trace_rpm_smd
+#include <trace/define_trace.h>
diff --git a/arch/arm/mm/cache-pl310-erp.c b/arch/arm/mm/cache-pl310-erp.c
index ad75143..191060f 100644
--- a/arch/arm/mm/cache-pl310-erp.c
+++ b/arch/arm/mm/cache-pl310-erp.c
@@ -34,6 +34,7 @@
 	unsigned int slverr;
 	unsigned int decerr;
 	void __iomem *base;
+	unsigned int intr_mask_reg;
 };
 
 #define ECNTR	BIT(0)
@@ -128,19 +129,20 @@
 
 static void pl310_mask_int(struct pl310_drv_data *p, bool enable)
 {
-	uint16_t mask;
-
+	/* The L2CC register contents need to be saved,
+	 * as its power rail will be removed during suspend.
+	 */
 	if (enable)
-		mask = 0x1FF;
+		p->intr_mask_reg = 0x1FF;
 	else
-		mask = 0x0;
+		p->intr_mask_reg = 0x0;
 
-	writel_relaxed(mask, p->base + L2X0_INTR_MASK);
+	writel_relaxed(p->intr_mask_reg, p->base + L2X0_INTR_MASK);
 
 	/* Make sure Mask is updated */
 	mb();
 
-	pr_debug("Mask interrupt %x\n",
+	pr_debug("Mask interrupt 0x%x\n",
 			readl_relaxed(p->base + L2X0_INTR_MASK));
 }
 
@@ -258,12 +260,42 @@
 	return 0;
 }
 
+#ifdef CONFIG_PM
+static int pl310_suspend(struct device *dev)
+{
+	struct pl310_drv_data *p = dev_get_drvdata(dev);
+
+	disable_irq(p->irq);
+
+	return 0;
+}
+
+static int pl310_resume_early(struct device *dev)
+{
+	struct pl310_drv_data *p = dev_get_drvdata(dev);
+
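+	/* Re-enable the L2CC error interrupts lost while the power rail was off */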
+	pl310_mask_int(p, true);
+
+	enable_irq(p->irq);
+
+	return 0;
+}
+
+static const struct dev_pm_ops pl310_cache_pm_ops = {
+	.suspend = pl310_suspend,
+	.resume_early = pl310_resume_early,
+};
+#endif
+
 static struct platform_driver pl310_cache_erp_driver = {
 	.probe = pl310_cache_erp_probe,
 	.remove = __devexit_p(pl310_cache_erp_remove),
 	.driver = {
 		.name = MODULE_NAME,
 		.owner = THIS_MODULE,
+#ifdef CONFIG_PM
+		.pm = &pl310_cache_pm_ops,
+#endif
 	},
 };
 
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 57f41ca..0ebc2b9 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -375,29 +375,61 @@
 	return cmp < 0 ? -1 : cmp > 0 ? 1 : 0;
 }
 
+unsigned long memory_hole_offset;
+EXPORT_SYMBOL(memory_hole_offset);
+unsigned long memory_hole_start;
+EXPORT_SYMBOL(memory_hole_start);
+unsigned long memory_hole_end;
+EXPORT_SYMBOL(memory_hole_end);
+
 #ifdef CONFIG_DONT_MAP_HOLE_AFTER_MEMBANK0
-unsigned long membank0_size;
-EXPORT_SYMBOL(membank0_size);
-unsigned long membank1_start;
-EXPORT_SYMBOL(membank1_start);
-
-void __init find_membank0_hole(void)
+void find_memory_hole(void)
 {
-	sort(&meminfo.bank, meminfo.nr_banks,
-		sizeof(meminfo.bank[0]), meminfo_cmp, NULL);
+	int i;
+	unsigned long hole_start;
+	unsigned long hole_size;
 
-	membank0_size = meminfo.bank[0].size;
-	membank1_start = meminfo.bank[1].start;
+	/*
+	 * Find the start and end of the hole, using meminfo
+	 * if it hasn't been found already.
+	 */
+	if (memory_hole_start == 0 && memory_hole_end == 0) {
+		for (i = 0; i < (meminfo.nr_banks - 1); i++) {
+			if ((meminfo.bank[i].start + meminfo.bank[i].size) !=
+						meminfo.bank[i+1].start) {
+				if (meminfo.bank[i].start + meminfo.bank[i].size
+							<= MAX_HOLE_ADDRESS) {
+
+					hole_start = meminfo.bank[i].start +
+							meminfo.bank[i].size;
+					hole_size = meminfo.bank[i+1].start -
+								hole_start;
+
+					if (memory_hole_start == 0 &&
+							memory_hole_end == 0) {
+						memory_hole_start = hole_start;
+						memory_hole_end = hole_start +
+								hole_size;
+					} else if ((memory_hole_end -
+					memory_hole_start) <= hole_size) {
+						memory_hole_start = hole_start;
+						memory_hole_end = hole_start +
+								hole_size;
+					}
+				}
+			}
+		}
+	}
+	memory_hole_offset = memory_hole_start - PHYS_OFFSET;
 }
+
 #endif
 
 void __init arm_memblock_init(struct meminfo *mi, struct machine_desc *mdesc)
 {
 	int i;
 
-#ifndef CONFIG_DONT_MAP_HOLE_AFTER_MEMBANK0
 	sort(&meminfo.bank, meminfo.nr_banks, sizeof(meminfo.bank[0]), meminfo_cmp, NULL);
-#endif
 
 	for (i = 0; i < mi->nr_banks; i++)
 		memblock_add(mi->bank[i].start, mi->bank[i].size);
@@ -771,6 +803,9 @@
 
 	printk(KERN_NOTICE "Virtual kernel memory layout:\n"
 			"    vector  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
+#ifdef CONFIG_ARM_USE_USER_ACCESSIBLE_TIMERS
+			"    timers  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
+#endif
 #ifdef CONFIG_HAVE_TCM
 			"    DTCM    : 0x%08lx - 0x%08lx   (%4ld kB)\n"
 			"    ITCM    : 0x%08lx - 0x%08lx   (%4ld kB)\n"
@@ -791,6 +826,11 @@
 
 			MLK(UL(CONFIG_VECTORS_BASE), UL(CONFIG_VECTORS_BASE) +
 				(PAGE_SIZE)),
+#ifdef CONFIG_ARM_USE_USER_ACCESSIBLE_TIMERS
+			MLK(UL(CONFIG_ARM_USER_ACCESSIBLE_TIMER_BASE),
+				UL(CONFIG_ARM_USER_ACCESSIBLE_TIMER_BASE)
+					+ (PAGE_SIZE)),
+#endif
 #ifdef CONFIG_HAVE_TCM
 			MLK(DTCM_OFFSET, (unsigned long) dtcm_end),
 			MLK(ITCM_OFFSET, (unsigned long) itcm_end),
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index bae23b0..8575f78 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -33,6 +33,8 @@
 #include <asm/mach/arch.h>
 #include <asm/mach/map.h>
 
+#include <asm/user_accessible_timer.h>
+
 #include "mm.h"
 
 /*
@@ -309,6 +311,13 @@
 		.prot_l1   = PMD_TYPE_TABLE,
 		.domain    = DOMAIN_KERNEL,
 	},
+	[MT_DEVICE_USER_ACCESSIBLE] = {
+		.prot_pte  = PROT_PTE_DEVICE | L_PTE_MT_DEV_SHARED |
+				L_PTE_SHARED | L_PTE_USER | L_PTE_RDONLY,
+		.prot_l1   = PMD_TYPE_TABLE,
+		.prot_sect = PROT_SECT_DEVICE | PMD_SECT_S,
+		.domain    = DOMAIN_IO,
+	},
 };
 
 const struct mem_type *get_mem_type(unsigned int type)
@@ -764,7 +773,9 @@
 	const struct mem_type *type;
 	pgd_t *pgd;
 
-	if (md->virtual != vectors_base() && md->virtual < TASK_SIZE) {
+	if ((md->virtual != vectors_base() &&
+		md->virtual != get_user_accessible_timers_base()) &&
+			md->virtual < TASK_SIZE) {
 		printk(KERN_WARNING "BUG: not creating mapping for 0x%08llx"
 		       " at 0x%08lx in user region\n",
 		       (long long)__pfn_to_phys((u64)md->pfn), md->virtual);
@@ -950,7 +961,7 @@
 	int i, j, highmem = 0;
 
 #ifdef CONFIG_DONT_MAP_HOLE_AFTER_MEMBANK0
-	find_membank0_hole();
+	find_memory_hole();
 #endif
 
 #if (defined CONFIG_HIGHMEM) && (defined CONFIG_FIX_MOVABLE_ZONE)
@@ -1203,6 +1214,20 @@
 		mdesc->map_io();
 	fill_pmd_gaps();
 
+	if (use_user_accessible_timers()) {
+		/*
+		 * Generate a mapping for the timer page.
+		 */
+		int page_addr = get_timer_page_address();
+		if (page_addr != ARM_USER_ACCESSIBLE_TIMERS_INVALID_PAGE) {
+			map.pfn = __phys_to_pfn(page_addr);
+			map.virtual = CONFIG_ARM_USER_ACCESSIBLE_TIMER_BASE;
+			map.length = PAGE_SIZE;
+			map.type = MT_DEVICE_USER_ACCESSIBLE;
+			create_mapping(&map, false);
+		}
+	}
+
 	/*
 	 * Finally flush the caches and tlb to ensure that we're in a
 	 * consistent state wrt the writebuffer.  This also ensures that
diff --git a/block/Kconfig.iosched b/block/Kconfig.iosched
index 5fd98ea..5751d28 100644
--- a/block/Kconfig.iosched
+++ b/block/Kconfig.iosched
@@ -15,7 +15,7 @@
 config IOSCHED_TEST
 	tristate "Test I/O scheduler"
 	depends on DEBUG_FS
-	default m
+	default y
 	---help---
 	  The test I/O scheduler is a duplicate of the noop scheduler with
 	  addition of test utility.
diff --git a/block/test-iosched.c b/block/test-iosched.c
index 52070ac..71e8669 100644
--- a/block/test-iosched.c
+++ b/block/test-iosched.c
@@ -663,7 +663,7 @@
 			test_name = ptd->test_info.get_test_case_str_fn(ptd);
 		else
 			test_name = "Unknown testcase";
-		test_pr_info("%s: Starting test %s\n", __func__, test_name);
+		test_pr_info("%s: Starting test %s", __func__, test_name);
 
 		ret = prepare_test(ptd);
 		if (ret) {
diff --git a/drivers/char/diag/Kconfig b/drivers/char/diag/Kconfig
index 8f8707f..91fcdfc 100644
--- a/drivers/char/diag/Kconfig
+++ b/drivers/char/diag/Kconfig
@@ -30,9 +30,9 @@
 	 SDIO Transport Layer for DIAG Router
 endmenu
 
-menu "HSIC support for DIAG"
+menu "HSIC/SMUX support for DIAG"
 
-config DIAG_BRIDGE_CODE
+config DIAGFWD_BRIDGE_CODE
 	depends on USB_QCOM_DIAG_BRIDGE
 	default y
 	bool "Enable QSC/9K DIAG traffic over SMUX/HSIC"
diff --git a/drivers/char/diag/Makefile b/drivers/char/diag/Makefile
index 6ecc970..c9204ea 100644
--- a/drivers/char/diag/Makefile
+++ b/drivers/char/diag/Makefile
@@ -1,5 +1,6 @@
 obj-$(CONFIG_DIAG_CHAR) := diagchar.o
 obj-$(CONFIG_DIAG_SDIO_PIPE) += diagfwd_sdio.o
-obj-$(CONFIG_DIAG_BRIDGE_CODE) += diagfwd_hsic.o
-obj-$(CONFIG_DIAG_BRIDGE_CODE) += diagfwd_smux.o
+obj-$(CONFIG_DIAGFWD_BRIDGE_CODE) += diagfwd_bridge.o
+obj-$(CONFIG_DIAGFWD_BRIDGE_CODE) += diagfwd_hsic.o
+obj-$(CONFIG_DIAGFWD_BRIDGE_CODE) += diagfwd_smux.o
 diagchar-objs := diagchar_core.o diagchar_hdlc.o diagfwd.o diagmem.o diagfwd_cntl.o diag_dci.o diag_masks.o diag_debugfs.o
diff --git a/drivers/char/diag/diag_dci.c b/drivers/char/diag/diag_dci.c
index 5cd5ce9..e78a2aa 100644
--- a/drivers/char/diag/diag_dci.c
+++ b/drivers/char/diag/diag_dci.c
@@ -134,33 +134,37 @@
 		pr_alert("diag: No matching PID for DCI data\n");
 	/* Using PID of client process, find client buffer */
 	for (i = 0; i < MAX_DCI_CLIENTS; i++) {
-		if (curr_client_pid == driver->dci_client_tbl[i].client->tgid) {
-			/* copy pkt rsp in client buf */
-			entry = &(driver->dci_client_tbl[i]);
-			if (DCI_CHK_CAPACITY(entry, 8+write_len)) {
-				pr_alert("diag: create capacity for pkt rsp\n");
-				entry->total_capacity += 8+write_len;
-				temp_buf = krealloc(entry->dci_data,
-					 entry->total_capacity, GFP_KERNEL);
-				if (!temp_buf) {
-					pr_err("diag: DCI realloc failed\n");
-					break;
-				} else {
-					entry->dci_data = temp_buf;
+		if (driver->dci_client_tbl[i].client != NULL) {
+			if (curr_client_pid ==
+				driver->dci_client_tbl[i].client->tgid) {
+				/* copy pkt rsp in client buf */
+				entry = &(driver->dci_client_tbl[i]);
+				if (DCI_CHK_CAPACITY(entry, 8+write_len)) {
+					pr_alert("diag: create capacity for pkt rsp\n");
+					entry->total_capacity += 8+write_len;
+					temp_buf = krealloc(entry->dci_data,
+					entry->total_capacity, GFP_KERNEL);
+					if (!temp_buf) {
+						pr_err("diag: DCI realloc failed\n");
+						break;
+					} else {
+						entry->dci_data = temp_buf;
+					}
 				}
-			}
-			*(int *)(entry->dci_data+entry->data_len) =
+				*(int *)(entry->dci_data+entry->data_len) =
 							DCI_PKT_RSP_TYPE;
-			entry->data_len += 4;
-			*(int *)(entry->dci_data+entry->data_len) = write_len;
-			entry->data_len += 4;
-			memcpy(entry->dci_data+entry->data_len,
-				 buf+4+cmd_code_len, write_len);
-			entry->data_len += write_len;
-			/* delete immediate response entry */
-			if (driver->buf_in_dci[8+cmd_code_len] != 0x80)
-				driver->req_tracking_tbl[index].pid = 0;
-			break;
+				entry->data_len += 4;
+				*(int *)(entry->dci_data+entry->data_len)
+								= write_len;
+				entry->data_len += 4;
+				memcpy(entry->dci_data+entry->data_len,
+					buf+4+cmd_code_len, write_len);
+				entry->data_len += write_len;
+				/* delete immediate response entry */
+				if (driver->buf_in_dci[8+cmd_code_len] != 0x80)
+					driver->req_tracking_tbl[index].pid = 0;
+				break;
+			}
 		}
 	}
 }
@@ -408,6 +412,7 @@
 	int count, set_mask, num_codes, byte_index, bit_index, event_id;
 	uint8_t equip_id, *log_mask_ptr, *head_log_mask_ptr, byte_mask;
 	uint8_t *event_mask_ptr;
+	int offset = 0;
 
 	/* This is Pkt request/response transaction */
 	if (*(int *)temp > 0) {
@@ -463,10 +468,12 @@
 	} else if (*(int *)temp == DCI_LOG_TYPE) {
 		/* find client id and table */
 		for (i = 0; i < MAX_DCI_CLIENTS; i++) {
-			if (driver->dci_client_tbl[i].client->tgid ==
-							 current->tgid) {
-				found = 1;
-				break;
+			if (driver->dci_client_tbl[i].client != NULL) {
+				if (driver->dci_client_tbl[i].client->tgid ==
+							current->tgid) {
+					found = 1;
+					break;
+				}
 			}
 		}
 		if (!found) {
@@ -495,6 +502,7 @@
 			 */
 			log_mask_ptr = head_log_mask_ptr;
 			found = 0;
+			offset = 0;
 			while (log_mask_ptr) {
 				if (*log_mask_ptr == equip_id) {
 					found = 1;
@@ -505,6 +513,7 @@
 					pr_debug("diag: did not find equip id = %x at %p\n",
 						 equip_id, log_mask_ptr);
 					log_mask_ptr += 514;
+					offset += 514;
 				}
 			}
 			if (!found) {
@@ -517,21 +526,25 @@
 				*log_mask_ptr |= byte_mask;
 			else
 				*log_mask_ptr &= ~byte_mask;
+			/* add to cumulative mask */
+			update_dci_cumulative_log_mask(
+				offset, byte_index,
+				byte_mask);
 			temp += 2;
 			count++;
 			ret = DIAG_DCI_NO_ERROR;
 		}
-		/* add to cumulative mask */
-		update_dci_cumulative_log_mask(i);
 		/* send updated mask to peripherals */
 		diag_send_dci_log_mask(driver->ch_cntl);
 	} else if (*(int *)temp == DCI_EVENT_TYPE) {
 		/* find client id and table */
 		for (i = 0; i < MAX_DCI_CLIENTS; i++) {
-			if (driver->dci_client_tbl[i].client->tgid ==
-							 current->tgid) {
-				found = 1;
-				break;
+			if (driver->dci_client_tbl[i].client != NULL) {
+				if (driver->dci_client_tbl[i].client->tgid ==
+							current->tgid) {
+					found = 1;
+					break;
+				}
 			}
 		}
 		if (!found) {
@@ -561,12 +574,12 @@
 				*(event_mask_ptr + byte_index) |= byte_mask;
 			else
 				*(event_mask_ptr + byte_index) &= ~byte_mask;
+			/* add to cumulative mask */
+			update_dci_cumulative_event_mask(byte_index, byte_mask);
 			temp += sizeof(int);
 			count++;
 			ret = DIAG_DCI_NO_ERROR;
 		}
-		/* add to cumulative mask */
-		update_dci_cumulative_event_mask(i);
 		/* send updated mask to peripherals */
 		diag_send_dci_event_mask(driver->ch_cntl);
 	} else {
@@ -575,16 +588,29 @@
 	return ret;
 }
 
-void update_dci_cumulative_event_mask(int client_index)
+void update_dci_cumulative_event_mask(int offset, uint8_t byte_mask)
 {
 	int i;
-	uint8_t *update_ptr = dci_cumulative_event_mask;
 	uint8_t *event_mask_ptr;
+	uint8_t *update_ptr = dci_cumulative_event_mask;
+	bool is_set = false;
 
 	mutex_lock(&dci_event_mask_mutex);
-	event_mask_ptr = driver->dci_client_tbl[client_index].dci_event_mask;
-	for (i = 0; i < DCI_EVENT_MASK_SIZE; i++)
-		*(update_ptr+i) |= *(event_mask_ptr+i);
+	update_ptr += offset;
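+	/* The cumulative bit stays set only while at least one client has it set */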
+	for (i = 0; i < MAX_DCI_CLIENTS; i++) {
+		event_mask_ptr =
+			driver->dci_client_tbl[i].dci_event_mask;
+		event_mask_ptr += offset;
+		if ((*event_mask_ptr & byte_mask) == byte_mask) {
+			is_set = true;
+			/* break even if one client has the event mask set */
+			break;
+		}
+	}
+	if (is_set == false)
+		*update_ptr &= ~byte_mask;
+	else
+		*update_ptr |= byte_mask;
 	mutex_unlock(&dci_event_mask_mutex);
 }
 
@@ -624,27 +650,39 @@
 	mutex_unlock(&driver->diag_cntl_mutex);
 }
 
-void update_dci_cumulative_log_mask(int client_index)
+void update_dci_cumulative_log_mask(int offset, int byte_index,
+						uint8_t byte_mask)
 {
-	int i, j;
+	int i;
 	uint8_t *update_ptr = dci_cumulative_log_mask;
-	uint8_t *log_mask_ptr =
-	driver->dci_client_tbl[client_index].dci_log_mask;
+	uint8_t *log_mask_ptr;
+	bool is_set = false;
 
 	mutex_lock(&dci_log_mask_mutex);
-	*update_ptr = 0; /* add first equip id */
-	/* skip the first equip id */
-	update_ptr++; log_mask_ptr++;
-	for (i = 0; i < 16; i++) {
-		for (j = 0; j < 513; j++) {
-			*update_ptr |= *log_mask_ptr;
-			update_ptr++;
-			log_mask_ptr++;
+	*update_ptr = 0;
+	/* set the equipment IDs */
+	for (i = 0; i < 16; i++)
+		*(update_ptr + (i*514)) = i;
+
+	update_ptr += offset;
+	/* update the dirty bit */
+	*(update_ptr+1) = 1;
+	update_ptr = update_ptr + byte_index;
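+	/* The cumulative log bit stays set only while at least one client has it set */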
+	for (i = 0; i < MAX_DCI_CLIENTS; i++) {
+		log_mask_ptr =
+			(driver->dci_client_tbl[i].dci_log_mask);
+		log_mask_ptr = log_mask_ptr + offset + byte_index;
+		if ((*log_mask_ptr & byte_mask) == byte_mask) {
+			is_set = true;
+			/* break even if one client has the log mask set */
+			break;
 		}
-		*update_ptr = i+1;
-		update_ptr++;
-		log_mask_ptr++;
 	}
+
+	if (is_set == false)
+		*update_ptr &= ~byte_mask;
+	else
+		*update_ptr |= byte_mask;
 	mutex_unlock(&dci_log_mask_mutex);
 }
 
diff --git a/drivers/char/diag/diag_dci.h b/drivers/char/diag/diag_dci.h
index afcabcc..435c750 100644
--- a/drivers/char/diag/diag_dci.h
+++ b/drivers/char/diag/diag_dci.h
@@ -85,11 +85,12 @@
 void extract_dci_pkt_rsp(unsigned char *buf);
 /* DCI Log streaming functions */
 void create_dci_log_mask_tbl(unsigned char *tbl_buf);
-void update_dci_cumulative_log_mask(int client_index);
+void update_dci_cumulative_log_mask(int offset, int byte_index,
+						uint8_t byte_mask);
 void diag_send_dci_log_mask(smd_channel_t *ch);
 void extract_dci_log(unsigned char *buf);
 /* DCI event streaming functions */
-void update_dci_cumulative_event_mask(int client_index);
+void update_dci_cumulative_event_mask(int offset, uint8_t byte_mask);
 void diag_send_dci_event_mask(smd_channel_t *ch);
 void extract_dci_events(unsigned char *buf);
 void create_dci_event_mask_tbl(unsigned char *tbl_buf);
diff --git a/drivers/char/diag/diag_debugfs.c b/drivers/char/diag/diag_debugfs.c
index ed0f08e..7863f74 100644
--- a/drivers/char/diag/diag_debugfs.c
+++ b/drivers/char/diag/diag_debugfs.c
@@ -16,6 +16,7 @@
 #include <linux/debugfs.h>
 #include "diagchar.h"
 #include "diagfwd.h"
+#include "diagfwd_bridge.h"
 
 #define DEBUG_BUF_SIZE	4096
 static struct dentry *diag_dbgfs_dent;
@@ -195,8 +196,8 @@
 	return ret;
 }
 
-#ifdef CONFIG_DIAG_BRIDGE_CODE
-static ssize_t diag_dbgfs_read_hsic(struct file *file, char __user *ubuf,
+#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
+static ssize_t diag_dbgfs_read_bridge(struct file *file, char __user *ubuf,
 				    size_t count, loff_t *ppos)
 {
 	char *buf;
@@ -220,13 +221,17 @@
 		"count_hsic_write_pool: %d\n"
 		"diag_hsic_pool: %x\n"
 		"diag_hsic_write_pool: %x\n"
-		"write_len_mdm: %d\n"
+		"HSIC write_len: %d\n"
 		"num_hsic_buf_tbl_entries: %d\n"
-		"usb_mdm_connected: %d\n"
-		"diag_read_mdm_work: %d\n"
+		"HSIC usb_connected: %d\n"
+		"HSIC diag_read_work: %d\n"
 		"diag_read_hsic_work: %d\n"
 		"diag_disconnect_work: %d\n"
-		"diag_usb_read_complete_work: %d\n",
+		"diag_usb_read_complete_work: %d\n"
+		"smux ch: %d\n"
+		"smux enabled: %d\n"
+		"smux in busy: %d\n"
+		"smux connected: %d\n",
 		driver->hsic_ch,
 		driver->hsic_inited,
 		driver->hsic_device_enabled,
@@ -238,13 +243,17 @@
 		driver->count_hsic_write_pool,
 		(unsigned int)driver->diag_hsic_pool,
 		(unsigned int)driver->diag_hsic_write_pool,
-		driver->write_len_mdm,
+			diag_bridge[HSIC].write_len,
 		driver->num_hsic_buf_tbl_entries,
-		driver->usb_mdm_connected,
-		work_pending(&(driver->diag_read_mdm_work)),
+			diag_bridge[HSIC].usb_connected,
+			work_pending(&(diag_bridge[HSIC].diag_read_work)),
 		work_pending(&(driver->diag_read_hsic_work)),
 		work_pending(&(driver->diag_disconnect_work)),
-		work_pending(&(driver->diag_usb_read_complete_work)));
+		work_pending(&(diag_bridge[HSIC].usb_read_complete_work)),
+		driver->lcid,
+		driver->diag_smux_enabled,
+		driver->in_busy_smux,
+		driver->smux_connected);
 
 	ret = simple_read_from_buffer(ubuf, count, ppos, buf, ret);
 
@@ -252,8 +261,8 @@
 	return ret;
 }
 
-const struct file_operations diag_dbgfs_hsic_ops = {
-	.read = diag_dbgfs_read_hsic,
+const struct file_operations diag_dbgfs_bridge_ops = {
+	.read = diag_dbgfs_read_bridge,
 };
 #endif
 
@@ -284,9 +293,9 @@
 	debugfs_create_file("work_pending", 0444, diag_dbgfs_dent, 0,
 		&diag_dbgfs_workpending_ops);
 
-#ifdef CONFIG_DIAG_BRIDGE_CODE
-	debugfs_create_file("hsic", 0444, diag_dbgfs_dent, 0,
-		&diag_dbgfs_hsic_ops);
+#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
+	debugfs_create_file("bridge", 0444, diag_dbgfs_dent, 0,
+		&diag_dbgfs_bridge_ops);
 #endif
 
 	diag_dbgfs_table_index = 0;
diff --git a/drivers/char/diag/diagchar.h b/drivers/char/diag/diagchar.h
index 28d0565..d1ec5f2 100644
--- a/drivers/char/diag/diagchar.h
+++ b/drivers/char/diag/diagchar.h
@@ -29,6 +29,7 @@
 #define IN_BUF_SIZE		16384
 #define MAX_IN_BUF_SIZE	32768
 #define MAX_SYNC_OBJ_NAME_SIZE	32
+#define UINT32_MAX	UINT_MAX
 /* Size of the buffer used for deframing a packet
   received from the PC tool */
 #define HDLC_MAX 4096
@@ -272,7 +273,9 @@
 	struct diag_request *usb_read_mdm_ptr;
 	struct diag_request *write_ptr_mdm;
 #endif
-#ifdef CONFIG_DIAG_BRIDGE_CODE
+#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
+	/* common for all bridges */
+	struct work_struct diag_disconnect_work;
 	/* SGLTE variables */
 	int lcid;
 	unsigned char *buf_in_smux;
@@ -289,18 +292,6 @@
 	int in_busy_hsic_read_on_device;
 	int in_busy_hsic_write;
 	struct work_struct diag_read_hsic_work;
-	struct mutex bridge_mutex;
-	/* USB MDM channel variables */
-	int usb_mdm_connected;
-	int read_len_mdm;
-	int write_len_mdm;
-	unsigned char *usb_buf_mdm_out;
-	struct usb_diag_ch *mdm_ch;
-	struct workqueue_struct *diag_bridge_wq;
-	struct work_struct diag_read_mdm_work;
-	struct work_struct diag_disconnect_work;
-	struct work_struct diag_usb_read_complete_work;
-	struct diag_request *usb_read_mdm_ptr;
 	int count_hsic_pool;
 	int count_hsic_write_pool;
 	unsigned int poolsize_hsic;
@@ -315,5 +306,6 @@
 #endif
 };
 
+extern struct diag_bridge_dev *diag_bridge;
 extern struct diagchar_dev *driver;
 #endif
diff --git a/drivers/char/diag/diagchar_core.c b/drivers/char/diag/diagchar_core.c
index fe61d2d..645d916 100644
--- a/drivers/char/diag/diagchar_core.c
+++ b/drivers/char/diag/diagchar_core.c
@@ -32,13 +32,14 @@
 #ifdef CONFIG_DIAG_SDIO_PIPE
 #include "diagfwd_sdio.h"
 #endif
-#ifdef CONFIG_DIAG_BRIDGE_CODE
+#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
 #include "diagfwd_hsic.h"
 #include "diagfwd_smux.h"
 #endif
 #include <linux/timer.h>
 #include "diag_debugfs.h"
 #include "diag_masks.h"
+#include "diagfwd_bridge.h"
 
 MODULE_DESCRIPTION("Diag Char Driver");
 MODULE_LICENSE("GPL v2");
@@ -127,7 +128,7 @@
 	mutex_unlock(&driver->diagchar_mutex);
 }
 
-#ifdef CONFIG_DIAG_BRIDGE_CODE
+#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
 void diag_clear_hsic_tbl(void)
 {
 	int i;
@@ -278,7 +279,7 @@
 	if (driver->logging_process_id == current->tgid) {
 		driver->logging_mode = USB_MODE;
 		diagfwd_connect();
-#ifdef CONFIG_DIAG_BRIDGE_CODE
+#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
 		diag_clear_hsic_tbl();
 		diagfwd_cancel_hsic();
 		diagfwd_connect_bridge(0);
@@ -358,7 +359,7 @@
 }
 
 void diag_add_reg(int j, struct bindpkt_params *params,
-					  int *success, int *count_entries)
+				  int *success, unsigned int *count_entries)
 {
 	*success = 1;
 	driver->table[j].cmd_code = params->cmd_code;
@@ -380,82 +381,172 @@
 	(*count_entries)++;
 }
 
+#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
+uint16_t diag_get_remote_device_mask(void)
+{
+	uint16_t remote_dev = 0;
+
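+	/* Bit 0 indicates an HSIC bridge, bit 1 an SMUX bridge */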
+	if (driver->hsic_inited)
+		remote_dev |= (1 << 0);
+	if (driver->diag_smux_enabled)
+		remote_dev |= (1 << 1);
+
+	return remote_dev;
+}
+#else
+inline uint16_t diag_get_remote_device_mask(void) { return 0; }
+#endif
+
 long diagchar_ioctl(struct file *filp,
 			   unsigned int iocmd, unsigned long ioarg)
 {
-	int i, j, count_entries = 0, temp;
-	int success = -1;
+	int i, j, temp, success = -1, status;
+	unsigned int count_entries = 0, interim_count = 0;
 	void *temp_buf;
 	uint16_t support_list = 0;
-	struct diag_dci_client_tbl *params =
-		kzalloc(sizeof(struct diag_dci_client_tbl), GFP_KERNEL);
+	struct diag_dci_client_tbl *dci_params;
 	struct diag_dci_health_stats stats;
-	int status;
 
 	if (iocmd == DIAG_IOCTL_COMMAND_REG) {
-		struct bindpkt_params_per_process *pkt_params =
-			 (struct bindpkt_params_per_process *) ioarg;
+		struct bindpkt_params_per_process pkt_params;
+		struct bindpkt_params *params;
+		struct bindpkt_params *head_params;
+		if (copy_from_user(&pkt_params, (void *)ioarg,
+			   sizeof(struct bindpkt_params_per_process))) {
+			return -EFAULT;
+		}
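+		/* Reject counts that would overflow the allocation size below */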
+		if ((UINT32_MAX/sizeof(struct bindpkt_params)) <
+							 pkt_params.count) {
+			pr_warning("diag: integer overflow while multiplying\n");
+			return -EFAULT;
+		}
+		params = kzalloc(pkt_params.count*sizeof(
+			struct bindpkt_params), GFP_KERNEL);
+		if (!params) {
+			pr_err("diag: unable to alloc memory\n");
+			return -ENOMEM;
+		} else
+			head_params = params;
+
+		if (copy_from_user(params, pkt_params.params,
+			   pkt_params.count*sizeof(struct bindpkt_params))) {
+			kfree(head_params);
+			return -EFAULT;
+		}
 		mutex_lock(&driver->diagchar_mutex);
 		for (i = 0; i < diag_max_reg; i++) {
 			if (driver->table[i].process_id == 0) {
-				diag_add_reg(i, pkt_params->params,
-						&success, &count_entries);
-				if (pkt_params->count > count_entries) {
-					pkt_params->params++;
+				diag_add_reg(i, params, &success,
+							 &count_entries);
+				if (pkt_params.count > count_entries) {
+					params++;
 				} else {
 					mutex_unlock(&driver->diagchar_mutex);
+					kfree(head_params);
 					return success;
 				}
 			}
 		}
 		if (i < diag_threshold_reg) {
 			/* Increase table size by amount required */
-			diag_max_reg += pkt_params->count -
+			if (pkt_params.count >= count_entries) {
+				interim_count = pkt_params.count -
 							 count_entries;
+			} else {
+				pr_warning("diag: error in params count\n");
+				kfree(head_params);
+				mutex_unlock(&driver->diagchar_mutex);
+				return -EFAULT;
+			}
+			if (UINT32_MAX - diag_max_reg >=
+							interim_count) {
+				diag_max_reg += interim_count;
+			} else {
+				pr_warning("diag: Integer overflow\n");
+				kfree(head_params);
+				mutex_unlock(&driver->diagchar_mutex);
+				return -EFAULT;
+			}
 			/* Make sure size doesn't go beyond threshold */
 			if (diag_max_reg > diag_threshold_reg) {
 				diag_max_reg = diag_threshold_reg;
 				pr_info("diag: best case memory allocation\n");
 			}
+			if (UINT32_MAX/sizeof(struct diag_master_table) <
+								 diag_max_reg) {
+				pr_warning("diag: integer overflow\n");
+				kfree(head_params);
+				mutex_unlock(&driver->diagchar_mutex);
+				return -EFAULT;
+			}
 			temp_buf = krealloc(driver->table,
 					 diag_max_reg*sizeof(struct
 					 diag_master_table), GFP_KERNEL);
 			if (!temp_buf) {
-				diag_max_reg -= pkt_params->count -
-							 count_entries;
-				pr_alert("diag: Insufficient memory for reg.");
+				pr_alert("diag: Insufficient memory for reg.\n");
 				mutex_unlock(&driver->diagchar_mutex);
+
+				if (pkt_params.count >= count_entries) {
+					interim_count = pkt_params.count -
+								 count_entries;
+				} else {
+					pr_warning("diag: params count error\n");
+					mutex_unlock(&driver->diagchar_mutex);
+					kfree(head_params);
+					return -EFAULT;
+				}
+				if (diag_max_reg >= interim_count) {
+					diag_max_reg -= interim_count;
+				} else {
+					pr_warning("diag: Integer underflow\n");
+					mutex_unlock(&driver->diagchar_mutex);
+					kfree(head_params);
+					return -EFAULT;
+				}
+				kfree(head_params);
 				return 0;
 			} else {
 				driver->table = temp_buf;
 			}
 			for (j = i; j < diag_max_reg; j++) {
-				diag_add_reg(j, pkt_params->params,
-						&success, &count_entries);
-				if (pkt_params->count > count_entries) {
-					pkt_params->params++;
+				diag_add_reg(j, params, &success,
+							 &count_entries);
+				if (pkt_params.count > count_entries) {
+					params++;
 				} else {
 					mutex_unlock(&driver->diagchar_mutex);
+					kfree(head_params);
 					return success;
 				}
 			}
+			kfree(head_params);
 			mutex_unlock(&driver->diagchar_mutex);
 		} else {
 			mutex_unlock(&driver->diagchar_mutex);
+			kfree(head_params);
 			pr_err("Max size reached, Pkt Registration failed for"
 						" Process %d", current->tgid);
 		}
 		success = 0;
 	} else if (iocmd == DIAG_IOCTL_GET_DELAYED_RSP_ID) {
-		struct diagpkt_delay_params *delay_params =
-					(struct diagpkt_delay_params *) ioarg;
-
-		if ((delay_params->rsp_ptr) &&
-		 (delay_params->size == sizeof(delayed_rsp_id)) &&
-				 (delay_params->num_bytes_ptr)) {
-			*((uint16_t *)delay_params->rsp_ptr) =
-				DIAGPKT_NEXT_DELAYED_RSP_ID(delayed_rsp_id);
-			*(delay_params->num_bytes_ptr) = sizeof(delayed_rsp_id);
+		struct diagpkt_delay_params delay_params;
+		uint16_t interim_rsp_id;
+		int interim_size;
+		if (copy_from_user(&delay_params, (void *)ioarg,
+					   sizeof(struct diagpkt_delay_params)))
+			return -EFAULT;
+		if ((delay_params.rsp_ptr) &&
+		 (delay_params.size == sizeof(delayed_rsp_id)) &&
+				 (delay_params.num_bytes_ptr)) {
+			interim_rsp_id = DIAGPKT_NEXT_DELAYED_RSP_ID(
+							delayed_rsp_id);
+			if (copy_to_user((void *)delay_params.rsp_ptr,
+					 &interim_rsp_id, sizeof(uint16_t)))
+				return -EFAULT;
+			interim_size = sizeof(delayed_rsp_id);
+			if (copy_to_user((void *)delay_params.num_bytes_ptr,
+						 &interim_size, sizeof(int)))
+				return -EFAULT;
 			success = 0;
 		}
 	} else if (iocmd == DIAG_IOCTL_DCI_REG) {
@@ -463,7 +554,13 @@
 			return DIAG_DCI_NO_REG;
 		if (driver->num_dci_client >= MAX_DCI_CLIENTS)
 			return DIAG_DCI_NO_REG;
-		if (copy_from_user(params, (void *)ioarg,
+		dci_params = kzalloc(sizeof(struct diag_dci_client_tbl),
+								 GFP_KERNEL);
+		if (dci_params == NULL) {
+			pr_err("diag: unable to alloc memory\n");
+			return -ENOMEM;
+		}
+		if (copy_from_user(dci_params, (void *)ioarg,
 				 sizeof(struct diag_dci_client_tbl)))
 			return -EFAULT;
 		mutex_lock(&driver->dci_mutex);
@@ -476,9 +573,9 @@
 			if (driver->dci_client_tbl[i].client == NULL) {
 				driver->dci_client_tbl[i].client = current;
 				driver->dci_client_tbl[i].list =
-							 params->list;
+							 dci_params->list;
 				driver->dci_client_tbl[i].signal_type =
-					 params->signal_type;
+					 dci_params->signal_type;
 				create_dci_log_mask_tbl(driver->
 					dci_client_tbl[i].dci_log_mask);
 				create_dci_event_mask_tbl(driver->
@@ -496,6 +593,7 @@
 			}
 		}
 		mutex_unlock(&driver->dci_mutex);
+		kfree(dci_params);
 		return driver->dci_client_id;
 	} else if (iocmd == DIAG_IOCTL_DCI_DEINIT) {
 		success = -1;
@@ -520,25 +618,29 @@
 	} else if (iocmd == DIAG_IOCTL_DCI_SUPPORT) {
 		if (driver->ch_dci)
 			support_list = support_list | DIAG_CON_MPSS;
-		*(uint16_t *)ioarg = support_list;
+		if (copy_to_user((void *)ioarg, &support_list,
+							 sizeof(uint16_t)))
+			return -EFAULT;
 		return DIAG_DCI_NO_ERROR;
 	} else if (iocmd == DIAG_IOCTL_DCI_HEALTH_STATS) {
 		if (copy_from_user(&stats, (void *)ioarg,
 				 sizeof(struct diag_dci_health_stats)))
 			return -EFAULT;
 		for (i = 0; i < MAX_DCI_CLIENTS; i++) {
-			params = &(driver->dci_client_tbl[i]);
-			if (params->client &&
-				params->client->tgid == current->tgid) {
-				stats.dropped_logs = params->dropped_logs;
-				stats.dropped_events = params->dropped_events;
-				stats.received_logs = params->received_logs;
-				stats.received_events = params->received_events;
+			dci_params = &(driver->dci_client_tbl[i]);
+			if (dci_params->client &&
+				dci_params->client->tgid == current->tgid) {
+				stats.dropped_logs = dci_params->dropped_logs;
+				stats.dropped_events =
+						 dci_params->dropped_events;
+				stats.received_logs = dci_params->received_logs;
+				stats.received_events =
+						 dci_params->received_events;
 				if (stats.reset_status) {
-					params->dropped_logs = 0;
-					params->dropped_events = 0;
-					params->received_logs = 0;
-					params->received_events = 0;
+					dci_params->dropped_logs = 0;
+					dci_params->dropped_events = 0;
+					dci_params->received_logs = 0;
+					dci_params->received_events = 0;
 				}
 				break;
 			}
@@ -551,7 +653,7 @@
 		for (i = 0; i < driver->num_clients; i++)
 			if (driver->client_map[i].pid == current->tgid)
 				break;
-		if (i == -1)
+		if (i == driver->num_clients)
 			return -EINVAL;
 		driver->data_ready[i] |= DEINIT_TYPE;
 		wake_up_interruptible(&driver->wait_q);
@@ -583,6 +685,10 @@
 				}
 			}
 		}
+		if (driver->logging_mode == SOCKET_MODE)
+			driver->socket_process = current;
+		if (driver->logging_mode == CALLBACK_MODE)
+			driver->callback_process = current;
 		if (driver->logging_mode == UART_MODE ||
 			driver->logging_mode == SOCKET_MODE ||
 			driver->logging_mode == CALLBACK_MODE) {
@@ -590,10 +696,6 @@
 			driver->mask_check = 0;
 			driver->logging_mode = MEMORY_DEVICE_MODE;
 		}
-		if (driver->logging_mode == SOCKET_MODE)
-			driver->socket_process = current;
-		if (driver->logging_mode == CALLBACK_MODE)
-			driver->callback_process = current;
 		driver->logging_process_id = current->tgid;
 		mutex_unlock(&driver->diagchar_mutex);
 		if (temp == MEMORY_DEVICE_MODE && driver->logging_mode
@@ -607,7 +709,7 @@
 #ifdef CONFIG_DIAG_SDIO_PIPE
 			driver->in_busy_sdio = 1;
 #endif
-#ifdef CONFIG_DIAG_BRIDGE_CODE
+#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
 			diagfwd_disconnect_bridge(0);
 			diag_clear_hsic_tbl();
 #endif
@@ -636,7 +738,7 @@
 				queue_work(driver->diag_sdio_wq,
 					&(driver->diag_read_sdio_work));
 #endif
-#ifdef CONFIG_DIAG_BRIDGE_CODE
+#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
 			diagfwd_connect_bridge(0);
 #endif
 		}
@@ -644,13 +746,13 @@
 		else if (temp == USB_MODE && driver->logging_mode
 							 == NO_LOGGING_MODE) {
 			diagfwd_disconnect();
-#ifdef CONFIG_DIAG_BRIDGE_CODE
+#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
 			diagfwd_disconnect_bridge(0);
 #endif
 		} else if (temp == NO_LOGGING_MODE && driver->logging_mode
 								== USB_MODE) {
 			diagfwd_connect();
-#ifdef CONFIG_DIAG_BRIDGE_CODE
+#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
 			diagfwd_connect_bridge(0);
 #endif
 		} else if (temp == USB_MODE && driver->logging_mode
@@ -680,14 +782,14 @@
 				queue_work(driver->diag_sdio_wq,
 					&(driver->diag_read_sdio_work));
 #endif
-#ifdef CONFIG_DIAG_BRIDGE_CODE
+#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
 			diagfwd_cancel_hsic();
 			diagfwd_connect_bridge(0);
 #endif
 		} else if (temp == MEMORY_DEVICE_MODE &&
 				 driver->logging_mode == USB_MODE) {
 			diagfwd_connect();
-#ifdef CONFIG_DIAG_BRIDGE_CODE
+#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
 			diag_clear_hsic_tbl();
 			diagfwd_cancel_hsic();
 			diagfwd_connect_bridge(0);
@@ -695,6 +797,13 @@
 		}
 #endif /* DIAG over USB */
 		success = 1;
+	} else if (iocmd == DIAG_IOCTL_REMOTE_DEV) {
+		uint16_t remote_dev = diag_get_remote_device_mask();
+
+		if (copy_to_user((void *)ioarg, &remote_dev, sizeof(uint16_t)))
+			success = -EFAULT;
+		else
+			success = 1;
 	}
 
 	return success;
@@ -706,7 +815,7 @@
 	struct diag_dci_client_tbl *entry;
 	int index = -1, i = 0, ret = 0;
 	int num_data = 0, data_type;
-#if defined(CONFIG_DIAG_SDIO_PIPE) || defined(CONFIG_DIAG_BRIDGE_CODE)
+#if defined(CONFIG_DIAG_SDIO_PIPE) || defined(CONFIG_DIAGFWD_BRIDGE_CODE)
 	int mdm_token = MDM_TOKEN;
 #endif
 
@@ -725,7 +834,7 @@
 
 	if ((driver->data_ready[index] & USER_SPACE_DATA_TYPE) && (driver->
 					logging_mode == MEMORY_DEVICE_MODE)) {
-#ifdef CONFIG_DIAG_BRIDGE_CODE
+#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
 		unsigned long spin_lock_flags;
 		struct diag_write_device hsic_buf_tbl[NUM_HSIC_BUF_TBL_ENTRIES];
 #endif
@@ -861,7 +970,7 @@
 			driver->in_busy_sdio = 0;
 		}
 #endif
-#ifdef CONFIG_DIAG_BRIDGE_CODE
+#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
 		spin_lock_irqsave(&driver->hsic_spinlock, spin_lock_flags);
 		for (i = 0; i < driver->poolsize_hsic_write; i++) {
 			hsic_buf_tbl[i].buf = driver->hsic_buf_tbl[i].buf;
@@ -1012,14 +1121,17 @@
 		COPY_USER_SPACE_OR_EXIT(buf, data_type, 4);
 		/* check the current client and copy its data */
 		for (i = 0; i < MAX_DCI_CLIENTS; i++) {
-			entry = &(driver->dci_client_tbl[i]);
-			if (entry && (current->tgid == entry->client->tgid)) {
-				COPY_USER_SPACE_OR_EXIT(buf+4,
-						entry->data_len, 4);
-				COPY_USER_SPACE_OR_EXIT(buf+8,
-					 *(entry->dci_data), entry->data_len);
-				entry->data_len = 0;
-				break;
+			if (driver->dci_client_tbl[i].client != NULL) {
+				entry = &(driver->dci_client_tbl[i]);
+				if (entry && (current->tgid ==
+						entry->client->tgid)) {
+					COPY_USER_SPACE_OR_EXIT(buf+4,
+							entry->data_len, 4);
+					COPY_USER_SPACE_OR_EXIT(buf+8,
+					*(entry->dci_data), entry->data_len);
+					entry->data_len = 0;
+					break;
+				}
 			}
 		}
 		driver->data_ready[index] ^= DCI_DATA_TYPE;
@@ -1045,7 +1157,7 @@
 	struct diag_send_desc_type send = { NULL, NULL, DIAG_STATE_START, 0 };
 	struct diag_hdlc_dest_type enc = { NULL, NULL, 0 };
 	void *buf_copy = NULL;
-	int payload_size;
+	unsigned int payload_size;
 #ifdef CONFIG_DIAG_OVER_USB
 	if (((driver->logging_mode == USB_MODE) && (!driver->usb_connected)) ||
 				(driver->logging_mode == NO_LOGGING_MODE)) {
@@ -1056,8 +1168,17 @@
 	/* Get the packet type F3/log/event/Pkt response */
 	err = copy_from_user((&pkt_type), buf, 4);
 	/* First 4 bytes indicate the type of payload - ignore these */
+	if (count < 4) {
+		pr_err("diag: Client sending short data\n");
+		return -EBADMSG;
+	}
 	payload_size = count - 4;
-
+	if (payload_size > USER_SPACE_DATA) {
+		pr_err("diag: Dropping packet, packet payload size crosses 8KB limit. Current payload size %d\n",
+				payload_size);
+		driver->dropped_count++;
+		return -EBADMSG;
+	}
 	if (pkt_type == DCI_DATA_TYPE) {
 		err = copy_from_user(driver->user_space_data, buf + 4,
 							 payload_size);
@@ -1082,7 +1203,8 @@
 
 		/* Check masks for On-Device logging */
 		if (driver->mask_check) {
-			if (!mask_request_validate(driver->user_space_data)) {
+			if (!mask_request_validate(driver->user_space_data +
+							 token_offset)) {
 				pr_alert("diag: mask request Invalid\n");
 				return -EFAULT;
 			}
@@ -1107,7 +1229,7 @@
 			}
 		}
 #endif
-#ifdef CONFIG_DIAG_BRIDGE_CODE
+#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
 		/* send masks to 9k too */
 		if (driver->hsic_ch && (payload_size > 0) && remote_data) {
 			/* wait sending mask updates if HSIC ch not ready */
@@ -1206,8 +1328,9 @@
 		if (err) {
 			/*Free the buffer right away if write failed */
 			diagmem_free(driver, buf_hdlc, POOL_TYPE_HDLC);
-			diagmem_free(driver, (unsigned char *)driver->
-				 write_ptr_svc, POOL_TYPE_WRITE_STRUCT);
+			if (driver->logging_mode == USB_MODE)
+				diagmem_free(driver, (unsigned char *)driver->
+					write_ptr_svc, POOL_TYPE_WRITE_STRUCT);
 			ret = -EIO;
 			goto fail_free_hdlc;
 		}
@@ -1234,8 +1357,9 @@
 		if (err) {
 			/*Free the buffer right away if write failed */
 			diagmem_free(driver, buf_hdlc, POOL_TYPE_HDLC);
-			diagmem_free(driver, (unsigned char *)driver->
-				 write_ptr_svc, POOL_TYPE_WRITE_STRUCT);
+			if (driver->logging_mode == USB_MODE)
+				diagmem_free(driver, (unsigned char *)driver->
+					write_ptr_svc, POOL_TYPE_WRITE_STRUCT);
 			ret = -EIO;
 			goto fail_free_hdlc;
 		}
@@ -1259,8 +1383,9 @@
 		if (err) {
 			/*Free the buffer right away if write failed */
 			diagmem_free(driver, buf_hdlc, POOL_TYPE_HDLC);
-			diagmem_free(driver, (unsigned char *)driver->
-				 write_ptr_svc, POOL_TYPE_WRITE_STRUCT);
+			if (driver->logging_mode == USB_MODE)
+				diagmem_free(driver, (unsigned char *)driver->
+					write_ptr_svc, POOL_TYPE_WRITE_STRUCT);
 			ret = -EIO;
 			goto fail_free_hdlc;
 		}
@@ -1410,6 +1535,13 @@
 	return 0;
 }
 
+#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
+static void diag_disconnect_work_fn(struct work_struct *w)
+{
+	diagfwd_disconnect_bridge(1);
+}
+#endif
+
 #ifdef CONFIG_DIAG_SDIO_PIPE
 void diag_sdio_fn(int type)
 {
@@ -1424,16 +1556,14 @@
 inline void diag_sdio_fn(int type) {}
 #endif
 
-#ifdef CONFIG_DIAG_BRIDGE_CODE
-void diag_bridge_fn(int type)
+#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
+void diagfwd_bridge_fn(int type)
 {
-	if (type == INIT)
-		diagfwd_bridge_init();
-	else if (type == EXIT)
+	if (type == EXIT)
 		diagfwd_bridge_exit();
 }
 #else
-inline void diag_bridge_fn(int type) {}
+inline void diagfwd_bridge_fn(int type) { }
 #endif
 
 static int __init diagchar_init(void)
@@ -1443,6 +1573,12 @@
 
 	pr_debug("diagfwd initializing ..\n");
 	driver = kzalloc(sizeof(struct diagchar_dev) + 5, GFP_KERNEL);
+#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
+	diag_bridge = kzalloc(MAX_BRIDGES * sizeof(struct diag_bridge_dev),
+								 GFP_KERNEL);
+	if (!diag_bridge)
+		pr_warning("diag: could not allocate memory for bridge\n");
+#endif
 
 	if (driver) {
 		driver->used = 0;
@@ -1487,10 +1623,16 @@
 		diag_debugfs_init();
 		diag_masks_init();
 		diagfwd_init();
+#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
+		diagfwd_bridge_init(HSIC);
+		diagfwd_bridge_init(SMUX);
+		INIT_WORK(&(driver->diag_disconnect_work),
+						 diag_disconnect_work_fn);
+#endif
 		diagfwd_cntl_init();
 		driver->dci_state = diag_dci_init();
 		diag_sdio_fn(INIT);
-		diag_bridge_fn(INIT);
+
 		pr_debug("diagchar initializing ..\n");
 		driver->num = 1;
 		driver->name = ((void *)driver) + sizeof(struct diagchar_dev);
@@ -1525,7 +1667,7 @@
 	diagfwd_cntl_exit();
 	diag_masks_exit();
 	diag_sdio_fn(EXIT);
-	diag_bridge_fn(EXIT);
+	diagfwd_bridge_fn(EXIT);
 	return -1;
 }
 
@@ -1539,7 +1681,7 @@
 	diagfwd_cntl_exit();
 	diag_masks_exit();
 	diag_sdio_fn(EXIT);
-	diag_bridge_fn(EXIT);
+	diagfwd_bridge_fn(EXIT);
 	diag_debugfs_cleanup();
 	diagchar_cleanup();
 	printk(KERN_INFO "done diagchar exit\n");
diff --git a/drivers/char/diag/diagfwd.c b/drivers/char/diag/diagfwd.c
index 978b63b..cee4c96 100644
--- a/drivers/char/diag/diagfwd.c
+++ b/drivers/char/diag/diagfwd.c
@@ -40,6 +40,7 @@
 #endif
 #include "diag_dci.h"
 #include "diag_masks.h"
+#include "diagfwd_bridge.h"
 
 #define MODE_CMD		41
 #define RESET_ID		2
@@ -327,7 +328,7 @@
 				}
 		}
 
-#ifdef CONFIG_DIAG_BRIDGE_CODE
+#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
 		else if (proc_num == HSIC_DATA) {
 			unsigned long flags;
 			int foundIndex = -1;
@@ -337,7 +338,7 @@
 				if (driver->hsic_buf_tbl[i].length == 0) {
 					driver->hsic_buf_tbl[i].buf = buf;
 					driver->hsic_buf_tbl[i].length =
-							driver->write_len_mdm;
+						diag_bridge[HSIC].write_len;
 					driver->num_hsic_buf_tbl_entries++;
 					foundIndex = i;
 					break;
@@ -349,7 +350,7 @@
 			else
 				pr_debug("diag: ENQUEUE HSIC buf ptr and length is %x , %d\n",
 					(unsigned int)buf,
-					driver->write_len_mdm);
+					 diag_bridge[HSIC].write_len);
 		}
 #endif
 		for (i = 0; i < driver->num_clients; i++)
@@ -386,10 +387,10 @@
 				&(driver->diag_read_sdio_work));
 		}
 #endif
-#ifdef CONFIG_DIAG_BRIDGE_CODE
+#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
 		else if (proc_num == HSIC_DATA) {
 			if (driver->hsic_ch)
-				queue_work(driver->diag_bridge_wq,
+				queue_work(diag_bridge[HSIC].wq,
 					&(driver->diag_read_hsic_work));
 		}
 #endif
@@ -436,7 +437,7 @@
 						"while USB write\n");
 		}
 #endif
-#ifdef CONFIG_DIAG_BRIDGE_CODE
+#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
 		else if (proc_num == HSIC_DATA) {
 			if (driver->hsic_device_enabled) {
 				struct diag_request *write_ptr_mdm;
@@ -447,9 +448,10 @@
 				if (write_ptr_mdm) {
 					write_ptr_mdm->buf = buf;
 					write_ptr_mdm->length =
-						driver->write_len_mdm;
-					err = usb_diag_write(driver->mdm_ch,
-								write_ptr_mdm);
+					   diag_bridge[HSIC].write_len;
+					write_ptr_mdm->context = (void *)HSIC;
+					err = usb_diag_write(
+					diag_bridge[HSIC].ch, write_ptr_mdm);
 					/* Return to the pool immediately */
 					if (err) {
 						diagmem_free(driver,
@@ -463,14 +465,16 @@
 					err = -1;
 				}
 			} else {
-				pr_err("diag: Incorrect hsic data "
+				pr_err("diag: Incorrect HSIC data "
 						"while USB write\n");
 				err = -1;
 			}
 		} else if (proc_num == SMUX_DATA) {
 				write_ptr->buf = buf;
+				write_ptr->context = (void *)SMUX;
 				pr_debug("diag: writing SMUX data\n");
-				err = usb_diag_write(driver->mdm_ch, write_ptr);
+				err = usb_diag_write(diag_bridge[SMUX].ch,
+								 write_ptr);
 		}
 #endif
 		APPEND_DEBUG('d');
diff --git a/drivers/char/diag/diagfwd_bridge.c b/drivers/char/diag/diagfwd_bridge.c
new file mode 100644
index 0000000..75fdeb4
--- /dev/null
+++ b/drivers/char/diag/diagfwd_bridge.c
@@ -0,0 +1,355 @@
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/diagchar.h>
+#include <linux/kmemleak.h>
+#include <linux/err.h>
+#include <linux/workqueue.h>
+#include <linux/ratelimit.h>
+#include <linux/platform_device.h>
+#include <linux/smux.h>
+#ifdef CONFIG_DIAG_OVER_USB
+#include <mach/usbdiag.h>
+#endif
+#include "diagchar.h"
+#include "diagmem.h"
+#include "diagfwd_cntl.h"
+#include "diagfwd_smux.h"
+#include "diagfwd_hsic.h"
+#include "diag_masks.h"
+#include "diagfwd_bridge.h"
+
+struct diag_bridge_dev *diag_bridge;
+
+/* diagfwd_connect_bridge is called when the USB mdm channel is connected */
+int diagfwd_connect_bridge(int process_cable)
+{
+	int i;
+
+	pr_debug("diag: in %s\n", __func__);
+
+	for (i = 0; i < MAX_BRIDGES; i++)
+		if (diag_bridge[i].enabled)
+			connect_bridge(process_cable, i);
+	return 0;
+}
+
+void connect_bridge(int process_cable, int index)
+{
+	int err;
+
+	mutex_lock(&diag_bridge[index].bridge_mutex);
+	/* If the usb cable is being connected */
+	if (process_cable) {
+		err = usb_diag_alloc_req(diag_bridge[index].ch, N_MDM_WRITE,
+			       N_MDM_READ);
+		if (err)
+			pr_err("diag: unable to alloc USB req on mdm ch err:%d\n",
+							 err);
+
+		diag_bridge[index].usb_connected = 1;
+	}
+
+	if (index == SMUX && driver->diag_smux_enabled) {
+		driver->in_busy_smux = 0;
+		diagfwd_connect_smux();
+	} else if (index == HSIC && driver->hsic_device_enabled) {
+		driver->in_busy_hsic_read_on_device = 0;
+		driver->in_busy_hsic_write = 0;
+		/* If the HSIC (diag_bridge) platform device is not open */
+		if (!driver->hsic_device_opened) {
+			err = diag_bridge_open(&hsic_diag_bridge_ops);
+			if (err) {
+				pr_err("diag: HSIC channel open error: %d\n",
+					  err);
+			} else {
+				pr_debug("diag: opened HSIC channel\n");
+				driver->hsic_device_opened = 1;
+			}
+		} else {
+			pr_debug("diag: HSIC channel already open\n");
+		}
+		/*
+		 * Turn on communication over usb mdm and HSIC, if the HSIC
+		 * device driver is enabled and opened
+		 */
+		if (driver->hsic_device_opened) {
+			driver->hsic_ch = 1;
+			/* Poll USB mdm channel to check for data */
+			if (driver->logging_mode == USB_MODE)
+				queue_work(diag_bridge[HSIC].wq,
+					  &diag_bridge[HSIC].diag_read_work);
+			/* Poll HSIC channel to check for data */
+			queue_work(diag_bridge[HSIC].wq,
+				   &driver->diag_read_hsic_work);
+		}
+	}
+	mutex_unlock(&diag_bridge[index].bridge_mutex);
+}
+
+/*
+ * diagfwd_disconnect_bridge is called when the USB mdm channel
+ * is disconnected. So disconnect should happen for all bridges
+ */
+int diagfwd_disconnect_bridge(int process_cable)
+{
+	int i;
+	pr_debug("diag: In %s, process_cable: %d\n", __func__, process_cable);
+
+	for (i = 0; i < MAX_BRIDGES; i++) {
+		if (diag_bridge[i].enabled) {
+			mutex_lock(&diag_bridge[i].bridge_mutex);
+			/* If the usb cable is being disconnected */
+			if (process_cable) {
+				diag_bridge[i].usb_connected = 0;
+				usb_diag_free_req(diag_bridge[i].ch);
+			}
+
+			if (i == HSIC && driver->hsic_device_enabled &&
+				 driver->logging_mode != MEMORY_DEVICE_MODE) {
+				driver->in_busy_hsic_read_on_device = 1;
+				driver->in_busy_hsic_write = 1;
+				/* Turn off communication over usb and HSIC */
+				diag_hsic_close();
+			} else if (i == SMUX && driver->diag_smux_enabled &&
+					driver->logging_mode == USB_MODE) {
+				driver->in_busy_smux = 1;
+				driver->lcid = LCID_INVALID;
+				driver->smux_connected = 0;
+				/* Turn off communication over usb and smux */
+				msm_smux_close(LCID_VALID);
+			}
+			mutex_unlock(&diag_bridge[i].bridge_mutex);
+		}
+	}
+	return 0;
+}
+
+/* Called after the asynchronous usb_diag_read() on the mdm channel is complete */
+int diagfwd_read_complete_bridge(struct diag_request *diag_read_ptr)
+{
+	int index = (int)(diag_read_ptr->context);
+
+	/* The USB read on the mdm channel (not HSIC/SMUX) has completed */
+	diag_bridge[index].read_len = diag_read_ptr->actual;
+
+	if (index == SMUX) {
+		if (driver->diag_smux_enabled) {
+			diagfwd_read_complete_smux();
+			return 0;
+		} else {
+			pr_warning("diag: incorrect callback for smux\n");
+		}
+	}
+
+	/* If SMUX not enabled, check for HSIC */
+	driver->in_busy_hsic_read_on_device = 0;
+	if (!driver->hsic_ch) {
+		pr_err("DIAG in %s: driver->hsic_ch == 0\n", __func__);
+		return 0;
+	}
+
+	/*
+	 * The read of the usb driver on the mdm channel has completed.
+	 * If there is no write on the HSIC in progress, check if the
+	 * read has data to pass on to the HSIC. If so, pass the usb
+	 * mdm data on to the HSIC.
+	 */
+	if (!driver->in_busy_hsic_write && diag_bridge[HSIC].usb_buf_out &&
+		(diag_bridge[HSIC].read_len > 0)) {
+
+		/*
+		 * Initiate the HSIC write. The HSIC write is
+		 * asynchronous. When complete the write
+		 * complete callback function will be called
+		 */
+		int err;
+		driver->in_busy_hsic_write = 1;
+		err = diag_bridge_write(diag_bridge[HSIC].usb_buf_out,
+					diag_bridge[HSIC].read_len);
+		if (err) {
+			pr_err_ratelimited("diag: mdm data on HSIC write err: %d\n",
+					err);
+			/*
+			 * If the error is recoverable, then clear
+			 * the write flag, so we will resubmit a
+			 * write on the next frame.  Otherwise, don't
+			 * resubmit a write on the next frame.
+			 */
+			if ((-ENODEV) != err)
+				driver->in_busy_hsic_write = 0;
+		}
+	}
+
+	/*
+	 * If there is no write of the usb mdm data on the
+	 * HSIC channel
+	 */
+	if (!driver->in_busy_hsic_write)
+		queue_work(diag_bridge[HSIC].wq,
+			 &diag_bridge[HSIC].diag_read_work);
+
+	return 0;
+}
+
+static void diagfwd_bridge_notifier(void *priv, unsigned event,
+					struct diag_request *d_req)
+{
+	int index;
+
+	switch (event) {
+	case USB_DIAG_CONNECT:
+		diagfwd_connect_bridge(1);
+		break;
+	case USB_DIAG_DISCONNECT:
+		queue_work(driver->diag_wq,
+			 &driver->diag_disconnect_work);
+		break;
+	case USB_DIAG_READ_DONE:
+		index = (int)(d_req->context);
+		queue_work(diag_bridge[index].wq,
+		&diag_bridge[index].usb_read_complete_work);
+		break;
+	case USB_DIAG_WRITE_DONE:
+		index = (int)(d_req->context);
+		if (index == HSIC && driver->hsic_device_enabled)
+			diagfwd_write_complete_hsic(d_req);
+		else if (index == SMUX && driver->diag_smux_enabled)
+			diagfwd_write_complete_smux();
+		break;
+	default:
+		pr_err("diag: in %s: Unknown event from USB diag:%u\n",
+			__func__, event);
+		break;
+	}
+}
+
+void diagfwd_bridge_init(int index)
+{
+	int ret;
+	char name[20];
+
+	if (index == HSIC)
+		strlcpy(name, "hsic", sizeof(name));
+	else
+		strlcpy(name, "smux", sizeof(name));
+
+	strlcpy(diag_bridge[index].name, name, sizeof(diag_bridge[index].name));
+	strlcat(name, "_diag_wq", sizeof(name));
+	diag_bridge[index].enabled = 1;
+	diag_bridge[index].wq = create_singlethread_workqueue(name);
+	diag_bridge[index].read_len = 0;
+	diag_bridge[index].write_len = 0;
+	if (diag_bridge[index].usb_buf_out == NULL)
+		diag_bridge[index].usb_buf_out =
+				 kzalloc(USB_MAX_OUT_BUF, GFP_KERNEL);
+	if (diag_bridge[index].usb_buf_out == NULL)
+		goto err;
+	if (diag_bridge[index].usb_read_ptr == NULL)
+		diag_bridge[index].usb_read_ptr =
+			 kzalloc(sizeof(struct diag_request), GFP_KERNEL);
+	if (diag_bridge[index].usb_read_ptr == NULL)
+		goto err;
+	if (diag_bridge[index].usb_read_ptr->context == NULL)
+		diag_bridge[index].usb_read_ptr->context =
+					 kzalloc(sizeof(int), GFP_KERNEL);
+	if (diag_bridge[index].usb_read_ptr->context == NULL)
+		goto err;
+	mutex_init(&diag_bridge[index].bridge_mutex);
+
+	if (index == HSIC) {
+		INIT_WORK(&(diag_bridge[index].usb_read_complete_work),
+				 diag_usb_read_complete_hsic_fn);
+#ifdef CONFIG_DIAG_OVER_USB
+		INIT_WORK(&(diag_bridge[index].diag_read_work),
+		      diag_read_usb_hsic_work_fn);
+		diag_bridge[index].ch = usb_diag_open(DIAG_MDM, (void *)index,
+						  diagfwd_bridge_notifier);
+		if (IS_ERR(diag_bridge[index].ch)) {
+			pr_err("diag: Unable to open USB diag MDM channel\n");
+			goto err;
+		}
+#endif
+		/* register HSIC device */
+		ret = platform_driver_register(&msm_hsic_ch_driver);
+		if (ret)
+			pr_err("diag: could not register HSIC device, ret: %d\n",
+									 ret);
+	} else if (index == SMUX) {
+		INIT_WORK(&(diag_bridge[index].usb_read_complete_work),
+					 diag_usb_read_complete_smux_fn);
+#ifdef CONFIG_DIAG_OVER_USB
+		INIT_WORK(&(diag_bridge[index].diag_read_work),
+					 diag_read_usb_smux_work_fn);
+		diag_bridge[index].ch = usb_diag_open(DIAG_QSC, (void *)index,
+					     diagfwd_bridge_notifier);
+		if (IS_ERR(diag_bridge[index].ch)) {
+			pr_err("diag: Unable to open USB diag QSC channel\n");
+			goto err;
+		}
+#endif
+		ret = platform_driver_register(&msm_diagfwd_smux_driver);
+		if (ret)
+			pr_err("diag: could not register SMUX device, ret: %d\n",
+									 ret);
+	}
+	return;
+err:
+	pr_err("diag: Could not initialize for bridge forwarding\n");
+	kfree(diag_bridge[index].usb_buf_out);
+	kfree(driver->hsic_buf_tbl);
+	kfree(driver->write_ptr_mdm);
+	kfree(diag_bridge[index].usb_read_ptr);
+	if (diag_bridge[index].wq)
+		destroy_workqueue(diag_bridge[index].wq);
+	return;
+}
+
+void diagfwd_bridge_exit(void)
+{
+	int i;
+	pr_debug("diag: in %s\n", __func__);
+
+	if (driver->hsic_device_enabled) {
+		diag_hsic_close();
+		driver->hsic_device_enabled = 0;
+		diag_bridge[HSIC].enabled = 0;
+	}
+	driver->hsic_inited = 0;
+	diagmem_exit(driver, POOL_TYPE_ALL);
+	if (driver->diag_smux_enabled) {
+		driver->lcid = LCID_INVALID;
+		kfree(driver->buf_in_smux);
+		driver->diag_smux_enabled = 0;
+		diag_bridge[SMUX].enabled = 0;
+	}
+	platform_driver_unregister(&msm_hsic_ch_driver);
+	platform_driver_unregister(&msm_diagfwd_smux_driver);
+	/* destroy USB MDM specific variables */
+	for (i = 0; i < MAX_BRIDGES; i++) {
+		if (diag_bridge[i].enabled) {
+#ifdef CONFIG_DIAG_OVER_USB
+			if (diag_bridge[i].usb_connected)
+				usb_diag_free_req(diag_bridge[i].ch);
+			usb_diag_close(diag_bridge[i].ch);
+#endif
+			kfree(diag_bridge[i].usb_buf_out);
+			kfree(diag_bridge[i].usb_read_ptr);
+			destroy_workqueue(diag_bridge[i].wq);
+			diag_bridge[i].enabled = 0;
+		}
+	}
+	kfree(driver->hsic_buf_tbl);
+	kfree(driver->write_ptr_mdm);
+}
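
The notifier and read-complete handler above recover the bridge index from the USB request's context field; the submit paths in diagfwd_hsic.c and diagfwd_smux.c (further down in this patch) store the index the same way. A minimal sketch of that round trip, using only calls that appear in these hunks:

	/* submit side (diag_read_usb_hsic_work_fn / diag_read_usb_smux_work_fn):
	 * the integer bridge index is stored directly in the context pointer
	 */
	diag_bridge[HSIC].usb_read_ptr->context = (void *)HSIC;
	usb_diag_read(diag_bridge[HSIC].ch, diag_bridge[HSIC].usb_read_ptr);

	/* completion side (diagfwd_bridge_notifier / diagfwd_read_complete_bridge):
	 * the same index is recovered by casting the pointer back
	 */
	index = (int)(d_req->context);

Note that diagfwd_bridge_init() also kzallocs a separate sizeof(int) buffer into ->context, which the submit paths then overwrite with the raw index value.
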
diff --git a/drivers/char/diag/diagfwd_bridge.h b/drivers/char/diag/diagfwd_bridge.h
new file mode 100644
index 0000000..06e6a96
--- /dev/null
+++ b/drivers/char/diag/diagfwd_bridge.h
@@ -0,0 +1,47 @@
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef DIAGFWD_BRIDGE_H
+#define DIAGFWD_BRIDGE_H
+
+#include "diagfwd.h"
+
+#define MAX_BRIDGES	5
+#define HSIC	0
+#define SMUX	1
+
+int diagfwd_connect_bridge(int);
+void connect_bridge(int, int);
+int diagfwd_disconnect_bridge(int);
+void diagfwd_bridge_init(int index);
+void diagfwd_bridge_exit(void);
+int diagfwd_read_complete_bridge(struct diag_request *diag_read_ptr);
+
+/* Diag bridge structure; several bridges can be in use at the same time,
+ * for instance SMUX and HSIC operating simultaneously
+ */
+struct diag_bridge_dev {
+	char name[20];
+	int enabled;
+	struct mutex bridge_mutex;
+	int usb_connected;
+	int read_len;
+	int write_len;
+	unsigned char *usb_buf_out;
+	struct usb_diag_ch *ch;
+	struct workqueue_struct *wq;
+	struct work_struct diag_read_work;
+	struct diag_request *usb_read_ptr;
+	struct work_struct usb_read_complete_work;
+};
+
+#endif
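
diag_bridge itself is only defined as a bare pointer in diagfwd_bridge.c; the allocation of the MAX_BRIDGES-entry table is not part of these hunks. A hedged sketch of the presumed setup, with the caller location being an assumption:

	/* hypothetical setup code, run once before any bridge is used */
	diag_bridge = kzalloc(MAX_BRIDGES * sizeof(struct diag_bridge_dev),
			      GFP_KERNEL);
	if (!diag_bridge)
		return -ENOMEM;

	diagfwd_bridge_init(HSIC);
	diagfwd_bridge_init(SMUX);
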
diff --git a/drivers/char/diag/diagfwd_hsic.c b/drivers/char/diag/diagfwd_hsic.c
index 56f2fae..3d5eea5 100644
--- a/drivers/char/diag/diagfwd_hsic.c
+++ b/drivers/char/diag/diagfwd_hsic.c
@@ -31,6 +31,7 @@
 #include "diagfwd.h"
 #include "diagfwd_hsic.h"
 #include "diagfwd_smux.h"
+#include "diagfwd_bridge.h"
 
 #define READ_HSIC_BUF_SIZE 2048
 
@@ -72,7 +73,7 @@
 		write_ptrs_available--;
 
 		/*
-		 * No sense queuing a read if the hsic bridge was
+		 * No sense queuing a read if the HSIC bridge was
 		 * closed in another thread
 		 */
 		if (!driver->hsic_ch)
@@ -82,7 +83,7 @@
 							POOL_TYPE_HSIC);
 		if (buf_in_hsic) {
 			/*
-			 * Initiate the read from the hsic.  The hsic read is
+			 * Initiate the read from the HSIC.  The HSIC read is
 			 * asynchronous.  Once the read is complete the read
 			 * callback function will be called.
 			 */
@@ -116,7 +117,7 @@
 	if ((driver->count_hsic_pool < driver->poolsize_hsic) &&
 		(num_reads_submitted == 0) && (err != -ENODEV) &&
 		(driver->hsic_ch != 0))
-		queue_work(driver->diag_bridge_wq,
+		queue_work(diag_bridge[HSIC].wq,
 				 &driver->diag_read_hsic_work);
 }
 
@@ -127,7 +128,7 @@
 
 	if (!driver->hsic_ch) {
 		/*
-		 * The hsic channel is closed. Return the buffer to
+		 * The HSIC channel is closed. Return the buffer to
 		 * the pool.  Do not send it on.
 		 */
 		diagmem_free(driver, buf, POOL_TYPE_HSIC);
@@ -149,7 +150,7 @@
 			 * Send data in buf to be written on the
 			 * appropriate device, e.g. USB MDM channel
 			 */
-			driver->write_len_mdm = actual_size;
+			diag_bridge[HSIC].write_len = actual_size;
 			err = diag_device_write((void *)buf, HSIC_DATA, NULL);
 			/* If an error, return buffer to the pool */
 			if (err) {
@@ -170,13 +171,13 @@
 	}
 
 	/*
-	 * If for some reason there was no hsic data to write to the
+	 * If for some reason there was no HSIC data to write to the
 	 * mdm channel, set up another read
 	 */
 	if (err &&
 		((driver->logging_mode == MEMORY_DEVICE_MODE) ||
-		(driver->usb_mdm_connected && !driver->hsic_suspend))) {
-		queue_work(driver->diag_bridge_wq,
+		(diag_bridge[HSIC].usb_connected && !driver->hsic_suspend))) {
+		queue_work(diag_bridge[HSIC].wq,
 				 &driver->diag_read_hsic_work);
 	}
 }
@@ -195,8 +196,10 @@
 	if (actual_size < 0)
 		pr_err("DIAG in %s: actual_size: %d\n", __func__, actual_size);
 
-	if (driver->usb_mdm_connected)
-		queue_work(driver->diag_bridge_wq, &driver->diag_read_mdm_work);
+	if (diag_bridge[HSIC].usb_connected &&
+				 (driver->logging_mode == USB_MODE))
+		queue_work(diag_bridge[HSIC].wq,
+				 &diag_bridge[HSIC].diag_read_work);
 }
 
 static int diag_hsic_suspend(void *ctxt)
@@ -223,12 +226,12 @@
 
 	if ((driver->count_hsic_pool < driver->poolsize_hsic) &&
 		((driver->logging_mode == MEMORY_DEVICE_MODE) ||
-				(driver->usb_mdm_connected)))
-		queue_work(driver->diag_bridge_wq,
+				(diag_bridge[HSIC].usb_connected)))
+		queue_work(diag_bridge[HSIC].wq,
 			 &driver->diag_read_hsic_work);
 }
 
-static struct diag_bridge_ops hsic_diag_bridge_ops = {
+struct diag_bridge_ops hsic_diag_bridge_ops = {
 	.ctxt = NULL,
 	.read_complete_cb = diag_hsic_read_complete_callback,
 	.write_complete_cb = diag_hsic_write_complete_callback,
@@ -236,7 +239,7 @@
 	.resume = diag_hsic_resume,
 };
 
-static void diag_hsic_close(void)
+void diag_hsic_close(void)
 {
 	if (driver->hsic_device_enabled) {
 		driver->hsic_ch = 0;
@@ -257,7 +260,7 @@
 {
 	int err;
 
-	mutex_lock(&driver->bridge_mutex);
+	mutex_lock(&diag_bridge[HSIC].bridge_mutex);
 	if (driver->hsic_device_enabled) {
 		if (driver->hsic_device_opened) {
 			driver->hsic_ch = 0;
@@ -274,112 +277,7 @@
 			}
 		}
 	}
-
-	mutex_unlock(&driver->bridge_mutex);
-	return 0;
-}
-
-/* diagfwd_connect_bridge is called when the USB mdm channel is connected */
-int diagfwd_connect_bridge(int process_cable)
-{
-	int err;
-
-	pr_debug("diag: in %s\n", __func__);
-
-	mutex_lock(&driver->bridge_mutex);
-	/* If the usb cable is being connected */
-	if (process_cable) {
-		err = usb_diag_alloc_req(driver->mdm_ch, N_MDM_WRITE,
-			N_MDM_READ);
-		if (err)
-			pr_err("diag: unable to alloc USB req on mdm"
-				" ch err:%d\n", err);
-
-		driver->usb_mdm_connected = 1;
-	}
-
-	if (driver->hsic_device_enabled) {
-		driver->in_busy_hsic_read_on_device = 0;
-		driver->in_busy_hsic_write = 0;
-	} else if (driver->diag_smux_enabled) {
-		driver->in_busy_smux = 0;
-		diagfwd_connect_smux();
-		mutex_unlock(&driver->bridge_mutex);
-		return 0;
-	}
-
-	/* If the hsic (diag_bridge) platform device is not open */
-	if (driver->hsic_device_enabled) {
-		if (!driver->hsic_device_opened) {
-			err = diag_bridge_open(&hsic_diag_bridge_ops);
-			if (err) {
-				pr_err("diag: HSIC channel open error: %d\n",
-					err);
-			} else {
-				pr_debug("diag: opened HSIC channel\n");
-				driver->hsic_device_opened = 1;
-			}
-		} else {
-			pr_debug("diag: HSIC channel already open\n");
-		}
-
-		/*
-		 * Turn on communication over usb mdm and hsic, if the hsic
-		 * device driver is enabled and opened
-		 */
-		if (driver->hsic_device_opened) {
-			driver->hsic_ch = 1;
-
-			/* Poll USB mdm channel to check for data */
-			if (driver->logging_mode == USB_MODE)
-				queue_work(driver->diag_bridge_wq,
-						&driver->diag_read_mdm_work);
-
-			/* Poll HSIC channel to check for data */
-			queue_work(driver->diag_bridge_wq,
-					 &driver->diag_read_hsic_work);
-		}
-	} else {
-		/* The hsic device driver has not yet been enabled */
-		pr_info("diag: HSIC channel not yet enabled\n");
-	}
-
-	mutex_unlock(&driver->bridge_mutex);
-	return 0;
-}
-
-/*
- * diagfwd_disconnect_bridge is called when the USB mdm channel
- * is disconnected
- */
-int diagfwd_disconnect_bridge(int process_cable)
-{
-	pr_debug("diag: In %s, process_cable: %d\n", __func__, process_cable);
-
-	mutex_lock(&driver->bridge_mutex);
-
-	/* If the usb cable is being disconnected */
-	if (process_cable) {
-		driver->usb_mdm_connected = 0;
-		usb_diag_free_req(driver->mdm_ch);
-	}
-
-	if (driver->hsic_device_enabled &&
-		driver->logging_mode != MEMORY_DEVICE_MODE) {
-		driver->in_busy_hsic_read_on_device = 1;
-		driver->in_busy_hsic_write = 1;
-		/* Turn off communication over usb mdm and hsic */
-		diag_hsic_close();
-	} else if (driver->diag_smux_enabled &&
-		driver->logging_mode == USB_MODE) {
-		driver->in_busy_smux = 1;
-		driver->lcid = LCID_INVALID;
-		driver->smux_connected = 0;
-		/* Turn off communication over usb mdm and smux */
-		msm_smux_close(LCID_VALID);
-	}
-
-	mutex_unlock(&driver->bridge_mutex);
+	mutex_unlock(&diag_bridge[HSIC].bridge_mutex);
 	return 0;
 }
 
@@ -403,225 +301,128 @@
 		return 0;
 	}
 
-	/* Read data from the hsic */
-	queue_work(driver->diag_bridge_wq, &driver->diag_read_hsic_work);
+	/* Read data from the HSIC */
+	queue_work(diag_bridge[HSIC].wq, &driver->diag_read_hsic_work);
 
 	return 0;
 }
 
-/* Called after the asychronous usb_diag_read() on mdm channel is complete */
-static int diagfwd_read_complete_bridge(struct diag_request *diag_read_ptr)
+void diag_usb_read_complete_hsic_fn(struct work_struct *w)
 {
-	/* The read of the usb driver on the mdm (not hsic) has completed */
-	driver->in_busy_hsic_read_on_device = 0;
-	driver->read_len_mdm = diag_read_ptr->actual;
+	diagfwd_read_complete_bridge(diag_bridge[HSIC].usb_read_ptr);
+}
 
-	if (driver->diag_smux_enabled) {
-		diagfwd_read_complete_smux();
-		return 0;
-	}
-	/* If SMUX not enabled, check for HSIC */
+
+void diag_read_usb_hsic_work_fn(struct work_struct *work)
+{
 	if (!driver->hsic_ch) {
-		pr_err("DIAG in %s: driver->hsic_ch == 0\n", __func__);
-		return 0;
-	}
-
-	/*
-	 * The read of the usb driver on the mdm channel has completed.
-	 * If there is no write on the hsic in progress, check if the
-	 * read has data to pass on to the hsic. If so, pass the usb
-	 * mdm data on to the hsic.
-	 */
-	if (!driver->in_busy_hsic_write && driver->usb_buf_mdm_out &&
-		(driver->read_len_mdm > 0)) {
-
-		/*
-		 * Initiate the hsic write. The hsic write is
-		 * asynchronous. When complete the write
-		 * complete callback function will be called
-		 */
-		int err;
-		driver->in_busy_hsic_write = 1;
-		err = diag_bridge_write(driver->usb_buf_mdm_out,
-					driver->read_len_mdm);
-		if (err) {
-			pr_err_ratelimited("diag: mdm data on hsic write err: %d\n",
-					err);
-			/*
-			 * If the error is recoverable, then clear
-			 * the write flag, so we will resubmit a
-			 * write on the next frame.  Otherwise, don't
-			 * resubmit a write on the next frame.
-			 */
-			if ((-ENODEV) != err)
-				driver->in_busy_hsic_write = 0;
-		}
-	}
-
-	/*
-	 * If there is no write of the usb mdm data on the
-	 * hsic channel
-	 */
-	if (!driver->in_busy_hsic_write)
-		queue_work(driver->diag_bridge_wq, &driver->diag_read_mdm_work);
-
-	return 0;
-}
-
-static void diagfwd_bridge_notifier(void *priv, unsigned event,
-					struct diag_request *d_req)
-{
-	switch (event) {
-	case USB_DIAG_CONNECT:
-		diagfwd_connect_bridge(1);
-		break;
-	case USB_DIAG_DISCONNECT:
-		queue_work(driver->diag_bridge_wq,
-			 &driver->diag_disconnect_work);
-		break;
-	case USB_DIAG_READ_DONE:
-		queue_work(driver->diag_bridge_wq,
-				&driver->diag_usb_read_complete_work);
-		break;
-	case USB_DIAG_WRITE_DONE:
-		if (driver->hsic_device_enabled)
-			diagfwd_write_complete_hsic(d_req);
-		else if (driver->diag_smux_enabled)
-			diagfwd_write_complete_smux();
-		break;
-	default:
-		pr_err("diag: in %s: Unknown event from USB diag:%u\n",
-			__func__, event);
-		break;
-	}
-}
-
-static void diag_usb_read_complete_fn(struct work_struct *w)
-{
-	diagfwd_read_complete_bridge(driver->usb_read_mdm_ptr);
-}
-
-static void diag_disconnect_work_fn(struct work_struct *w)
-{
-	diagfwd_disconnect_bridge(1);
-}
-
-static void diag_read_mdm_work_fn(struct work_struct *work)
-{
-	int ret;
-	if (driver->diag_smux_enabled) {
-		if (driver->lcid && driver->usb_buf_mdm_out &&
-				(driver->read_len_mdm > 0) &&
-				driver->smux_connected) {
-			ret = msm_smux_write(driver->lcid,  NULL,
-		 driver->usb_buf_mdm_out, driver->read_len_mdm);
-			if (ret)
-				pr_err("diag: writing to SMUX ch, r = %d,"
-					"lcid = %d\n", ret, driver->lcid);
-		}
-		driver->usb_read_mdm_ptr->buf = driver->usb_buf_mdm_out;
-		driver->usb_read_mdm_ptr->length = USB_MAX_OUT_BUF;
-		usb_diag_read(driver->mdm_ch, driver->usb_read_mdm_ptr);
+		pr_err("diag: in %s: driver->hsic_ch == 0\n", __func__);
 		return;
 	}
-
-	/* if SMUX not enabled, check for HSIC */
-	if (!driver->hsic_ch) {
-		pr_err("DIAG in %s: driver->hsic_ch == 0\n", __func__);
-		return;
-	}
-
 	/*
 	 * If there is no data being read from the usb mdm channel
 	 * and there is no mdm channel data currently being written
-	 * to the hsic
+	 * to the HSIC
 	 */
 	if (!driver->in_busy_hsic_read_on_device &&
-				 !driver->in_busy_hsic_write) {
+	     !driver->in_busy_hsic_write) {
 		APPEND_DEBUG('x');
-
 		/* Setup the next read from usb mdm channel */
 		driver->in_busy_hsic_read_on_device = 1;
-		driver->usb_read_mdm_ptr->buf = driver->usb_buf_mdm_out;
-		driver->usb_read_mdm_ptr->length = USB_MAX_OUT_BUF;
-		usb_diag_read(driver->mdm_ch, driver->usb_read_mdm_ptr);
+		diag_bridge[HSIC].usb_read_ptr->buf =
+				 diag_bridge[HSIC].usb_buf_out;
+		diag_bridge[HSIC].usb_read_ptr->length = USB_MAX_OUT_BUF;
+		diag_bridge[HSIC].usb_read_ptr->context = (void *)HSIC;
+		usb_diag_read(diag_bridge[HSIC].ch,
+				 diag_bridge[HSIC].usb_read_ptr);
 		APPEND_DEBUG('y');
 	}
-
-	/*
-	 * If for some reason there was no mdm channel read initiated,
+	/* If for some reason there was no mdm channel read initiated,
 	 * queue up the reading of data from the mdm channel
 	 */
-	if (!driver->in_busy_hsic_read_on_device)
-		queue_work(driver->diag_bridge_wq,
-			 &driver->diag_read_mdm_work);
+
+	if (!driver->in_busy_hsic_read_on_device &&
+		(driver->logging_mode == USB_MODE))
+		queue_work(diag_bridge[HSIC].wq,
+			 &(diag_bridge[HSIC].diag_read_work));
 }
 
 static int diag_hsic_probe(struct platform_device *pdev)
 {
 	int err = 0;
+
 	pr_debug("diag: in %s\n", __func__);
+	mutex_lock(&diag_bridge[HSIC].bridge_mutex);
 	if (!driver->hsic_inited) {
+		spin_lock_init(&driver->hsic_spinlock);
+		driver->num_hsic_buf_tbl_entries = 0;
+		if (driver->hsic_buf_tbl == NULL)
+			driver->hsic_buf_tbl = kzalloc(NUM_HSIC_BUF_TBL_ENTRIES
+				* sizeof(struct diag_write_device), GFP_KERNEL);
+		if (driver->hsic_buf_tbl == NULL) {
+			mutex_unlock(&diag_bridge[HSIC].bridge_mutex);
+			return -ENOMEM;
+		}
+		driver->count_hsic_pool = 0;
+		driver->count_hsic_write_pool = 0;
+		driver->itemsize_hsic = READ_HSIC_BUF_SIZE;
+		driver->poolsize_hsic = N_MDM_WRITE;
+		driver->itemsize_hsic_write = sizeof(struct diag_request);
+		driver->poolsize_hsic_write = N_MDM_WRITE;
 		diagmem_hsic_init(driver);
 		INIT_WORK(&(driver->diag_read_hsic_work),
-					 diag_read_hsic_work_fn);
+			    diag_read_hsic_work_fn);
 		driver->hsic_inited = 1;
 	}
-
-	mutex_lock(&driver->bridge_mutex);
-
 	/*
 	 * The probe function was called after the usb was connected
 	 * on the legacy channel OR ODL is turned on. Communication over usb
-	 * mdm and hsic needs to be turned on.
+	 * mdm and HSIC needs to be turned on.
 	 */
-	if (driver->usb_mdm_connected || (driver->logging_mode ==
-							 MEMORY_DEVICE_MODE)) {
+	if (diag_bridge[HSIC].usb_connected || (driver->logging_mode ==
+						   MEMORY_DEVICE_MODE)) {
 		if (driver->hsic_device_opened) {
 			/* should not happen. close it before re-opening */
 			pr_warn("diag: HSIC channel already opened in probe\n");
 			diag_bridge_close();
 		}
-
 		err = diag_bridge_open(&hsic_diag_bridge_ops);
 		if (err) {
 			pr_err("diag: could not open HSIC, err: %d\n", err);
 			driver->hsic_device_opened = 0;
-			mutex_unlock(&driver->bridge_mutex);
+			mutex_unlock(&diag_bridge[HSIC].bridge_mutex);
 			return err;
 		}
 
 		pr_info("diag: opened HSIC channel\n");
 		driver->hsic_device_opened = 1;
 		driver->hsic_ch = 1;
-
 		driver->in_busy_hsic_read_on_device = 0;
 		driver->in_busy_hsic_write = 0;
 
-		if (driver->usb_mdm_connected) {
+		if (diag_bridge[HSIC].usb_connected) {
 			/* Poll USB mdm channel to check for data */
-			queue_work(driver->diag_bridge_wq,
-					 &driver->diag_read_mdm_work);
+			queue_work(diag_bridge[HSIC].wq,
+			     &diag_bridge[HSIC].diag_read_work);
 		}
-
 		/* Poll HSIC channel to check for data */
-		queue_work(driver->diag_bridge_wq,
-				 &driver->diag_read_hsic_work);
+		queue_work(diag_bridge[HSIC].wq,
+			  &driver->diag_read_hsic_work);
 	}
-
-	/* The hsic (diag_bridge) platform device driver is enabled */
+	/* The HSIC (diag_bridge) platform device driver is enabled */
 	driver->hsic_device_enabled = 1;
-	mutex_unlock(&driver->bridge_mutex);
+	mutex_unlock(&diag_bridge[HSIC].bridge_mutex);
 	return err;
 }
 
 static int diag_hsic_remove(struct platform_device *pdev)
 {
 	pr_debug("diag: %s called\n", __func__);
-	mutex_lock(&driver->bridge_mutex);
+	mutex_lock(&diag_bridge[HSIC].bridge_mutex);
 	diag_hsic_close();
 	driver->hsic_device_enabled = 0;
-	mutex_unlock(&driver->bridge_mutex);
+	mutex_unlock(&diag_bridge[HSIC].bridge_mutex);
+
 	return 0;
 }
 
@@ -642,7 +443,7 @@
 	.runtime_resume = diagfwd_hsic_runtime_resume,
 };
 
-static struct platform_driver msm_hsic_ch_driver = {
+struct platform_driver msm_hsic_ch_driver = {
 	.probe = diag_hsic_probe,
 	.remove = diag_hsic_remove,
 	.driver = {
@@ -651,112 +452,3 @@
 		   .pm   = &diagfwd_hsic_dev_pm_ops,
 		   },
 };
-
-void diagfwd_bridge_init(void)
-{
-	int ret;
-
-	pr_debug("diag: in %s\n", __func__);
-	driver->diag_bridge_wq = create_singlethread_workqueue(
-							"diag_bridge_wq");
-	driver->read_len_mdm = 0;
-	driver->write_len_mdm = 0;
-	driver->num_hsic_buf_tbl_entries = 0;
-	spin_lock_init(&driver->hsic_spinlock);
-	if (driver->usb_buf_mdm_out  == NULL)
-		driver->usb_buf_mdm_out = kzalloc(USB_MAX_OUT_BUF,
-							 GFP_KERNEL);
-	if (driver->usb_buf_mdm_out == NULL)
-		goto err;
-	/* Only used by smux move to smux probe function */
-	if (driver->write_ptr_mdm == NULL)
-		driver->write_ptr_mdm = kzalloc(
-		sizeof(struct diag_request), GFP_KERNEL);
-	if (driver->write_ptr_mdm == NULL)
-		goto err;
-	if (driver->usb_read_mdm_ptr == NULL)
-		driver->usb_read_mdm_ptr = kzalloc(
-		sizeof(struct diag_request), GFP_KERNEL);
-	if (driver->usb_read_mdm_ptr == NULL)
-		goto err;
-
-	if (driver->hsic_buf_tbl == NULL)
-		driver->hsic_buf_tbl = kzalloc(NUM_HSIC_BUF_TBL_ENTRIES *
-				sizeof(struct diag_write_device), GFP_KERNEL);
-	if (driver->hsic_buf_tbl == NULL)
-		goto err;
-
-	driver->count_hsic_pool = 0;
-	driver->count_hsic_write_pool = 0;
-
-	driver->itemsize_hsic = READ_HSIC_BUF_SIZE;
-	driver->poolsize_hsic = N_MDM_WRITE;
-	driver->itemsize_hsic_write = sizeof(struct diag_request);
-	driver->poolsize_hsic_write = N_MDM_WRITE;
-
-	mutex_init(&driver->bridge_mutex);
-#ifdef CONFIG_DIAG_OVER_USB
-	INIT_WORK(&(driver->diag_read_mdm_work), diag_read_mdm_work_fn);
-#endif
-	INIT_WORK(&(driver->diag_disconnect_work), diag_disconnect_work_fn);
-	INIT_WORK(&(driver->diag_usb_read_complete_work),
-			diag_usb_read_complete_fn);
-#ifdef CONFIG_DIAG_OVER_USB
-	driver->mdm_ch = usb_diag_open(DIAG_MDM, driver,
-						 diagfwd_bridge_notifier);
-	if (IS_ERR(driver->mdm_ch)) {
-		pr_err("diag: Unable to open USB diag MDM channel\n");
-		goto err;
-	}
-#endif
-	/* register HSIC device */
-	ret = platform_driver_register(&msm_hsic_ch_driver);
-	if (ret)
-		pr_err("diag: could not register HSIC device, ret: %d\n", ret);
-	/* register SMUX device */
-	ret = platform_driver_register(&msm_diagfwd_smux_driver);
-	if (ret)
-		pr_err("diag: could not register SMUX device, ret: %d\n", ret);
-
-	return;
-err:
-	pr_err("diag: Could not initialize for bridge forwarding\n");
-	kfree(driver->usb_buf_mdm_out);
-	kfree(driver->hsic_buf_tbl);
-	kfree(driver->write_ptr_mdm);
-	kfree(driver->usb_read_mdm_ptr);
-	if (driver->diag_bridge_wq)
-		destroy_workqueue(driver->diag_bridge_wq);
-
-	return;
-}
-
-void diagfwd_bridge_exit(void)
-{
-	pr_debug("diag: in %s\n", __func__);
-
-	if (driver->hsic_device_enabled) {
-		diag_hsic_close();
-		driver->hsic_device_enabled = 0;
-	}
-	driver->hsic_inited = 0;
-	diagmem_exit(driver, POOL_TYPE_ALL);
-	if (driver->diag_smux_enabled) {
-		driver->lcid = LCID_INVALID;
-		kfree(driver->buf_in_smux);
-		driver->diag_smux_enabled = 0;
-	}
-	platform_driver_unregister(&msm_hsic_ch_driver);
-	platform_driver_unregister(&msm_diagfwd_smux_driver);
-	/* destroy USB MDM specific variables */
-#ifdef CONFIG_DIAG_OVER_USB
-	if (driver->usb_mdm_connected)
-		usb_diag_free_req(driver->mdm_ch);
-	usb_diag_close(driver->mdm_ch);
-#endif
-	kfree(driver->usb_buf_mdm_out);
-	kfree(driver->hsic_buf_tbl);
-	kfree(driver->write_ptr_mdm);
-	kfree(driver->usb_read_mdm_ptr);
-	destroy_workqueue(driver->diag_bridge_wq);
-}
diff --git a/drivers/char/diag/diagfwd_hsic.h b/drivers/char/diag/diagfwd_hsic.h
index 19ed3c7..2190fff 100644
--- a/drivers/char/diag/diagfwd_hsic.h
+++ b/drivers/char/diag/diagfwd_hsic.h
@@ -17,14 +17,14 @@
 
 #define N_MDM_WRITE	8
 #define N_MDM_READ	1
-
 #define NUM_HSIC_BUF_TBL_ENTRIES N_MDM_WRITE
 
-int diagfwd_connect_bridge(int);
-int diagfwd_disconnect_bridge(int);
 int diagfwd_write_complete_hsic(struct diag_request *);
 int diagfwd_cancel_hsic(void);
-void diagfwd_bridge_init(void);
-void diagfwd_bridge_exit(void);
+void diag_read_usb_hsic_work_fn(struct work_struct *work);
+void diag_usb_read_complete_hsic_fn(struct work_struct *w);
+extern struct diag_bridge_ops hsic_diag_bridge_ops;
+extern struct platform_driver msm_hsic_ch_driver;
+void diag_hsic_close(void);
 
 #endif
diff --git a/drivers/char/diag/diagfwd_smux.c b/drivers/char/diag/diagfwd_smux.c
index ae90686..0a97baf 100644
--- a/drivers/char/diag/diagfwd_smux.c
+++ b/drivers/char/diag/diagfwd_smux.c
@@ -18,6 +18,8 @@
 #include "diagchar.h"
 #include "diagfwd.h"
 #include "diagfwd_smux.h"
+#include "diagfwd_hsic.h"
+#include "diagfwd_bridge.h"
 
 void diag_smux_event(void *priv, int event_type, const void *metadata)
 {
@@ -30,8 +32,8 @@
 		driver->smux_connected = 1;
 		driver->in_busy_smux = 0;
 		/* read data from USB MDM channel & Initiate first write */
-		queue_work(driver->diag_bridge_wq,
-				 &(driver->diag_read_mdm_work));
+		queue_work(diag_bridge[SMUX].wq,
+			   &diag_bridge[SMUX].diag_read_work);
 		break;
 	case SMUX_DISCONNECTED:
 		driver->smux_connected = 0;
@@ -67,7 +69,7 @@
 
 int diagfwd_read_complete_smux(void)
 {
-	queue_work(driver->diag_bridge_wq, &(driver->diag_read_mdm_work));
+	queue_work(diag_bridge[SMUX].wq, &diag_bridge[SMUX].diag_read_work);
 	return 0;
 }
 
@@ -85,6 +87,36 @@
 	return 0;
 }
 
+void diag_usb_read_complete_smux_fn(struct work_struct *w)
+{
+	diagfwd_read_complete_bridge(diag_bridge[SMUX].usb_read_ptr);
+}
+
+void diag_read_usb_smux_work_fn(struct work_struct *work)
+{
+	int ret;
+
+	if (driver->diag_smux_enabled) {
+		if (driver->lcid && diag_bridge[SMUX].usb_buf_out &&
+			(diag_bridge[SMUX].read_len > 0) &&
+				driver->smux_connected) {
+			ret = msm_smux_write(driver->lcid, NULL,
+			      diag_bridge[SMUX].usb_buf_out,
+				 diag_bridge[SMUX].read_len);
+			if (ret)
+				pr_err("diag: writing to SMUX ch, r = %d, lcid = %d\n",
+						 ret, driver->lcid);
+		}
+		diag_bridge[SMUX].usb_read_ptr->buf =
+					 diag_bridge[SMUX].usb_buf_out;
+		diag_bridge[SMUX].usb_read_ptr->length = USB_MAX_OUT_BUF;
+		diag_bridge[SMUX].usb_read_ptr->context = (void *)SMUX;
+		usb_diag_read(diag_bridge[SMUX].ch,
+					 diag_bridge[SMUX].usb_read_ptr);
+		return;
+	}
+}
+
 static int diagfwd_smux_runtime_suspend(struct device *dev)
 {
 	dev_dbg(dev, "pm_runtime: suspending...\n");
@@ -120,7 +152,7 @@
 		}
 	}
 	/* Poll USB channel to check for data*/
-	queue_work(driver->diag_bridge_wq, &(driver->diag_read_mdm_work));
+	queue_work(diag_bridge[SMUX].wq, &(diag_bridge[SMUX].diag_read_work));
 	return ret;
 }
 
@@ -142,6 +174,11 @@
 	 * if (ret)
 	 *	pr_err("diag: error setting SMUX ch option, r = %d\n", ret);
 	 */
+	if (driver->write_ptr_mdm == NULL)
+		driver->write_ptr_mdm = kzalloc(sizeof(struct diag_request),
+								 GFP_KERNEL);
+	if (driver->write_ptr_mdm == NULL)
+		goto err;
 	ret = diagfwd_connect_smux();
 	return ret;
 
diff --git a/drivers/char/diag/diagfwd_smux.h b/drivers/char/diag/diagfwd_smux.h
index e78b7ed..b45fd5d 100644
--- a/drivers/char/diag/diagfwd_smux.h
+++ b/drivers/char/diag/diagfwd_smux.h
@@ -20,6 +20,8 @@
 int diagfwd_read_complete_smux(void);
 int diagfwd_write_complete_smux(void);
 int diagfwd_connect_smux(void);
+void diag_usb_read_complete_smux_fn(struct work_struct *w);
+void diag_read_usb_smux_work_fn(struct work_struct *work);
 extern struct platform_driver msm_diagfwd_smux_driver;
 
 #endif
diff --git a/drivers/char/diag/diagmem.c b/drivers/char/diag/diagmem.c
index 1a522d5..ab1aa75 100644
--- a/drivers/char/diag/diagmem.c
+++ b/drivers/char/diag/diagmem.c
@@ -51,7 +51,7 @@
 				driver->diag_write_struct_pool, GFP_ATOMIC);
 			}
 		}
-#ifdef CONFIG_DIAG_BRIDGE_CODE
+#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
 	} else if (pool_type == POOL_TYPE_HSIC) {
 		if (driver->diag_hsic_pool) {
 			if (driver->count_hsic_pool < driver->poolsize_hsic) {
@@ -105,7 +105,7 @@
 		} else if (driver->ref_count == 0 && pool_type == POOL_TYPE_ALL)
 			printk(KERN_ALERT "Unable to destroy STRUCT mempool");
 	}
-#ifdef CONFIG_DIAG_BRIDGE_CODE
+#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
 	if (driver->diag_hsic_pool && (driver->hsic_inited == 0)) {
 		if (driver->count_hsic_pool == 0) {
 			mempool_destroy(driver->diag_hdlc_pool);
@@ -156,7 +156,7 @@
 			pr_err("diag: Attempt to free up DIAG driver "
 			   "USB structure mempool which is already free %d ",
 				    driver->count_write_struct_pool);
-#ifdef CONFIG_DIAG_BRIDGE_CODE
+#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
 	} else if (pool_type == POOL_TYPE_HSIC) {
 		if (driver->diag_hsic_pool != NULL &&
 			driver->count_hsic_pool > 0) {
@@ -210,7 +210,7 @@
 		printk(KERN_INFO "Cannot allocate diag USB struct mempool\n");
 }
 
-#ifdef CONFIG_DIAG_BRIDGE_CODE
+#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
 void diagmem_hsic_init(struct diagchar_dev *driver)
 {
 	if (driver->count_hsic_pool == 0)
diff --git a/drivers/char/diag/diagmem.h b/drivers/char/diag/diagmem.h
index 8665c75..36def72f 100644
--- a/drivers/char/diag/diagmem.h
+++ b/drivers/char/diag/diagmem.h
@@ -18,7 +18,7 @@
 void diagmem_free(struct diagchar_dev *driver, void *buf, int pool_type);
 void diagmem_init(struct diagchar_dev *driver);
 void diagmem_exit(struct diagchar_dev *driver, int pool_type);
-#ifdef CONFIG_DIAG_BRIDGE_CODE
+#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
 void diagmem_hsic_init(struct diagchar_dev *driver);
 #endif
 #endif
diff --git a/drivers/char/hw_random/msm_rng.c b/drivers/char/hw_random/msm_rng.c
index 60ca44f..f2e5439 100644
--- a/drivers/char/hw_random/msm_rng.c
+++ b/drivers/char/hw_random/msm_rng.c
@@ -181,7 +181,12 @@
 	msm_rng_dev->base = base;
 
 	/* create a handle for clock control */
-	msm_rng_dev->prng_clk = clk_get(&pdev->dev, "core_clk");
+	if ((pdev->dev.of_node) && (of_property_read_bool(pdev->dev.of_node,
+					"qcom,msm-rng-iface-clk")))
+		msm_rng_dev->prng_clk = clk_get(&pdev->dev,
+							"iface_clk");
+	else
+		msm_rng_dev->prng_clk = clk_get(&pdev->dev, "core_clk");
 	if (IS_ERR(msm_rng_dev->prng_clk)) {
 		dev_err(&pdev->dev, "failed to register clock source\n");
 		error = -EPERM;
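
The probe now prefers "iface_clk" whenever the boolean property qcom,msm-rng-iface-clk is present on the device node. A hypothetical device tree fragment, shown as a comment (only the property name comes from this hunk; the node name, address and compatible string are assumptions):

	/*
	 *	rng@f9bff000 {				// hypothetical node
	 *		compatible = "qcom,msm-rng";	// assumed compatible
	 *		reg = <0xf9bff000 0x200>;
	 *		qcom,msm-rng-iface-clk;		// selects "iface_clk"
	 *	};
	 */
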
diff --git a/drivers/char/msm_rotator.c b/drivers/char/msm_rotator.c
index e1e3ff5..b3843fa 100644
--- a/drivers/char/msm_rotator.c
+++ b/drivers/char/msm_rotator.c
@@ -422,6 +422,7 @@
 	case MDP_Y_CRCB_H2V1:
 	case MDP_Y_CBCR_H2V1:
 	case MDP_Y_CRCB_H1V2:
+	case MDP_Y_CBCR_H1V2:
 		p->num_planes = 2;
 		p->plane_size[0] = w * h;
 		p->plane_size[1] = w * h;
@@ -470,8 +471,24 @@
 				  unsigned int out_chroma_paddr)
 {
 	int bpp;
-
-	if (info->src.format != info->dst.format)
+	uint32_t dst_format;
+	switch (info->src.format) {
+	case MDP_Y_CRCB_H2V1:
+		if (info->rotations & MDP_ROT_90)
+			dst_format = MDP_Y_CRCB_H1V2;
+		else
+			dst_format = info->src.format;
+		break;
+	case MDP_Y_CBCR_H2V1:
+		if (info->rotations & MDP_ROT_90)
+			dst_format = MDP_Y_CBCR_H1V2;
+		else
+			dst_format = info->src.format;
+		break;
+	default:
+		return -EINVAL;
+	}
+	if (info->dst.format != dst_format)
 		return -EINVAL;
 
 	bpp = get_bpp(info->src.format);
@@ -1281,10 +1298,18 @@
 		is_rgb = 1;
 		info.dst.format = info.src.format;
 		break;
+	case MDP_Y_CBCR_H2V1:
+		if (info.rotations & MDP_ROT_90) {
+			info.dst.format = MDP_Y_CBCR_H1V2;
+			break;
+		}
+	case MDP_Y_CRCB_H2V1:
+		if (info.rotations & MDP_ROT_90) {
+			info.dst.format = MDP_Y_CRCB_H1V2;
+			break;
+		}
 	case MDP_Y_CBCR_H2V2:
 	case MDP_Y_CRCB_H2V2:
-	case MDP_Y_CBCR_H2V1:
-	case MDP_Y_CRCB_H2V1:
 	case MDP_YCBCR_H1V1:
 	case MDP_YCRCB_H1V1:
 		info.dst.format = info.src.format;
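
The new mappings exist because a 90-degree rotation swaps the axis along which 4:2:2 chroma is decimated: a horizontally subsampled (H2V1) source naturally produces a vertically subsampled (H1V2) destination, and the per-plane byte counts in the plane-size switch above stay w * h either way. As an illustrative comment (not part of the patch):

	/*
	 * w x h, H2V1 (4:2:2 horizontal), rotated by 90 degrees:
	 *   luma:   w * h samples          ->  h * w samples
	 *   chroma: (w/2) * h CbCr pairs   ->  h * (w/2) pairs, i.e. the
	 *           subsampling is now vertical, which is exactly H1V2
	 */
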
diff --git a/drivers/coresight/coresight-etm.c b/drivers/coresight/coresight-etm.c
index f3fe70f..9f96b19 100644
--- a/drivers/coresight/coresight-etm.c
+++ b/drivers/coresight/coresight-etm.c
@@ -234,6 +234,8 @@
 	uint32_t			timestamp_event;
 	bool				pcsave_impl;
 	bool				pcsave_enable;
+	bool				pcsave_sticky_enable;
+	bool				pcsave_boot_enable;
 };
 
 static struct etm_drvdata *etmdrvdata[NR_CPUS];
@@ -1516,7 +1518,7 @@
 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
 }
 
-static int __etm_store_pcsave(struct etm_drvdata *drvdata, unsigned long val)
+static int ____etm_store_pcsave(struct etm_drvdata *drvdata, unsigned long val)
 {
 	int ret = 0;
 
@@ -1524,7 +1526,6 @@
 	if (ret)
 		return ret;
 
-	get_online_cpus();
 	spin_lock(&drvdata->spinlock);
 	if (val) {
 		if (drvdata->pcsave_enable)
@@ -1535,6 +1536,7 @@
 		if (ret)
 			goto out;
 		drvdata->pcsave_enable = true;
+		drvdata->pcsave_sticky_enable = true;
 
 		dev_info(drvdata->dev, "PC save enabled\n");
 	} else {
@@ -1551,12 +1553,22 @@
 	}
 out:
 	spin_unlock(&drvdata->spinlock);
-	put_online_cpus();
 
 	clk_disable_unprepare(drvdata->clk);
 	return ret;
 }
 
+static int __etm_store_pcsave(struct etm_drvdata *drvdata, unsigned long val)
+{
+	int ret;
+
+	get_online_cpus();
+	ret = ____etm_store_pcsave(drvdata, val);
+	put_online_cpus();
+
+	return ret;
+}
+
 static ssize_t etm_store_pcsave(struct device *dev,
 				struct device_attribute *attr,
 				const char *buf, size_t size)
@@ -1642,6 +1654,13 @@
 		}
 		break;
 
+	case CPU_ONLINE:
+		if (etmdrvdata[cpu] && etmdrvdata[cpu]->pcsave_boot_enable &&
+		    !etmdrvdata[cpu]->pcsave_sticky_enable) {
+			____etm_store_pcsave(etmdrvdata[cpu], 1);
+		}
+		break;
+
 	case CPU_DYING:
 		if (etmdrvdata[cpu] && etmdrvdata[cpu]->enable) {
 			spin_lock(&etmdrvdata[cpu]->spinlock);
@@ -1894,8 +1913,10 @@
 	if (boot_enable)
 		coresight_enable(drvdata->csdev);
 
-	if (drvdata->pcsave_impl && boot_pcsave_enable)
-		__etm_store_pcsave(drvdata, true);
+	if (drvdata->pcsave_impl && boot_pcsave_enable) {
+		__etm_store_pcsave(drvdata, 1);
+		drvdata->pcsave_boot_enable = true;
+	}
 
 	return 0;
 err2:
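
Splitting __etm_store_pcsave() gives the hotplug path a variant without get_online_cpus(): the sysfs store still brackets the operation with get/put_online_cpus(), while the CPU_ONLINE notifier calls the inner helper directly, presumably because taking the hotplug lock from inside a hotplug notifier would deadlock. The resulting call paths, as a comment-only sketch:

	/*
	 * sysfs write:   etm_store_pcsave()
	 *                  -> __etm_store_pcsave()      get_online_cpus()
	 *                       -> ____etm_store_pcsave()
	 *
	 * CPU_ONLINE:    ____etm_store_pcsave()         no get_online_cpus();
	 *                re-applies pcsave_boot_enable unless the setting
	 *                already stuck (pcsave_sticky_enable)
	 */
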
diff --git a/drivers/cpufreq/cpufreq_gov_msm.c b/drivers/cpufreq/cpufreq_gov_msm.c
index 6ddbf4e..8f086aa 100644
--- a/drivers/cpufreq/cpufreq_gov_msm.c
+++ b/drivers/cpufreq/cpufreq_gov_msm.c
@@ -253,6 +253,7 @@
 						msm_dcvs_freq_set,
 						msm_dcvs_freq_get,
 						msm_dcvs_idle_notifier,
+						NULL,
 						sensor);
 		if (gov->dcvs_core_id < 0) {
 			pr_err("Unable to register core for %d\n", cpu);
diff --git a/drivers/crypto/msm/qce50.c b/drivers/crypto/msm/qce50.c
index 4361263..d605a61 100644
--- a/drivers/crypto/msm/qce50.c
+++ b/drivers/crypto/msm/qce50.c
@@ -46,7 +46,7 @@
 	uint32_t cnt;
 };
 static struct bam_registration_info bam_registry;
-
+static bool ce_bam_registered;
 /*
  * CE HW device structure.
  * Each engine has an instance of the structure.
@@ -250,7 +250,7 @@
 
 		pce = cmdlistinfo->go_proc;
 		if (i == authk_size_in_word) {
-			pce->addr = (uint32_t)(CRYPTO_GOPROC_OEM_KEY_REG +
+			pce->addr = (uint32_t)(CRYPTO_GOPROC_QC_KEY_REG +
 							pce_dev->phy_iobase);
 		} else {
 			pce->addr = (uint32_t)(CRYPTO_GOPROC_REG +
@@ -434,7 +434,7 @@
 	pce = cmdlistinfo->go_proc;
 	if (i == enck_size_in_word) {
 		use_hw_key = true;
-		pce->addr = (uint32_t)(CRYPTO_GOPROC_OEM_KEY_REG +
+		pce->addr = (uint32_t)(CRYPTO_GOPROC_QC_KEY_REG +
 						pce_dev->phy_iobase);
 	} else {
 		pce->addr = (uint32_t)(CRYPTO_GOPROC_REG +
@@ -1157,15 +1157,21 @@
 	pr_debug("bam virtual base=0x%x\n", (u32)bam.virt_addr);
 
 	mutex_lock(&bam_register_cnt);
+	if (ce_bam_registered == false) {
+		bam_registry.handle = 0;
+		bam_registry.cnt = 0;
+	}
 	if ((bam_registry.handle == 0) && (bam_registry.cnt == 0)) {
 		/* Register CE Peripheral BAM device to SPS driver */
 		rc = sps_register_bam_device(&bam, &bam_registry.handle);
 		if (rc) {
+			mutex_unlock(&bam_register_cnt);
 			pr_err("sps_register_bam_device() failed! err=%d", rc);
 			return -EIO;
 		}
 		bam_registry.cnt++;
 		register_bam = true;
+		ce_bam_registered = true;
 	} else {
 		   bam_registry.cnt++;
 	}
@@ -1191,9 +1197,14 @@
 sps_connect_consumer_err:
 	qce_sps_exit_ep_conn(pce_dev, &pce_dev->ce_sps.producer);
 sps_connect_producer_err:
-	if (register_bam)
+	if (register_bam) {
+		mutex_lock(&bam_register_cnt);
 		sps_deregister_bam_device(pce_dev->ce_sps.bam_handle);
-
+		ce_bam_registered = false;
+		bam_registry.handle = 0;
+		bam_registry.cnt = 0;
+		mutex_unlock(&bam_register_cnt);
+	}
 	return rc;
 }
 
@@ -2787,21 +2798,5 @@
 }
 EXPORT_SYMBOL(qce_hw_support);
 
-static int __init qce_init(void)
-{
-	bam_registry.handle = 0;
-	bam_registry.cnt = 0;
-	return 0;
-}
-
-static void __exit qce_exit(void)
-{
-	bam_registry.handle = 0;
-	bam_registry.cnt = 0;
-}
-
-module_init(qce_init);
-module_exit(qce_exit);
-
 MODULE_LICENSE("GPL v2");
 MODULE_DESCRIPTION("Crypto Engine driver");
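
With qce_init()/qce_exit() gone, the BAM bookkeeping is reset lazily on the first open after a module (re)load instead of at module init. The invariant implied by the hunks above, as a comment-only sketch:

	/*
	 * first open, !ce_bam_registered : zero handle/cnt, sps_register_bam_device(),
	 *                                  cnt = 1, ce_bam_registered = true
	 * later opens                    : cnt++ only
	 * error after registering here   : sps_deregister_bam_device(), clear
	 *                                  handle, cnt and flag (under the mutex)
	 */
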
diff --git a/drivers/gpu/ion/msm/msm_ion.c b/drivers/gpu/ion/msm/msm_ion.c
index 20f84d6..8699178 100644
--- a/drivers/gpu/ion/msm/msm_ion.c
+++ b/drivers/gpu/ion/msm/msm_ion.c
@@ -616,18 +616,15 @@
 	if (end < start)
 		goto out;
 
-	down_read(&mm->mmap_sem);
 	vma = find_vma(mm, start);
 	if (vma && vma->vm_start < end) {
 		if (start < vma->vm_start)
-			goto out_up;
+			goto out;
 		if (end > vma->vm_end)
-			goto out_up;
+			goto out;
 		ret = 0;
 	}
 
-out_up:
-	up_read(&mm->mmap_sem);
 out:
 	return ret;
 }
@@ -645,20 +642,12 @@
 		unsigned long start, end;
 		struct ion_handle *handle = NULL;
 		int ret;
+		struct mm_struct *mm = current->active_mm;
 
 		if (copy_from_user(&data, (void __user *)arg,
 					sizeof(struct ion_flush_data)))
 			return -EFAULT;
 
-		start = (unsigned long) data.vaddr;
-		end = (unsigned long) data.vaddr + data.length;
-
-		if (check_vaddr_bounds(start, end)) {
-			pr_err("%s: virtual address %p is out of bounds\n",
-				__func__, data.vaddr);
-			return -EINVAL;
-		}
-
 		if (!data.handle) {
 			handle = ion_import_dma_buf(client, data.fd);
 			if (IS_ERR(handle)) {
@@ -668,11 +657,27 @@
 			}
 		}
 
+		down_read(&mm->mmap_sem);
+
+		start = (unsigned long) data.vaddr;
+		end = (unsigned long) data.vaddr + data.length;
+
+		if (check_vaddr_bounds(start, end)) {
+			up_read(&mm->mmap_sem);
+			pr_err("%s: virtual address %p is out of bounds\n",
+				__func__, data.vaddr);
+			if (!data.handle)
+				ion_free(client, handle);
+			return -EINVAL;
+		}
+
 		ret = ion_do_cache_op(client,
 				data.handle ? data.handle : handle,
 				data.vaddr, data.offset, data.length,
 				cmd);
 
+		up_read(&mm->mmap_sem);
+
 		if (!data.handle)
 			ion_free(client, handle);
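
The reordering above makes the dma-buf import happen before mmap_sem is taken and then holds the semaphore across both the bounds check and the cache operation, so the VMA that is validated is the one the operation runs against; the new error exit also drops a locally imported handle. A simplified sketch of the resulting flow (the data.handle vs. imported-handle selection is elided):

	handle = ion_import_dma_buf(client, data.fd);	/* outside mmap_sem */

	down_read(&mm->mmap_sem);
	if (check_vaddr_bounds(start, end)) {
		up_read(&mm->mmap_sem);
		ion_free(client, handle);
		return -EINVAL;
	}
	ret = ion_do_cache_op(client, handle, data.vaddr,
			      data.offset, data.length, cmd);
	up_read(&mm->mmap_sem);
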
 
diff --git a/drivers/gpu/msm/adreno.c b/drivers/gpu/msm/adreno.c
index 67cb34a..abaff20 100644
--- a/drivers/gpu/msm/adreno.c
+++ b/drivers/gpu/msm/adreno.c
@@ -253,6 +253,13 @@
 	if (result)
 		goto unmap_memstore_desc;
 
+	/*
+	 * Set the mpu end to the last "normal" global memory we use.
+	 * For the IOMMU, this will be used to restrict access to the
+	 * mapped registers.
+	 */
+	device->mh.mpu_range = device->mmu.setstate_memory.gpuaddr +
+				device->mmu.setstate_memory.size;
 	return result;
 
 unmap_memstore_desc:
@@ -1769,12 +1776,6 @@
 	return status;
 }
 
-static inline void adreno_poke(struct kgsl_device *device)
-{
-	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
-	adreno_regwrite(device, REG_CP_RB_WPTR, adreno_dev->ringbuffer.wptr);
-}
-
 static int adreno_ringbuffer_drain(struct kgsl_device *device,
 	unsigned int *regs)
 {
@@ -1795,12 +1796,8 @@
 
 	wait = jiffies + msecs_to_jiffies(100);
 
-	adreno_poke(device);
-
 	do {
 		if (time_after(jiffies, wait)) {
-			adreno_poke(device);
-
 			/* Check to see if the core is hung */
 			if (adreno_hang_detect(device, regs))
 				return -ETIMEDOUT;
@@ -2160,8 +2157,24 @@
 	if (!adreno_dev->fast_hang_detect)
 		return 0;
 
-	if (is_adreno_rbbm_status_idle(device))
+	if (is_adreno_rbbm_status_idle(device)) {
+
+		/*
+		 * On A2XX if the RPTR != WPTR and the device is idle, then
+		 * the last write to WPTR probably failed to latch so write it
+		 * again
+		 */
+
+		if (adreno_is_a2xx(adreno_dev)) {
+			unsigned int rptr;
+			adreno_regread(device, REG_CP_RB_RPTR, &rptr);
+			if (rptr != adreno_dev->ringbuffer.wptr)
+				adreno_regwrite(device, REG_CP_RB_WPTR,
+					adreno_dev->ringbuffer.wptr);
+		}
+
 		return 0;
+	}
 
 	for (i = 0; i < hang_detect_regs_count; i++) {
 
@@ -2179,178 +2192,225 @@
 	return hang_detected;
 }
 
-
-/* MUST be called with the device mutex held */
-static int adreno_waittimestamp(struct kgsl_device *device,
-				struct kgsl_context *context,
-				unsigned int timestamp,
-				unsigned int msecs)
+/**
+ * adreno_handle_hang - Process a hang detected in adreno_waittimestamp
+ * @device - pointer to a KGSL device structure
+ * @context - pointer to the active KGSL context
+ * @timestamp - the timestamp that the process was waiting for
+ *
+ * Process a possible GPU hang and try to recover from it cleanly
+ */
+static int adreno_handle_hang(struct kgsl_device *device,
+	struct kgsl_context *context, unsigned int timestamp)
 {
-	long status = 0;
-	uint io = 1;
-	static uint io_cnt;
 	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
-	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
-	struct adreno_context *adreno_ctx = context ? context->devctxt : NULL;
-	int retries = 0;
-	unsigned int ts_issued;
 	unsigned int context_id = _get_context_id(context);
-	unsigned int time_elapsed = 0;
-	unsigned int prev_reg_val[hang_detect_regs_count];
-	unsigned int wait;
-	unsigned int retry_ts_cmp = 0;
-	unsigned int retry_ts_cmp_msecs = KGSL_SYNCOBJ_SERVER_TIMEOUT;
+	unsigned int ts_issued;
 
-	memset(prev_reg_val, 0, sizeof(prev_reg_val));
+	/* Do one last check to see if we somehow made it through */
+	if (kgsl_check_timestamp(device, context, timestamp))
+		return 0;
 
 	ts_issued = adreno_dev->ringbuffer.timestamp[context_id];
 
-	/* Don't wait forever, set a max value for now */
-	if (msecs == KGSL_TIMEOUT_DEFAULT)
-		msecs = adreno_dev->wait_timeout;
-
-	/*
-	 * With user generated ts, if this check fails perform this check
-	 * again after 'retry_ts_cmp_msecs' milliseconds.
-	 */
-	if (timestamp_cmp(timestamp, ts_issued) > 0) {
-		if (adreno_ctx == NULL ||
-			!(adreno_ctx->flags & CTXT_FLAGS_USER_GENERATED_TS)) {
-			if (context && !context->wait_on_invalid_ts) {
-				KGSL_DRV_ERR(device,
-				"Cannot wait for invalid ts <%d:0x%x>, "
-				"last issued ts <%d:0x%x>\n",
-				context_id, timestamp, context_id, ts_issued);
-				/*
-				 * Prevent the above message from spamming the
-				 * kernel logs and causing a watchdog
-				 */
-				context->wait_on_invalid_ts = true;
-			}
-			status = -EINVAL;
-			goto done;
-		} else
-			retry_ts_cmp = 1;
-	} else if (context && context->wait_on_invalid_ts) {
-		/* Once we wait for a valid ts reset the invalid wait flag */
-		context->wait_on_invalid_ts = false;
-	}
-
-	/*
-	 * Make the first timeout interval 100 msecs and then try to kick the
-	 * wptr again.  This helps to ensure the wptr is updated properly.  If
-	 * the requested timeout is less than 100 msecs, then wait 20msecs which
-	 * is the minimum amount of time we can safely wait at 100HZ
-	 */
-
-	if (msecs == 0 || msecs >= 100)
-		wait = 100;
-	else
-		wait = 20;
-
-	do {
-		/*
-		 * If the context ID is invalid, we are in a race with
-		 * the context being destroyed by userspace so bail.
-		 */
-		if (context_id == KGSL_CONTEXT_INVALID) {
-			KGSL_DRV_WARN(device, "context was detached");
-			status = -EINVAL;
-			goto done;
-		}
-		if (kgsl_check_timestamp(device, context, timestamp)) {
-			/* if the timestamp happens while we're not
-			 * waiting, there's a chance that an interrupt
-			 * will not be generated and thus the timestamp
-			 * work needs to be queued.
-			 */
-			queue_work(device->work_queue, &device->ts_expired_ws);
-			status = 0;
-			goto done;
-		}
-		adreno_poke(device);
-		io_cnt = (io_cnt + 1) % 100;
-		if (io_cnt <
-		    pwr->pwrlevels[pwr->active_pwrlevel].io_fraction)
-			io = 0;
-
-		if ((retries > 0) &&
-			(adreno_hang_detect(device, prev_reg_val)))
-			goto hang_dump;
-
-		mutex_unlock(&device->mutex);
-		/* We need to make sure that the process is
-		 * placed in wait-q before its condition is called
-		 */
-		status = kgsl_wait_event_interruptible_timeout(
-				device->wait_queue,
-				kgsl_check_interrupt_timestamp(device,
-					context, timestamp),
-				msecs_to_jiffies(wait), io);
-
-		mutex_lock(&device->mutex);
-
-		if (status > 0) {
-			/*completed before the wait finished */
-			status = 0;
-			goto done;
-		} else if (status < 0) {
-			/*an error occurred*/
-			goto done;
-		}
-		/*this wait timed out*/
-
-		time_elapsed += wait;
-		wait = KGSL_TIMEOUT_PART;
-
-		if (!retry_ts_cmp)
-			retries++;
-		else if (time_elapsed >= retry_ts_cmp_msecs) {
-			ts_issued =
-				adreno_dev->ringbuffer.timestamp[context_id];
-			if (timestamp_cmp(timestamp, ts_issued) > 0) {
-				if (context && !context->wait_on_invalid_ts) {
-					KGSL_DRV_ERR(device,
-					"Cannot wait for user-generated ts <%d:0x%x>, "
-					"not submitted within server timeout period. "
-					"last issued ts <%d:0x%x>\n",
-					context_id, timestamp, context_id,
-					ts_issued);
-					context->wait_on_invalid_ts = true;
-				}
-				status = -EINVAL;
-				goto done;
-			} else if (context && context->wait_on_invalid_ts) {
-				context->wait_on_invalid_ts = false;
-			}
-			retry_ts_cmp = 0;
-		}
-
-	} while (!msecs || time_elapsed < msecs);
-
-hang_dump:
-	/*
-	 * Check if timestamp has retired here because we may have hit
-	 * recovery which can take some time and cause waiting threads
-	 * to timeout
-	 */
-	if (kgsl_check_timestamp(device, context, timestamp))
-		goto done;
-	status = -ETIMEDOUT;
 	KGSL_DRV_ERR(device,
 		     "Device hang detected while waiting for timestamp: "
 		     "<%d:0x%x>, last submitted timestamp: <%d:0x%x>, "
 		     "wptr: 0x%x\n",
 		      context_id, timestamp, context_id, ts_issued,
 		      adreno_dev->ringbuffer.wptr);
-	if (!adreno_dump_and_recover(device)) {
-		/* The timestamp that this process wanted
-		 * to wait on may be invalid or expired now
-		 * after successful recovery */
-			status = 0;
+
+	/* Return 0 after a successful recovery */
+	if (!adreno_dump_and_recover(device))
+		return 0;
+
+	return -ETIMEDOUT;
+}
+
+static int _check_pending_timestamp(struct kgsl_device *device,
+		struct kgsl_context *context, unsigned int timestamp)
+{
+	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+	unsigned int context_id = _get_context_id(context);
+	unsigned int ts_issued;
+
+	if (context_id == KGSL_CONTEXT_INVALID)
+		return -EINVAL;
+
+	ts_issued = adreno_dev->ringbuffer.timestamp[context_id];
+
+	if (timestamp_cmp(timestamp, ts_issued) <= 0)
+		return 0;
+
+	if (context && !context->wait_on_invalid_ts) {
+		KGSL_DRV_ERR(device, "Cannot wait for invalid ts <%d:0x%x>, last issued ts <%d:0x%x>\n",
+			context_id, timestamp, context_id, ts_issued);
+
+		/* Only print this message once */
+		context->wait_on_invalid_ts = true;
 	}
-done:
-	return (int)status;
+
+	return -EINVAL;
+}
+
+/**
+ * adreno_waittimestamp - sleep while waiting for the specified timestamp
+ * @device - pointer to a KGSL device structure
+ * @context - pointer to the active kgsl context
+ * @timestamp - GPU timestamp to wait for
+ * @msecs - amount of time to wait (in milliseconds)
+ *
+ * Wait 'msecs' milliseconds for the specified timestamp to expire. Wake up
+ * every KGSL_TIMEOUT_PART milliseconds to check for a device hang and process
+ * one if it happened.  Otherwise, spend most of our time in an interruptible
+ * wait for the timestamp interrupt to be processed.  This function must be
+ * called with the mutex already held.
+ */
+static int adreno_waittimestamp(struct kgsl_device *device,
+				struct kgsl_context *context,
+				unsigned int timestamp,
+				unsigned int msecs)
+{
+	static unsigned int io_cnt;
+	struct adreno_context *adreno_ctx = context ? context->devctxt : NULL;
+	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
+	unsigned int context_id = _get_context_id(context);
+	unsigned int prev_reg_val[hang_detect_regs_count];
+	unsigned int time_elapsed = 0;
+	unsigned int wait;
+	int ts_compare = 1;
+	int io, ret = -ETIMEDOUT;
+
+	/* Get out early if the context has already been destroyed */
+
+	if (context_id == KGSL_CONTEXT_INVALID) {
+		KGSL_DRV_WARN(device, "context was detached");
+		return -EINVAL;
+	}
+
+	/*
+	 * Check to see if the requested timestamp is "newer" than the last
+	 * timestamp issued. If it is, complain once and return an error.  Only
+	 * print the message once per context so that badly behaving
+	 * applications don't spam the logs
+	 */
+
+	if (adreno_ctx && !(adreno_ctx->flags & CTXT_FLAGS_USER_GENERATED_TS)) {
+		if (_check_pending_timestamp(device, context, timestamp))
+			return -EINVAL;
+
+		/* Reset the invalid timestamp flag on a valid wait */
+		context->wait_on_invalid_ts = false;
+	}
+
+
+	/* Clear the registers used for hang detection */
+	memset(prev_reg_val, 0, sizeof(prev_reg_val));
+
+	/*
+	 * On the first time through the loop only wait 100ms.
+	 * This gives enough time for the engine to start moving and oddly
+	 * provides better hang detection results than just going the full
+	 * KGSL_TIMEOUT_PART right off the bat. The exception to this rule
+	 * is if msecs happens to be < 100ms then just use the full timeout
+	 */
+
+	wait = 100;
+
+	do {
+		long status;
+
+		if (msecs && (wait > (msecs - time_elapsed)))
+			wait = msecs - time_elapsed;
+
+		/*
+		 * if the timestamp happens while we're not
+		 * waiting, there's a chance that an interrupt
+		 * will not be generated and thus the timestamp
+		 * work needs to be queued.
+		 */
+
+		if (kgsl_check_timestamp(device, context, timestamp)) {
+			queue_work(device->work_queue, &device->ts_expired_ws);
+			ret = 0;
+			break;
+		}
+
+		/* Check to see if the GPU is hung */
+		if (adreno_hang_detect(device, prev_reg_val)) {
+			ret = adreno_handle_hang(device, context, timestamp);
+			break;
+		}
+
+		/*
+		 * For proper power accounting sometimes we need to call
+		 * io_wait_interruptible_timeout and sometimes we need to call
+		 * plain old wait_interruptible_timeout. We call the regular
+		 * timeout N times out of 100, where N is a number specified by
+		 * the current power level
+		 */
+
+		io_cnt = (io_cnt + 1) % 100;
+		io = (io_cnt < pwr->pwrlevels[pwr->active_pwrlevel].io_fraction)
+			? 0 : 1;
+
+		mutex_unlock(&device->mutex);
+
+		/* Wait for a timestamp event */
+		status = kgsl_wait_event_interruptible_timeout(
+			device->wait_queue,
+			kgsl_check_interrupt_timestamp(device, context,
+				timestamp), msecs_to_jiffies(wait), io);
+
+		mutex_lock(&device->mutex);
+
+		/*
+		 * If status is non-zero then either the condition was satisfied
+		 * or there was an error.  In either event, this is the end of
+		 * the line for us
+		 */
+
+		if (status != 0) {
+			ret = (status > 0) ? 0 : (int) status;
+			break;
+		}
+
+		time_elapsed += wait;
+
+		/* If user-specified timestamps are being used, wait at least
+		 * KGSL_SYNCOBJ_SERVER_TIMEOUT msecs for the user driver to
+		 * issue an IB for a timestamp before checking to see if the
+		 * current timestamp we are waiting for is valid or not
+		 */
+
+		if (ts_compare && (adreno_ctx &&
+			(adreno_ctx->flags & CTXT_FLAGS_USER_GENERATED_TS))) {
+			if (time_elapsed > KGSL_SYNCOBJ_SERVER_TIMEOUT) {
+				ret = _check_pending_timestamp(device, context,
+					timestamp);
+				if (ret)
+					break;
+
+				/* Don't do this check again */
+				ts_compare = 0;
+
+				/*
+				 * Reset the invalid timestamp flag on a valid
+				 * wait
+				 */
+				context->wait_on_invalid_ts = false;
+			}
+		}
+
+		/*
+		 * all subsequent trips through the loop wait the full
+		 * KGSL_TIMEOUT_PART interval
+		 */
+		wait = KGSL_TIMEOUT_PART;
+
+	} while (!msecs || time_elapsed < msecs);
+
+	return ret;
 }
 
 static unsigned int adreno_readtimestamp(struct kgsl_device *device,
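
In the rewritten adreno_waittimestamp() the io flag handed to kgsl_wait_event_interruptible_timeout() follows a rolling 0..99 counter, so the mix of plain and io-accounted sleeps tracks the active power level's io_fraction. A small worked example as a comment:

	/*
	 * Example with io_fraction = 33 at the active power level:
	 *   io_cnt  0..32  -> io = 0, plain wait         (~33 of every 100)
	 *   io_cnt 33..99  -> io = 1, io-accounted wait  (~67 of every 100)
	 */
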
diff --git a/drivers/gpu/msm/adreno_ringbuffer.c b/drivers/gpu/msm/adreno_ringbuffer.c
index 9648f27..27343c5 100644
--- a/drivers/gpu/msm/adreno_ringbuffer.c
+++ b/drivers/gpu/msm/adreno_ringbuffer.c
@@ -555,6 +555,9 @@
 		total_sizedwords += 4; /* global timestamp for recovery*/
 	}
 
+	if (adreno_is_a20x(adreno_dev))
+		total_sizedwords += 2; /* CACHE_FLUSH */
+
 	ringcmds = adreno_ringbuffer_allocspace(rb, context, total_sizedwords);
 	if (!ringcmds) {
 		/*
@@ -672,6 +675,12 @@
 				rb->timestamp[KGSL_MEMSTORE_GLOBAL]);
 	}
 
+	if (adreno_is_a20x(adreno_dev)) {
+		GSL_RB_WRITE(ringcmds, rcmd_gpu,
+			cp_type3_packet(CP_EVENT_WRITE, 1));
+		GSL_RB_WRITE(ringcmds, rcmd_gpu, CACHE_FLUSH);
+	}
+
 	if (context) {
 		/* Conditional execution based on memory values */
 		GSL_RB_WRITE(ringcmds, rcmd_gpu,
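
The sizing hunk reserves exactly two extra dwords for A20x because the trailing cache flush is a single-payload type-3 packet, matching the two GSL_RB_WRITE() calls added above:

	/*
	 * total_sizedwords += 2 on A20x:
	 *   [0] cp_type3_packet(CP_EVENT_WRITE, 1)   packet header
	 *   [1] CACHE_FLUSH                           event payload
	 */
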
diff --git a/drivers/gpu/msm/kgsl.c b/drivers/gpu/msm/kgsl.c
index b8adbe67..c040bf3 100644
--- a/drivers/gpu/msm/kgsl.c
+++ b/drivers/gpu/msm/kgsl.c
@@ -193,6 +193,52 @@
 }
 EXPORT_SYMBOL(kgsl_cancel_events);
 
+int kgsl_memfree_hist_init(void)
+{
+	void *base;
+
+	base = kzalloc(KGSL_MEMFREE_HIST_SIZE, GFP_KERNEL);
+	kgsl_driver.memfree_hist.base_hist_rb = base;
+	if (base == NULL)
+		return -ENOMEM;
+	kgsl_driver.memfree_hist.size = KGSL_MEMFREE_HIST_SIZE;
+	kgsl_driver.memfree_hist.wptr = base;
+	return 0;
+}
+
+void kgsl_memfree_hist_exit(void)
+{
+	kfree(kgsl_driver.memfree_hist.base_hist_rb);
+	kgsl_driver.memfree_hist.base_hist_rb = NULL;
+}
+
+void kgsl_memfree_hist_set_event(unsigned int pid, unsigned int gpuaddr,
+			unsigned int size, int flags)
+{
+	struct kgsl_memfree_hist_elem *p;
+
+	void *base = kgsl_driver.memfree_hist.base_hist_rb;
+	int rbsize = kgsl_driver.memfree_hist.size;
+
+	if (base == NULL)
+		return;
+
+	mutex_lock(&kgsl_driver.memfree_hist_mutex);
+	p = kgsl_driver.memfree_hist.wptr;
+	p->pid = pid;
+	p->gpuaddr = gpuaddr;
+	p->size = size;
+	p->flags = flags;
+
+	kgsl_driver.memfree_hist.wptr++;
+	if ((void *)kgsl_driver.memfree_hist.wptr >= base+rbsize) {
+		kgsl_driver.memfree_hist.wptr =
+			(struct kgsl_memfree_hist_elem *)base;
+	}
+	mutex_unlock(&kgsl_driver.memfree_hist_mutex);
+}
+
+
 /* kgsl_get_mem_entry - get the mem_entry structure for the specified object
  * @device - Pointer to the device structure
  * @ptbase - the pagetable base of the object
@@ -853,13 +899,6 @@
 	dev_priv->device = device;
 	filep->private_data = dev_priv;
 
-	/* Get file (per process) private struct */
-	dev_priv->process_priv = kgsl_get_process_private(dev_priv);
-	if (dev_priv->process_priv ==  NULL) {
-		result = -ENOMEM;
-		goto err_freedevpriv;
-	}
-
 	mutex_lock(&device->mutex);
 	kgsl_check_suspended(device);
 
@@ -871,21 +910,38 @@
 
 		if (result) {
 			mutex_unlock(&device->mutex);
-			goto err_putprocess;
+			goto err_freedevpriv;
 		}
 		kgsl_pwrctrl_set_state(device, KGSL_STATE_ACTIVE);
 	}
 	device->open_count++;
 	mutex_unlock(&device->mutex);
 
+	/*
+	 * Get file (per process) private struct. This must be done
+	 * after the first start so that the global pagetable mappings
+	 * are set up before we create the per-process pagetable.
+	 */
+	dev_priv->process_priv = kgsl_get_process_private(dev_priv);
+	if (dev_priv->process_priv ==  NULL) {
+		result = -ENOMEM;
+		goto err_stop;
+	}
+
 	KGSL_DRV_INFO(device, "Initialized %s: mmu=%s pagetable_count=%d\n",
 		device->name, kgsl_mmu_enabled() ? "on" : "off",
 		kgsl_pagetable_count);
 
 	return result;
 
-err_putprocess:
-	kgsl_put_process_private(device, dev_priv->process_priv);
+err_stop:
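+	/* Undo the device start performed above */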
+	mutex_lock(&device->mutex);
+	device->open_count--;
+	if (device->open_count == 0) {
+		result = device->ftbl->stop(device);
+		kgsl_pwrctrl_set_state(device, KGSL_STATE_INIT);
+	}
+	mutex_unlock(&device->mutex);
 err_freedevpriv:
 	filep->private_data = NULL;
 	kfree(dev_priv);
@@ -1349,6 +1405,13 @@
 
 	if (entry) {
 		trace_kgsl_mem_free(entry);
+
+		kgsl_memfree_hist_set_event(
+			entry->priv->pid,
+			entry->memdesc.gpuaddr,
+			entry->memdesc.size,
+			entry->memdesc.flags);
+
 		kgsl_mem_entry_detach_process(entry);
 	} else {
 		KGSL_CORE_ERR("invalid gpuaddr %08x\n", param->gpuaddr);
@@ -2328,6 +2391,8 @@
 	.process_mutex = __MUTEX_INITIALIZER(kgsl_driver.process_mutex),
 	.ptlock = __SPIN_LOCK_UNLOCKED(kgsl_driver.ptlock),
 	.devlock = __MUTEX_INITIALIZER(kgsl_driver.devlock),
+	.memfree_hist_mutex =
+		__MUTEX_INITIALIZER(kgsl_driver.memfree_hist_mutex),
 };
 EXPORT_SYMBOL(kgsl_driver);
 
@@ -2658,6 +2723,7 @@
 		kgsl_driver.class = NULL;
 	}
 
+	kgsl_memfree_hist_exit();
 	unregister_chrdev_region(kgsl_driver.major, KGSL_DEVICE_MAX);
 }
 
@@ -2729,6 +2795,9 @@
 			goto err;
 	}
 
+	if (kgsl_memfree_hist_init())
+		KGSL_CORE_ERR("failed to init memfree_hist\n");
+
 	return 0;
 
 err:
diff --git a/drivers/gpu/msm/kgsl.h b/drivers/gpu/msm/kgsl.h
index 17a5b67..d22cb6d 100644
--- a/drivers/gpu/msm/kgsl.h
+++ b/drivers/gpu/msm/kgsl.h
@@ -71,6 +71,23 @@
 #define KGSL_STATS_ADD(_size, _stat, _max) \
 	do { _stat += (_size); if (_stat > _max) _max = _stat; } while (0)
 
+
+#define KGSL_MEMFREE_HIST_SIZE	((int)(PAGE_SIZE * 2))
+
+struct kgsl_memfree_hist_elem {
+	unsigned int pid;
+	unsigned int gpuaddr;
+	unsigned int size;
+	unsigned int flags;
+};
+
+struct kgsl_memfree_hist {
+	void *base_hist_rb;
+	unsigned int size;
+	struct kgsl_memfree_hist_elem *wptr;
+};
+
+
 struct kgsl_device;
 
 struct kgsl_driver {
@@ -98,6 +115,9 @@
 
 	void *ptpool;
 
+	struct mutex memfree_hist_mutex;
+	struct kgsl_memfree_hist memfree_hist;
+
 	struct {
 		unsigned int vmalloc;
 		unsigned int vmalloc_max;
diff --git a/drivers/gpu/msm/kgsl_debugfs.c b/drivers/gpu/msm/kgsl_debugfs.c
index 52097dc..07a5ff4 100644
--- a/drivers/gpu/msm/kgsl_debugfs.c
+++ b/drivers/gpu/msm/kgsl_debugfs.c
@@ -106,6 +106,52 @@
 KGSL_DEBUGFS_LOG(mem_log);
 KGSL_DEBUGFS_LOG(pwr_log);
 
+static int memfree_hist_print(struct seq_file *s, void *unused)
+{
+	void *base = kgsl_driver.memfree_hist.base_hist_rb;
+
+	struct kgsl_memfree_hist_elem *wptr = kgsl_driver.memfree_hist.wptr;
+	struct kgsl_memfree_hist_elem *p;
+	char str[16];
+
+	seq_printf(s, "%8s %8s %8s %11s\n",
+			"pid", "gpuaddr", "size", "flags");
+
+	mutex_lock(&kgsl_driver.memfree_hist_mutex);
+	p = wptr;
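+	/* Walk once around the ring starting at the write pointer, oldest entry first */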
+	for (;;) {
+		kgsl_get_memory_usage(str, sizeof(str), p->flags);
+		/*
+		 * If the ring buffer is not full yet, its unused
+		 * entries have size == 0; skip them.
+		 */
+		if (p->size)
+			seq_printf(s, "%8d %08x %8d %11s\n",
+				p->pid, p->gpuaddr, p->size, str);
+		p++;
+		if ((void *)p >= base + kgsl_driver.memfree_hist.size)
+			p = (struct kgsl_memfree_hist_elem *) base;
+
+		if (p == kgsl_driver.memfree_hist.wptr)
+			break;
+	}
+	mutex_unlock(&kgsl_driver.memfree_hist_mutex);
+	return 0;
+}
+
+static int memfree_hist_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, memfree_hist_print, inode->i_private);
+}
+
+static const struct file_operations memfree_hist_fops = {
+	.open = memfree_hist_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
 void kgsl_device_debugfs_init(struct kgsl_device *device)
 {
 	if (kgsl_debugfs_dir && !IS_ERR(kgsl_debugfs_dir))
@@ -131,6 +177,8 @@
 				&mem_log_fops);
 	debugfs_create_file("log_level_pwr", 0644, device->d_debugfs, device,
 				&pwr_log_fops);
+	debugfs_create_file("memfree_history", 0444, device->d_debugfs, device,
+				&memfree_hist_fops);
 
 	/* Create postmortem dump control files */
 
@@ -190,7 +238,7 @@
 		entry = rb_entry(node, struct kgsl_mem_entry, node);
 		m = &entry->memdesc;
 
-		flags[0] = m->priv & KGSL_MEMDESC_GLOBAL ?  'g' : '-';
+		flags[0] = kgsl_memdesc_is_global(m) ?  'g' : '-';
 		flags[1] = m->flags & KGSL_MEMFLAGS_GPUREADONLY ? 'r' : '-';
 		flags[2] = get_alignflag(m);
 		flags[3] = '\0';
diff --git a/drivers/gpu/msm/kgsl_iommu.c b/drivers/gpu/msm/kgsl_iommu.c
index 07ea48e..1bccd4d 100644
--- a/drivers/gpu/msm/kgsl_iommu.c
+++ b/drivers/gpu/msm/kgsl_iommu.c
@@ -702,6 +702,70 @@
 	}
 }
 
+/*
+ * kgsl_iommu_setup_regs - map iommu registers into a pagetable
+ * @mmu: Pointer to mmu structure
+ * @pt: the pagetable
+ *
+ * To do pagetable switches from the GPU command stream, the IOMMU
+ * registers need to be mapped into the GPU's pagetable. This function
+ * is used differently on different targets. On 8960, the registers
+ * are mapped into every pagetable during kgsl_setup_pt(). On
+ * all other targets, the registers are mapped only into the second
+ * context bank.
+ *
+ * Return - 0 on success else error code
+ */
+static int kgsl_iommu_setup_regs(struct kgsl_mmu *mmu,
+				    struct kgsl_pagetable *pt)
+{
+	int status;
+	int i = 0;
+	struct kgsl_iommu *iommu = mmu->priv;
+
+	if (!msm_soc_version_supports_iommu_v1())
+		return 0;
+
+	for (i = 0; i < iommu->unit_count; i++) {
+		iommu->iommu_units[i].reg_map.priv |= KGSL_MEMDESC_GLOBAL;
+		status = kgsl_mmu_map(pt,
+				&(iommu->iommu_units[i].reg_map),
+				GSL_PT_PAGE_RV | GSL_PT_PAGE_WV);
+		if (status) {
+			iommu->iommu_units[i].reg_map.priv &=
+				~KGSL_MEMDESC_GLOBAL;
+			goto err;
+		}
+	}
+	return 0;
+err:
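+	/* Unmap any register regions that were mapped before the failure */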
+	for (i--; i >= 0; i--) {
+		kgsl_mmu_unmap(pt,
+				&(iommu->iommu_units[i].reg_map));
+		iommu->iommu_units[i].reg_map.priv &= ~KGSL_MEMDESC_GLOBAL;
+	}
+	return status;
+}
+
+/*
+ * kgsl_iommu_cleanup_regs - unmap iommu registers from a pagetable
+ * @mmu: Pointer to mmu structure
+ * @pt: the pagetable
+ *
+ * Removes mappings created by kgsl_iommu_setup_regs().
+ */
+static void kgsl_iommu_cleanup_regs(struct kgsl_mmu *mmu,
+					struct kgsl_pagetable *pt)
+{
+	struct kgsl_iommu *iommu = mmu->priv;
+	int i;
+	for (i = 0; i < iommu->unit_count; i++)
+		kgsl_mmu_unmap(pt, &(iommu->iommu_units[i].reg_map));
+}
+
+
 static int kgsl_iommu_init(struct kgsl_mmu *mmu)
 {
 	/*
@@ -744,6 +808,15 @@
 				KGSL_IOMMU_SETSTATE_NOP_OFFSET,
 				cp_nop_packet(1));
 
+	if (cpu_is_msm8960()) {
+		/*
+		 * 8960 doesn't have a second context bank, so the IOMMU
+		 * registers must be mapped into every pagetable.
+		 */
+		iommu_ops.mmu_setup_pt = kgsl_iommu_setup_regs;
+		iommu_ops.mmu_cleanup_pt = kgsl_iommu_cleanup_regs;
+	}
+
 	dev_info(mmu->device->dev, "|%s| MMU type set for device is IOMMU\n",
 			__func__);
 done:
@@ -766,9 +839,6 @@
 static int kgsl_iommu_setup_defaultpagetable(struct kgsl_mmu *mmu)
 {
 	int status = 0;
-	int i = 0;
-	struct kgsl_iommu *iommu = mmu->priv;
-	struct kgsl_pagetable *pagetable = NULL;
 
 	/* If chip is not 8960 then we use the 2nd context bank for pagetable
 	 * switching on the 3D side for which a separate table is allocated */
@@ -779,6 +849,9 @@
 			status = -ENOMEM;
 			goto err;
 		}
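+		/* Map the IOMMU registers into the private bank pagetable */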
+		status = kgsl_iommu_setup_regs(mmu, mmu->priv_bank_table);
+		if (status)
+			goto err;
 	}
 	mmu->defaultpagetable = kgsl_mmu_getpagetable(KGSL_MMU_GLOBAL_PT);
 	/* Return error if the default pagetable doesn't exist */
@@ -786,31 +859,10 @@
 		status = -ENOMEM;
 		goto err;
 	}
-	pagetable = mmu->priv_bank_table ? mmu->priv_bank_table :
-				mmu->defaultpagetable;
-	/* Map the IOMMU regsiters to only defaultpagetable */
-	if (msm_soc_version_supports_iommu_v1()) {
-		for (i = 0; i < iommu->unit_count; i++) {
-			iommu->iommu_units[i].reg_map.priv |=
-						KGSL_MEMDESC_GLOBAL;
-			status = kgsl_mmu_map(pagetable,
-				&(iommu->iommu_units[i].reg_map),
-				GSL_PT_PAGE_RV | GSL_PT_PAGE_WV);
-			if (status) {
-				iommu->iommu_units[i].reg_map.priv &=
-							~KGSL_MEMDESC_GLOBAL;
-				goto err;
-			}
-		}
-	}
 	return status;
 err:
-	for (i--; i >= 0; i--) {
-		kgsl_mmu_unmap(pagetable,
-				&(iommu->iommu_units[i].reg_map));
-		iommu->iommu_units[i].reg_map.priv &= ~KGSL_MEMDESC_GLOBAL;
-	}
 	if (mmu->priv_bank_table) {
+		kgsl_iommu_cleanup_regs(mmu, mmu->priv_bank_table);
 		kgsl_mmu_putpagetable(mmu->priv_bank_table);
 		mmu->priv_bank_table = NULL;
 	}
@@ -839,10 +891,12 @@
 	 * a225, hence we still keep the MMU active on 8960 */
 	if (cpu_is_msm8960()) {
 		struct kgsl_mh *mh = &(mmu->device->mh);
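+		/* Sanity check: the mapped IOMMU registers must not lie below the MPU base */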
+		BUG_ON(iommu->iommu_units[0].reg_map.gpuaddr != 0 &&
+			mh->mpu_base > iommu->iommu_units[0].reg_map.gpuaddr);
 		kgsl_regwrite(mmu->device, MH_MMU_CONFIG, 0x00000001);
+
 		kgsl_regwrite(mmu->device, MH_MMU_MPU_END,
-			mh->mpu_base +
-			iommu->iommu_units[0].reg_map.gpuaddr);
+			mh->mpu_base + mh->mpu_range);
 	} else {
 		kgsl_regwrite(mmu->device, MH_MMU_CONFIG, 0x00000000);
 	}
@@ -915,14 +969,12 @@
 			"with err: %d\n", iommu_pt->domain, gpuaddr,
 			range, ret);
 
-#ifdef CONFIG_KGSL_PER_PROCESS_PAGE_TABLE
 	/*
 	 * Flushing only required if per process pagetables are used. With
 	 * global case, flushing will happen inside iommu_map function
 	 */
-	if (!ret && msm_soc_version_supports_iommu_v1())
+	if (!ret && kgsl_mmu_is_perprocess())
 		*tlb_flags = UINT_MAX;
-#endif
 	return 0;
 }
 
@@ -1003,22 +1055,23 @@
 {
 	struct kgsl_iommu *iommu = mmu->priv;
 	int i;
-	for (i = 0; i < iommu->unit_count; i++) {
-		struct kgsl_pagetable *pagetable = (mmu->priv_bank_table ?
-			mmu->priv_bank_table : mmu->defaultpagetable);
-		if (iommu->iommu_units[i].reg_map.gpuaddr)
-			kgsl_mmu_unmap(pagetable,
-			&(iommu->iommu_units[i].reg_map));
-		if (iommu->iommu_units[i].reg_map.hostptr)
-			iounmap(iommu->iommu_units[i].reg_map.hostptr);
-		kgsl_sg_free(iommu->iommu_units[i].reg_map.sg,
-				iommu->iommu_units[i].reg_map.sglen);
+
+	if (mmu->priv_bank_table != NULL) {
+		kgsl_iommu_cleanup_regs(mmu, mmu->priv_bank_table);
+		kgsl_mmu_putpagetable(mmu->priv_bank_table);
 	}
 
-	if (mmu->priv_bank_table)
-		kgsl_mmu_putpagetable(mmu->priv_bank_table);
-	if (mmu->defaultpagetable)
+	if (mmu->defaultpagetable != NULL)
 		kgsl_mmu_putpagetable(mmu->defaultpagetable);
+
+	for (i = 0; i < iommu->unit_count; i++) {
+		struct kgsl_memdesc *reg_map = &iommu->iommu_units[i].reg_map;
+
+		if (reg_map->hostptr)
+			iounmap(reg_map->hostptr);
+		kgsl_sg_free(reg_map->sg, reg_map->sglen);
+	}
+
 	kfree(iommu);
 
 	return 0;
@@ -1149,6 +1202,9 @@
 	.mmu_get_num_iommu_units = kgsl_iommu_get_num_iommu_units,
 	.mmu_pt_equal = kgsl_iommu_pt_equal,
 	.mmu_get_pt_base_addr = kgsl_iommu_get_pt_base_addr,
+	/* These callbacks will be set on some chipsets */
+	.mmu_setup_pt = NULL,
+	.mmu_cleanup_pt = NULL,
 };
 
 struct kgsl_mmu_pt_ops iommu_pt_ops = {
diff --git a/drivers/gpu/msm/kgsl_mmu.c b/drivers/gpu/msm/kgsl_mmu.c
index 68cd167..6fe119d 100644
--- a/drivers/gpu/msm/kgsl_mmu.c
+++ b/drivers/gpu/msm/kgsl_mmu.c
@@ -36,17 +36,18 @@
 static int kgsl_cleanup_pt(struct kgsl_pagetable *pt)
 {
 	int i;
-	/* For IOMMU only unmap the global structures to global pt */
-	if ((KGSL_MMU_TYPE_NONE != kgsl_mmu_type) &&
-		(KGSL_MMU_TYPE_IOMMU == kgsl_mmu_type) &&
-		(KGSL_MMU_GLOBAL_PT !=  pt->name) &&
-		(KGSL_MMU_PRIV_BANK_TABLE_NAME !=  pt->name))
-		return 0;
+	struct kgsl_device *device;
+
 	for (i = 0; i < KGSL_DEVICE_MAX; i++) {
-		struct kgsl_device *device = kgsl_driver.devp[i];
+		device = kgsl_driver.devp[i];
 		if (device)
 			device->ftbl->cleanup_pt(device, pt);
 	}
+	/* Only the 3d device needs mmu-specific pt entries */
+	device = kgsl_driver.devp[KGSL_DEVICE_3D0];
+	if (device->mmu.mmu_ops->mmu_cleanup_pt != NULL)
+		device->mmu.mmu_ops->mmu_cleanup_pt(&device->mmu, pt);
+
 	return 0;
 }
 
@@ -55,21 +56,23 @@
 {
 	int i = 0;
 	int status = 0;
+	struct kgsl_device *device;
 
-	/* For IOMMU only map the global structures to global pt */
-	if ((KGSL_MMU_TYPE_NONE != kgsl_mmu_type) &&
-		(KGSL_MMU_TYPE_IOMMU == kgsl_mmu_type) &&
-		(KGSL_MMU_GLOBAL_PT !=  pt->name) &&
-		(KGSL_MMU_PRIV_BANK_TABLE_NAME !=  pt->name))
-		return 0;
 	for (i = 0; i < KGSL_DEVICE_MAX; i++) {
-		struct kgsl_device *device = kgsl_driver.devp[i];
+		device = kgsl_driver.devp[i];
 		if (device) {
 			status = device->ftbl->setup_pt(device, pt);
 			if (status)
 				goto error_pt;
 		}
 	}
+	/* Only the 3d device needs mmu-specific pt entries */
+	device = kgsl_driver.devp[KGSL_DEVICE_3D0];
+	if (device->mmu.mmu_ops->mmu_setup_pt != NULL) {
+		status = device->mmu.mmu_ops->mmu_setup_pt(&device->mmu, pt);
+		if (status)
+			goto error_pt;
+	}
 	return status;
 error_pt:
 	while (i >= 0) {
@@ -309,22 +312,6 @@
 	return ret;
 }
 
-unsigned int kgsl_mmu_get_ptsize(void)
-{
-	/*
-	 * For IOMMU, we could do up to 4G virtual range if we wanted to, but
-	 * it makes more sense to return a smaller range and leave the rest of
-	 * the virtual range for future improvements
-	 */
-
-	if (KGSL_MMU_TYPE_GPU == kgsl_mmu_type)
-		return CONFIG_MSM_KGSL_PAGE_TABLE_SIZE;
-	else if (KGSL_MMU_TYPE_IOMMU == kgsl_mmu_type)
-		return SZ_2G - KGSL_PAGETABLE_BASE;
-	else
-		return 0;
-}
-
 int
 kgsl_mmu_get_ptname_from_ptbase(struct kgsl_mmu *mmu, unsigned int pt_base)
 {
@@ -480,7 +467,7 @@
 		goto err_kgsl_pool;
 	}
 
-	if (gen_pool_add(pagetable->pool, KGSL_PAGETABLE_BASE,
+	if (gen_pool_add(pagetable->pool, kgsl_mmu_get_base_addr(),
 				ptsize, -1)) {
 		KGSL_CORE_ERR("gen_pool_add failed\n");
 		goto err_pool;
@@ -528,11 +515,7 @@
 	if (KGSL_MMU_TYPE_NONE == kgsl_mmu_type)
 		return (void *)(-1);
 
-#ifndef CONFIG_KGSL_PER_PROCESS_PAGE_TABLE
-	name = KGSL_MMU_GLOBAL_PT;
-#endif
-	/* We presently do not support per-process for IOMMU-v2 */
-	if (!msm_soc_version_supports_iommu_v1())
+	if (!kgsl_mmu_is_perprocess())
 		name = KGSL_MMU_GLOBAL_PT;
 
 	pt = kgsl_get_pagetable(name);
@@ -589,15 +572,6 @@
 	 */
 }
 
-static inline struct gen_pool *
-_get_pool(struct kgsl_pagetable *pagetable, unsigned int flags)
-{
-	if (pagetable->kgsl_pool &&
-		(KGSL_MEMDESC_GLOBAL & flags))
-		return pagetable->kgsl_pool;
-	return pagetable->pool;
-}
-
 int
 kgsl_mmu_map(struct kgsl_pagetable *pagetable,
 				struct kgsl_memdesc *memdesc,
@@ -628,28 +602,48 @@
 
 	size = kgsl_sg_size(memdesc->sg, memdesc->sglen);
 
-	/* Allocate from kgsl pool if it exists for global mappings */
-	pool = _get_pool(pagetable, memdesc->priv);
+	pool = pagetable->pool;
 
-	/* Allocate aligned virtual addresses for iommu. This allows
-	 * more efficient pagetable entries if the physical memory
-	 * is also aligned. Don't do this for GPUMMU, because
-	 * the address space is so small.
-	 */
-	if (KGSL_MMU_TYPE_IOMMU == kgsl_mmu_get_mmutype() &&
-	    kgsl_memdesc_get_align(memdesc) > 0)
-		page_align = kgsl_memdesc_get_align(memdesc);
-
-	memdesc->gpuaddr = gen_pool_alloc_aligned(pool, size, page_align);
-	if (memdesc->gpuaddr == 0) {
-		KGSL_CORE_ERR("gen_pool_alloc(%d) failed from pool: %s\n",
-			size,
-			(pool == pagetable->kgsl_pool) ?
-			"kgsl_pool" : "general_pool");
-		KGSL_CORE_ERR(" [%d] allocated=%d, entries=%d\n",
-				pagetable->name, pagetable->stats.mapped,
-				pagetable->stats.entries);
-		return -ENOMEM;
+	if (KGSL_MMU_TYPE_IOMMU == kgsl_mmu_get_mmutype()) {
+		/* Allocate aligned virtual addresses for iommu. This allows
+		 * more efficient pagetable entries if the physical memory
+		 * is also aligned. Don't do this for GPUMMU, because
+		 * the address space is so small.
+		 */
+		if (kgsl_memdesc_get_align(memdesc) > 0)
+			page_align = kgsl_memdesc_get_align(memdesc);
+		if (kgsl_memdesc_is_global(memdesc)) {
+			/*
+			 * Only the default pagetable has a kgsl_pool, and
+			 * it is responsible for creating the mapping for
+			 * each global buffer. The mapping will be reused
+			 * in all other pagetables and it must already exist
+			 * when we're creating other pagetables which do not
+			 * have a kgsl_pool.
+			 */
+			pool = pagetable->kgsl_pool;
+			if (pool == NULL && memdesc->gpuaddr == 0) {
+				KGSL_CORE_ERR(
+				  "No address for global mapping into pt %d\n",
+				  pagetable->name);
+				return -EINVAL;
+			}
+		}
+	}
+	if (pool) {
+		memdesc->gpuaddr = gen_pool_alloc_aligned(pool, size,
+							  page_align);
+		if (memdesc->gpuaddr == 0) {
+			KGSL_CORE_ERR("gen_pool_alloc(%d) failed, pool: %s\n",
+					size,
+					(pool == pagetable->kgsl_pool) ?
+					"kgsl_pool" : "general_pool");
+			KGSL_CORE_ERR(" [%d] allocated=%d, entries=%d\n",
+					pagetable->name,
+					pagetable->stats.mapped,
+					pagetable->stats.entries);
+			return -ENOMEM;
+		}
 	}
 
 	if (KGSL_MMU_TYPE_IOMMU != kgsl_mmu_get_mmutype())
@@ -676,7 +670,8 @@
 
 err_free_gpuaddr:
 	spin_unlock(&pagetable->lock);
-	gen_pool_free(pool, memdesc->gpuaddr, size);
+	if (pool)
+		gen_pool_free(pool, memdesc->gpuaddr, size);
 	memdesc->gpuaddr = 0;
 	return ret;
 }
@@ -711,14 +706,20 @@
 
 	spin_unlock(&pagetable->lock);
 
-	pool = _get_pool(pagetable, memdesc->priv);
-	gen_pool_free(pool, memdesc->gpuaddr, size);
+	pool = pagetable->pool;
+
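+	/* Global mappings come from the kgsl pool, which only the default pagetable has */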
+	if (KGSL_MMU_TYPE_IOMMU == kgsl_mmu_get_mmutype()
+		&& kgsl_memdesc_is_global(memdesc)) {
+		pool = pagetable->kgsl_pool;
+	}
+	if (pool)
+		gen_pool_free(pool, memdesc->gpuaddr, size);
 
 	/*
 	 * Don't clear the gpuaddr on global mappings because they
 	 * may be in use by other pagetables
 	 */
-	if (!(memdesc->priv & KGSL_MEMDESC_GLOBAL))
+	if (!kgsl_memdesc_is_global(memdesc))
 		memdesc->gpuaddr = 0;
 	return 0;
 }
diff --git a/drivers/gpu/msm/kgsl_mmu.h b/drivers/gpu/msm/kgsl_mmu.h
index b8b9149..b8eff60 100644
--- a/drivers/gpu/msm/kgsl_mmu.h
+++ b/drivers/gpu/msm/kgsl_mmu.h
@@ -13,13 +13,14 @@
 #ifndef __KGSL_MMU_H
 #define __KGSL_MMU_H
 
+#include <mach/iommu.h>
+
 /*
- * These defines control the split between ttbr1 and ttbr0 pagetables of IOMMU
- * and what ranges of memory we map to them
+ * These defines control the address range for allocations that
+ * are mapped into all pagetables.
  */
 #define KGSL_IOMMU_GLOBAL_MEM_BASE	0xC0000000
 #define KGSL_IOMMU_GLOBAL_MEM_SIZE	SZ_4M
-#define KGSL_IOMMU_TTBR1_SPLIT		2
 
 #define KGSL_MMU_ALIGN_SHIFT    13
 #define KGSL_MMU_ALIGN_MASK     (~((1 << KGSL_MMU_ALIGN_SHIFT) - 1))
@@ -148,6 +149,10 @@
 	unsigned int (*mmu_get_pt_base_addr)
 			(struct kgsl_mmu *mmu,
 			struct kgsl_pagetable *pt);
+	int (*mmu_setup_pt) (struct kgsl_mmu *mmu,
+			struct kgsl_pagetable *pt);
+	void (*mmu_cleanup_pt) (struct kgsl_mmu *mmu,
+			struct kgsl_pagetable *pt);
 };
 
 struct kgsl_mmu_pt_ops {
@@ -209,7 +214,6 @@
 int kgsl_mmu_enabled(void);
 void kgsl_mmu_set_mmutype(char *mmutype);
 enum kgsl_mmutype kgsl_mmu_get_mmutype(void);
-unsigned int kgsl_mmu_get_ptsize(void);
 int kgsl_mmu_gpuaddr_in_range(unsigned int gpuaddr);
 
 /*
@@ -321,4 +325,58 @@
 		return 0;
 }
 
+/*
+ * kgsl_mmu_is_perprocess() - Runtime check for per-process
+ * pagetables.
+ *
+ * Returns non-zero if per-process pagetables are enabled,
+ * 0 if not.
+ */
+#ifdef CONFIG_KGSL_PER_PROCESS_PAGE_TABLE
+static inline int kgsl_mmu_is_perprocess(void)
+{
+	/* We presently do not support per-process for IOMMU-v2 */
+	return (kgsl_mmu_get_mmutype() != KGSL_MMU_TYPE_IOMMU)
+		|| msm_soc_version_supports_iommu_v1();
+}
+#else
+static inline int kgsl_mmu_is_perprocess(void)
+{
+	return 0;
+}
+#endif
+
+/*
+ * kgsl_mmu_get_base_addr() - Get gpu virtual address base.
+ *
+ * Returns the start address of the gpu
+ * virtual address space.
+ */
+static inline unsigned int kgsl_mmu_get_base_addr(void)
+{
+	return KGSL_PAGETABLE_BASE;
+}
+
+/*
+ * kgsl_mmu_get_ptsize() - Get gpu pagetable size
+ *
+ * Returns the usable size of the gpu address space.
+ */
+static inline unsigned int kgsl_mmu_get_ptsize(void)
+{
+	/*
+	 * For IOMMU, we could do up to 4G virtual range if we wanted to, but
+	 * it makes more sense to return a smaller range and leave the rest of
+	 * the virtual range for future improvements
+	 */
+	enum kgsl_mmutype mmu_type = kgsl_mmu_get_mmutype();
+
+	if (KGSL_MMU_TYPE_GPU == mmu_type)
+		return CONFIG_MSM_KGSL_PAGE_TABLE_SIZE;
+	else if (KGSL_MMU_TYPE_IOMMU == mmu_type)
+		return SZ_2G;
+	return 0;
+}
+
 #endif /* __KGSL_MMU_H */
diff --git a/drivers/gpu/msm/kgsl_pwrscale_msm.c b/drivers/gpu/msm/kgsl_pwrscale_msm.c
index b302bee..c680d57 100644
--- a/drivers/gpu/msm/kgsl_pwrscale_msm.c
+++ b/drivers/gpu/msm/kgsl_pwrscale_msm.c
@@ -23,6 +23,8 @@
 	struct kgsl_device		*device;
 	int				enabled;
 	unsigned int			cur_freq;
+	unsigned int			req_level;
+	int				floor_level;
 	struct msm_dcvs_core_info	*core_info;
 	int				gpu_busy;
 	int				dcvs_core_id;
@@ -69,7 +71,39 @@
 		return 0;
 
 	mutex_lock(&device->mutex);
-	kgsl_pwrctrl_pwrlevel_change(device, i);
+	priv->req_level = i;
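+	/* Apply the request only if it does not fall below the frequency floor */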
+	if (priv->req_level <= priv->floor_level) {
+		kgsl_pwrctrl_pwrlevel_change(device, priv->req_level);
+		priv->cur_freq = pwr->pwrlevels[pwr->active_pwrlevel].gpu_freq;
+	}
+	mutex_unlock(&device->mutex);
+
+	/* return current frequency in kHz */
+	return priv->cur_freq / 1000;
+}
+
+static int msm_set_min_freq(int core_num, unsigned int freq)
+{
+	int i, delta = 5000000;
+	struct msm_priv *priv = the_msm_priv;
+	struct kgsl_device *device = priv->device;
+	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
+
+	/* msm_dcvs manager uses frequencies in kHz */
+	freq *= 1000;
+	for (i = 0; i < pwr->num_pwrlevels; i++)
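+	/* Find a power level whose frequency is within 5 MHz of the requested minimum */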
+		if (abs(pwr->pwrlevels[i].gpu_freq - freq) < delta)
+			break;
+	if (i == pwr->num_pwrlevels)
+		return 0;
+
+	mutex_lock(&device->mutex);
+	priv->floor_level = i;
+	if (priv->floor_level <= priv->req_level)
+		kgsl_pwrctrl_pwrlevel_change(device, priv->floor_level);
+	else if (priv->floor_level > priv->req_level)
+		kgsl_pwrctrl_pwrlevel_change(device, priv->req_level);
+
 	priv->cur_freq = pwr->pwrlevels[pwr->active_pwrlevel].gpu_freq;
 	mutex_unlock(&device->mutex);
 
@@ -170,6 +204,7 @@
 
 		priv->core_info = pdata->core_info;
 		tbl = priv->core_info->freq_tbl;
+		priv->floor_level = pwr->num_pwrlevels - 1;
 		/* Fill in frequency table from low to high, reversing order. */
 		low_level = pwr->num_pwrlevels - KGSL_PWRLEVEL_LAST_OFFSET;
 		for (i = 0; i <= low_level; i++)
@@ -180,6 +215,7 @@
 				0,
 				priv->core_info,
 				msm_set_freq, msm_get_freq, msm_idle_enable,
+				msm_set_min_freq,
 				priv->core_info->sensors[0]);
 		if (priv->dcvs_core_id < 0) {
 			KGSL_PWR_ERR(device, "msm_dcvs_register_core failed");
diff --git a/drivers/gpu/msm/kgsl_sharedmem.h b/drivers/gpu/msm/kgsl_sharedmem.h
index 92a6f27..53e88be 100644
--- a/drivers/gpu/msm/kgsl_sharedmem.h
+++ b/drivers/gpu/msm/kgsl_sharedmem.h
@@ -157,6 +157,17 @@
 	return 0;
 }
 
+/*
+ * kgsl_memdesc_is_global - is this a globally mapped buffer?
+ * @memdesc: the memdesc
+ *
+ * Returns nonzero if this is a global mapping, 0 otherwise
+ */
+static inline int kgsl_memdesc_is_global(const struct kgsl_memdesc *memdesc)
+{
+	return (memdesc->priv & KGSL_MEMDESC_GLOBAL) != 0;
+}
+
 static inline int
 kgsl_allocate(struct kgsl_memdesc *memdesc,
 		struct kgsl_pagetable *pagetable, size_t size)
diff --git a/drivers/gpu/msm/z180.c b/drivers/gpu/msm/z180.c
index 712bc60..258dcfa 100644
--- a/drivers/gpu/msm/z180.c
+++ b/drivers/gpu/msm/z180.c
@@ -260,6 +260,13 @@
 				     GSL_PT_PAGE_RV);
 	if (result)
 		goto error_unmap_memstore;
+	/*
+	 * Set the mpu end to the last "normal" global memory we use.
+	 * For the IOMMU, this will be used to restrict access to the
+	 * mapped registers.
+	 */
+	device->mh.mpu_range = z180_dev->ringbuffer.cmdbufdesc.gpuaddr +
+				z180_dev->ringbuffer.cmdbufdesc.size;
 	return result;
 
 error_unmap_dummy:
diff --git a/drivers/hwmon/qpnp-adc-common.c b/drivers/hwmon/qpnp-adc-common.c
index a384103..e319813 100644
--- a/drivers/hwmon/qpnp-adc-common.c
+++ b/drivers/hwmon/qpnp-adc-common.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -299,81 +299,83 @@
 	{419,		128000}
 };
 
+/* Voltage to temperature */
 static const struct qpnp_vadc_map_pt adcmap_100k_104ef_104fb[] = {
-	{-40,	1758},
-	{-35,	1742},
-	{-30,	1719},
-	{-25,	1691},
-	{-20,	1654},
-	{-15,	1608},
-	{-10,	1551},
-	{-5,	1483},
-	{0,	1404},
-	{5,	1315},
-	{10,	1218},
-	{15,	1114},
-	{20,	1007},
-	{25,	900},
-	{30,	795},
-	{35,	696},
-	{40,	605},
-	{45,	522},
-	{50,	448},
-	{55,	383},
-	{60,	327},
-	{65,	278},
-	{70,	237},
-	{75,	202},
-	{80,	172},
-	{85,	146},
-	{90,	125},
-	{95,	107},
-	{100,	92},
-	{105,	79},
-	{110,	68},
-	{115,	59},
-	{120,	51},
-	{125,	44}
+	{1758,	-40},
+	{1742,	-35},
+	{1719,	-30},
+	{1691,	-25},
+	{1654,	-20},
+	{1608,	-15},
+	{1551,	-10},
+	{1483,	-5},
+	{1404,	0},
+	{1315,	5},
+	{1218,	10},
+	{1114,	15},
+	{1007,	20},
+	{900,	25},
+	{795,	30},
+	{696,	35},
+	{605,	40},
+	{522,	45},
+	{448,	50},
+	{383,	55},
+	{327,	60},
+	{278,	65},
+	{237,	70},
+	{202,	75},
+	{172,	80},
+	{146,	85},
+	{125,	90},
+	{107,	95},
+	{92,	100},
+	{79,	105},
+	{68,	110},
+	{59,	115},
+	{51,	120},
+	{44,	125}
 };
 
+/* Voltage to temperature */
 static const struct qpnp_vadc_map_pt adcmap_150k_104ef_104fb[] = {
-	{-40,	1738},
-	{-35,	1714},
-	{-30,	1682},
-	{-25,	1641},
-	{-20,	1589},
-	{-15,	1526},
-	{-10,	1451},
-	{-5,	1363},
-	{0,	1266},
-	{5,	1159},
-	{10,	1048},
-	{15,	936},
-	{20,	825},
-	{25,	720},
-	{30,	622},
-	{35,	533},
-	{40,	454},
-	{45,	385},
-	{50,	326},
-	{55,	275},
-	{60,	232},
-	{65,	195},
-	{70,	165},
-	{75,	139},
-	{80,	118},
-	{85,	100},
-	{90,	85},
-	{95,	73},
-	{100,	62},
-	{105,	53},
-	{110,	46},
-	{115,	40},
-	{120,	34},
-	{125,	30}
+	{1738,	-40},
+	{1714,	-35},
+	{1682,	-30},
+	{1641,	-25},
+	{1589,	-20},
+	{1526,	-15},
+	{1451,	-10},
+	{1363,	-5},
+	{1266,	0},
+	{1159,	5},
+	{1048,	10},
+	{936,	15},
+	{825,	20},
+	{720,	25},
+	{622,	30},
+	{533,	35},
+	{454,	40},
+	{385,	45},
+	{326,	50},
+	{275,	55},
+	{232,	60},
+	{195,	65},
+	{165,	70},
+	{139,	75},
+	{118,	80},
+	{100,	85},
+	{85,	90},
+	{73,	95},
+	{62,	100},
+	{53,	105},
+	{46,	110},
+	{40,	115},
+	{34,	120},
+	{30,	125}
 };
 
-static int32_t qpnp_adc_map_linear(const struct qpnp_vadc_map_pt *pts,
+static int32_t qpnp_adc_map_voltage_temp(const struct qpnp_vadc_map_pt *pts,
 		uint32_t tablesize, int32_t input, int64_t *output)
 {
 	bool descending = 1;
@@ -419,7 +421,7 @@
 	return 0;
 }
 
-static int32_t qpnp_adc_map_batt_therm(const struct qpnp_vadc_map_pt *pts,
+static int32_t qpnp_adc_map_temp_voltage(const struct qpnp_vadc_map_pt *pts,
 		uint32_t tablesize, int32_t input, int64_t *output)
 {
 	bool descending = 1;
@@ -552,7 +554,7 @@
 	xo_thm = qpnp_adc_scale_ratiometric_calib(adc_code,
 			adc_properties, chan_properties);
 	xo_thm <<= 4;
-	qpnp_adc_map_linear(adcmap_ntcg_104ef_104fb,
+	qpnp_adc_map_voltage_temp(adcmap_ntcg_104ef_104fb,
 		ARRAY_SIZE(adcmap_ntcg_104ef_104fb),
 		xo_thm, &adc_chan_result->physical);
 
@@ -570,7 +572,7 @@
 	bat_voltage = qpnp_adc_scale_ratiometric_calib(adc_code,
 			adc_properties, chan_properties);
 
-	return qpnp_adc_map_batt_therm(
+	return qpnp_adc_map_temp_voltage(
 			adcmap_btm_threshold,
 			ARRAY_SIZE(adcmap_btm_threshold),
 			bat_voltage,
@@ -588,7 +590,7 @@
 	therm_voltage = qpnp_adc_scale_ratiometric_calib(adc_code,
 			adc_properties, chan_properties);
 
-	qpnp_adc_map_linear(adcmap_150k_104ef_104fb,
+	qpnp_adc_map_voltage_temp(adcmap_150k_104ef_104fb,
 		ARRAY_SIZE(adcmap_150k_104ef_104fb),
 		therm_voltage, &adc_chan_result->physical);
 
@@ -606,7 +608,7 @@
 	therm_voltage = qpnp_adc_scale_ratiometric_calib(adc_code,
 			adc_properties, chan_properties);
 
-	qpnp_adc_map_linear(adcmap_100k_104ef_104fb,
+	qpnp_adc_map_voltage_temp(adcmap_100k_104ef_104fb,
 		ARRAY_SIZE(adcmap_100k_104ef_104fb),
 		therm_voltage, &adc_chan_result->physical);
 
diff --git a/drivers/input/misc/lis3dh_acc.c b/drivers/input/misc/lis3dh_acc.c
index af96d3f..cc4ee9f 100644
--- a/drivers/input/misc/lis3dh_acc.c
+++ b/drivers/input/misc/lis3dh_acc.c
@@ -1086,26 +1086,26 @@
 
 static struct device_attribute attributes[] = {
 
-	__ATTR(pollrate_ms, 0666, attr_get_polling_rate,
+	__ATTR(pollrate_ms, 0664, attr_get_polling_rate,
 			attr_set_polling_rate),
-	__ATTR(range, 0666, attr_get_range, attr_set_range),
-	__ATTR(enable, 0666, attr_get_enable, attr_set_enable),
-	__ATTR(int1_config, 0666, attr_get_intconfig1, attr_set_intconfig1),
-	__ATTR(int1_duration, 0666, attr_get_duration1, attr_set_duration1),
-	__ATTR(int1_threshold, 0666, attr_get_thresh1, attr_set_thresh1),
+	__ATTR(range, 0664, attr_get_range, attr_set_range),
+	__ATTR(enable, 0664, attr_get_enable, attr_set_enable),
+	__ATTR(int1_config, 0664, attr_get_intconfig1, attr_set_intconfig1),
+	__ATTR(int1_duration, 0664, attr_get_duration1, attr_set_duration1),
+	__ATTR(int1_threshold, 0664, attr_get_thresh1, attr_set_thresh1),
 	__ATTR(int1_source, 0444, attr_get_source1, NULL),
-	__ATTR(click_config, 0666, attr_get_click_cfg, attr_set_click_cfg),
+	__ATTR(click_config, 0664, attr_get_click_cfg, attr_set_click_cfg),
 	__ATTR(click_source, 0444, attr_get_click_source, NULL),
-	__ATTR(click_threshold, 0666, attr_get_click_ths, attr_set_click_ths),
-	__ATTR(click_timelimit, 0666, attr_get_click_tlim,
+	__ATTR(click_threshold, 0664, attr_get_click_ths, attr_set_click_ths),
+	__ATTR(click_timelimit, 0664, attr_get_click_tlim,
 			attr_set_click_tlim),
-	__ATTR(click_timelatency, 0666, attr_get_click_tlat,
+	__ATTR(click_timelatency, 0664, attr_get_click_tlat,
 							attr_set_click_tlat),
-	__ATTR(click_timewindow, 0666, attr_get_click_tw, attr_set_click_tw),
+	__ATTR(click_timewindow, 0664, attr_get_click_tw, attr_set_click_tw),
 
 #ifdef DEBUG
-	__ATTR(reg_value, 0666, attr_reg_get, attr_reg_set),
-	__ATTR(reg_addr, 0222, NULL, attr_addr_set),
+	__ATTR(reg_value, 0664, attr_reg_get, attr_reg_set),
+	__ATTR(reg_addr, 0220, NULL, attr_addr_set),
 #endif
 };
 
diff --git a/drivers/input/misc/mpu3050.c b/drivers/input/misc/mpu3050.c
index 04a7598..db6f93c 100644
--- a/drivers/input/misc/mpu3050.c
+++ b/drivers/input/misc/mpu3050.c
@@ -288,7 +288,7 @@
 
 static struct device_attribute attributes[] = {
 
-	__ATTR(pollrate_ms, 0666,
+	__ATTR(pollrate_ms, 0664,
 		mpu3050_attr_get_polling_rate,
 		mpu3050_attr_set_polling_rate),
 };
diff --git a/drivers/iommu/msm_iommu-v2.c b/drivers/iommu/msm_iommu-v2.c
index 9d88fdd..425eb8a 100644
--- a/drivers/iommu/msm_iommu-v2.c
+++ b/drivers/iommu/msm_iommu-v2.c
@@ -148,6 +148,9 @@
 	return ret;
 }
 
+/*
+ * May only be called for non-secure iommus
+ */
 static void __reset_iommu(void __iomem *base)
 {
 	int i, smt_size;
@@ -170,6 +173,9 @@
 	mb();
 }
 
+/*
+ * May only be called for non-secure iommus
+ */
 static void __program_iommu(void __iomem *base,
 			    struct msm_iommu_bfb_settings *bfb_settings)
 {
@@ -223,14 +229,14 @@
 
 static void __program_context(void __iomem *base, int ctx, int ncb,
 				phys_addr_t pgtable, int redirect,
-				u32 *sids, int len)
+				u32 *sids, int len, bool is_secure)
 {
 	unsigned int prrr, nmrr;
 	unsigned int pn;
 	int i, j, found, num = 0, smt_size;
 
 	__reset_context(base, ctx);
-	smt_size = GET_IDR0_NUMSMRG(base);
+
 	pn = pgtable >> CB_TTBR0_ADDR_SHIFT;
 	SET_TTBCR(base, ctx, 0);
 	SET_CB_TTBR0_ADDR(base, ctx, pn);
@@ -266,41 +272,44 @@
 		SET_CB_TTBR0_RGN(base, ctx, 1);   /* WB, WA */
 	}
 
-	/* Program the M2V tables for this context */
-	for (i = 0; i < len / sizeof(*sids); i++) {
-		for (; num < smt_size; num++)
-			if (GET_SMR_VALID(base, num) == 0)
-				break;
-		BUG_ON(num >= smt_size);
+	if (!is_secure) {
+		smt_size = GET_IDR0_NUMSMRG(base);
+		/* Program the M2V tables for this context */
+		for (i = 0; i < len / sizeof(*sids); i++) {
+			for (; num < smt_size; num++)
+				if (GET_SMR_VALID(base, num) == 0)
+					break;
+			BUG_ON(num >= smt_size);
 
-		SET_SMR_VALID(base, num, 1);
-		SET_SMR_MASK(base, num, 0);
-		SET_SMR_ID(base, num, sids[i]);
+			SET_SMR_VALID(base, num, 1);
+			SET_SMR_MASK(base, num, 0);
+			SET_SMR_ID(base, num, sids[i]);
 
-		SET_S2CR_N(base, num, 0);
-		SET_S2CR_CBNDX(base, num, ctx);
-		SET_S2CR_MEMATTR(base, num, 0x0A);
-		/* Set security bit override to be Non-secure */
-		SET_S2CR_NSCFG(base, num, 3);
+			SET_S2CR_N(base, num, 0);
+			SET_S2CR_CBNDX(base, num, ctx);
+			SET_S2CR_MEMATTR(base, num, 0x0A);
+			/* Set security bit override to be Non-secure */
+			SET_S2CR_NSCFG(base, num, 3);
+		}
+		SET_CBAR_N(base, ctx, 0);
+
+		/* Stage 1 Context with Stage 2 bypass */
+		SET_CBAR_TYPE(base, ctx, 1);
+
+		/* Route page faults to the non-secure interrupt */
+		SET_CBAR_IRPTNDX(base, ctx, 1);
+
+		/* Set VMID to non-secure HLOS */
+		SET_CBAR_VMID(base, ctx, 3);
+
+		/* Bypass is treated as inner-shareable */
+		SET_CBAR_BPSHCFG(base, ctx, 2);
+
+		/* Do not downgrade memory attributes */
+		SET_CBAR_MEMATTR(base, ctx, 0x0A);
+
 	}
 
-	SET_CBAR_N(base, ctx, 0);
-
-	/* Stage 1 Context with Stage 2 bypass */
-	SET_CBAR_TYPE(base, ctx, 1);
-
-	/* Route page faults to the non-secure interrupt */
-	SET_CBAR_IRPTNDX(base, ctx, 1);
-
-	/* Set VMID to non-secure HLOS */
-	SET_CBAR_VMID(base, ctx, 3);
-
-	/* Bypass is treated as inner-shareable */
-	SET_CBAR_BPSHCFG(base, ctx, 2);
-
-	/* Do not downgrade memory attributes */
-	SET_CBAR_MEMATTR(base, ctx, 0x0A);
-
        /* Find if this page table is used elsewhere, and re-use ASID */
 	found = 0;
 	for (i = 0; i < ncb; i++)
@@ -399,6 +408,7 @@
 	struct msm_iommu_ctx_drvdata *ctx_drvdata;
 	struct msm_iommu_ctx_drvdata *tmp_drvdata;
 	int ret;
+	int is_secure;
 
 	mutex_lock(&msm_iommu_lock);
 
@@ -426,6 +436,8 @@
 			goto fail;
 		}
 
+	is_secure = iommu_drvdata->sec_id != -1;
+
 	ret = regulator_enable(iommu_drvdata->gdsc);
 	if (ret)
 		goto fail;
@@ -436,13 +448,25 @@
 		goto fail;
 	}
 
-	if (!msm_iommu_ctx_attached(dev->parent))
-		__program_iommu(iommu_drvdata->base,
+	if (!msm_iommu_ctx_attached(dev->parent)) {
+		if (!is_secure) {
+			__program_iommu(iommu_drvdata->base,
 				iommu_drvdata->bfb_settings);
+		} else {
+			ret = msm_iommu_sec_program_iommu(
+				iommu_drvdata->sec_id);
+			if (ret) {
+				regulator_disable(iommu_drvdata->gdsc);
+				__disable_clocks(iommu_drvdata);
+				goto fail;
+			}
+		}
+	}
 
 	__program_context(iommu_drvdata->base, ctx_drvdata->num,
 		iommu_drvdata->ncb, __pa(priv->pt.fl_table),
-		priv->pt.redirect, ctx_drvdata->sids, ctx_drvdata->nsid);
+		priv->pt.redirect, ctx_drvdata->sids, ctx_drvdata->nsid,
+		is_secure);
 	__disable_clocks(iommu_drvdata);
 
 	list_add(&(ctx_drvdata->attached_elm), &priv->list_attached);
@@ -460,6 +484,7 @@
 	struct msm_iommu_drvdata *iommu_drvdata;
 	struct msm_iommu_ctx_drvdata *ctx_drvdata;
 	int ret;
+	int is_secure;
 
 	mutex_lock(&msm_iommu_lock);
 	priv = domain->priv;
@@ -475,11 +500,14 @@
 	if (ret)
 		goto fail;
 
+	is_secure = iommu_drvdata->sec_id != -1;
+
 	SET_TLBIASID(iommu_drvdata->base, ctx_drvdata->num,
 		GET_CB_CONTEXTIDR_ASID(iommu_drvdata->base, ctx_drvdata->num));
 
 	__reset_context(iommu_drvdata->base, ctx_drvdata->num);
-	__release_smg(iommu_drvdata->base, ctx_drvdata->num);
+	if (!is_secure)
+		__release_smg(iommu_drvdata->base, ctx_drvdata->num);
 
 	__disable_clocks(iommu_drvdata);
 
diff --git a/drivers/iommu/msm_iommu_sec.c b/drivers/iommu/msm_iommu_sec.c
index a89c4a8..72ec4a6 100644
--- a/drivers/iommu/msm_iommu_sec.c
+++ b/drivers/iommu/msm_iommu_sec.c
@@ -128,7 +128,7 @@
 	return ret;
 }
 
-static int msm_iommu_sec_program_iommu(int sec_id)
+int msm_iommu_sec_program_iommu(int sec_id)
 {
 	struct msm_scm_sec_cfg {
 		unsigned int id;
diff --git a/drivers/leds/leds-pm8xxx.c b/drivers/leds/leds-pm8xxx.c
index 255920e..a641ce9 100644
--- a/drivers/leds/leds-pm8xxx.c
+++ b/drivers/leds/leds-pm8xxx.c
@@ -46,8 +46,8 @@
 /* wled control registers */
 #define WLED_MOD_CTRL_REG		SSBI_REG_ADDR_WLED_CTRL(1)
 #define WLED_MAX_CURR_CFG_REG(n)	SSBI_REG_ADDR_WLED_CTRL(n + 2)
-#define WLED_BRIGHTNESS_CNTL_REG1(n)	SSBI_REG_ADDR_WLED_CTRL(n + 5)
-#define WLED_BRIGHTNESS_CNTL_REG2(n)	SSBI_REG_ADDR_WLED_CTRL(n + 6)
+#define WLED_BRIGHTNESS_CNTL_REG1(n)	SSBI_REG_ADDR_WLED_CTRL((2 * n) + 5)
+#define WLED_BRIGHTNESS_CNTL_REG2(n)	SSBI_REG_ADDR_WLED_CTRL((2 * n) + 6)
 #define WLED_SYNC_REG			SSBI_REG_ADDR_WLED_CTRL(11)
 #define WLED_OVP_CFG_REG		SSBI_REG_ADDR_WLED_CTRL(13)
 #define WLED_BOOST_CFG_REG		SSBI_REG_ADDR_WLED_CTRL(14)
@@ -640,7 +640,7 @@
 	/* program activation delay and maximum current */
 	for (i = 0; i < num_wled_strings; i++) {
 		rc = pm8xxx_readb(led->dev->parent,
-				WLED_MAX_CURR_CFG_REG(i + 2), &val);
+				WLED_MAX_CURR_CFG_REG(i), &val);
 		if (rc) {
 			dev_err(led->dev->parent, "can't read wled max current"
 				" config register rc=%d\n", rc);
@@ -665,7 +665,7 @@
 		val = (val & ~WLED_MAX_CURR_MASK) | led->max_current;
 
 		rc = pm8xxx_writeb(led->dev->parent,
-				WLED_MAX_CURR_CFG_REG(i + 2), val);
+				WLED_MAX_CURR_CFG_REG(i), val);
 		if (rc) {
 			dev_err(led->dev->parent, "can't write wled max current"
 				" config register rc=%d\n", rc);
diff --git a/drivers/media/dvb/dvb-core/demux.h b/drivers/media/dvb/dvb-core/demux.h
index 7190af8..f802a38 100644
--- a/drivers/media/dvb/dvb-core/demux.h
+++ b/drivers/media/dvb/dvb-core/demux.h
@@ -205,6 +205,8 @@
 			dmx_ts_data_ready_cb callback);
 	int (*notify_data_read)(struct dmx_ts_feed *feed,
 			u32 bytes_num);
+	int (*set_tsp_out_format) (struct dmx_ts_feed *feed,
+				enum dmx_tsp_format_t tsp_format);
 };
 
 /*--------------------------------------------------------------------------*/
@@ -336,6 +338,8 @@
 	void* priv;                  /* Pointer to private data of the API client */
 	struct data_buffer dvr_input; /* DVR input buffer */
 
+	struct dentry *debugfs_demux_dir; /* debugfs dir */
+
 	int (*open) (struct dmx_demux* demux);
 	int (*close) (struct dmx_demux* demux);
 	int (*write) (struct dmx_demux *demux, const char *buf, size_t count);
@@ -367,9 +371,6 @@
 	int (*set_tsp_format) (struct dmx_demux *demux,
 				enum dmx_tsp_format_t tsp_format);
 
-	int (*set_tsp_out_format) (struct dmx_demux *demux,
-				enum dmx_tsp_format_t tsp_format);
-
 	int (*set_playback_mode) (struct dmx_demux *demux,
 				 enum dmx_playback_mode_t mode,
 				 dmx_ts_fullness ts_fullness_callback,
diff --git a/drivers/media/dvb/dvb-core/dmxdev.c b/drivers/media/dvb/dvb-core/dmxdev.c
index 625eb78..507c014 100644
--- a/drivers/media/dvb/dvb-core/dmxdev.c
+++ b/drivers/media/dvb/dvb-core/dmxdev.c
@@ -32,6 +32,8 @@
 #include <linux/wait.h>
 #include <linux/mm.h>
 #include <asm/uaccess.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
 #include "dmxdev.h"
 
 static int debug;
@@ -463,6 +465,90 @@
 	return NULL;
 }
 
+static int dvr_input_thread_entry(void *arg)
+{
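+	/* Drain the DVR input ring buffer and write its contents to the demux */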
+	struct dmxdev *dmxdev = arg;
+	struct dvb_ringbuffer *src = &dmxdev->dvr_input_buffer;
+	int ret;
+	size_t todo;
+	size_t split;
+
+	while (1) {
+		/* wait for input */
+		ret = wait_event_interruptible(src->queue,
+						   (!src->data) ||
+					       (dvb_ringbuffer_avail(src)) ||
+					       (src->error != 0) ||
+					       (dmxdev->dvr_in_exit) ||
+					       kthread_should_stop());
+
+		if ((ret < 0) || kthread_should_stop())
+			break;
+
+		spin_lock(&dmxdev->dvr_in_lock);
+
+		if (!src->data || dmxdev->exit || dmxdev->dvr_in_exit) {
+			spin_unlock(&dmxdev->dvr_in_lock);
+			break;
+		}
+
+		if (src->error) {
+			spin_unlock(&dmxdev->dvr_in_lock);
+			wake_up_all(&src->queue);
+			break;
+		}
+
+		dmxdev->dvr_processing_input = 1;
+
+		ret = dvb_ringbuffer_avail(src);
+		todo = ret;
+
+		split = (src->pread + ret > src->size) ?
+				src->size - src->pread :
+				0;
+
+		/*
+		 * In DVR PULL mode, write might block.
+		 * The lock on the DVR buffer is released before calling
+		 * write; if the DVR was released meanwhile, dvr_in_exit
+		 * is set. The lock is acquired again when updating the
+		 * read pointer to keep the read/write pointers consistent.
+		 */
+		if (split > 0) {
+			spin_unlock(&dmxdev->dvr_in_lock);
+			dmxdev->demux->write(dmxdev->demux,
+						src->data + src->pread,
+						split);
+
+			if (dmxdev->dvr_in_exit)
+				break;
+
+			spin_lock(&dmxdev->dvr_in_lock);
+
+			todo -= split;
+			DVB_RINGBUFFER_SKIP(src, split);
+		}
+
+		spin_unlock(&dmxdev->dvr_in_lock);
+		dmxdev->demux->write(dmxdev->demux,
+					src->data + src->pread, todo);
+
+		if (dmxdev->dvr_in_exit)
+			break;
+
+		spin_lock(&dmxdev->dvr_in_lock);
+
+		DVB_RINGBUFFER_SKIP(src, todo);
+		dmxdev->dvr_processing_input = 0;
+		spin_unlock(&dmxdev->dvr_in_lock);
+
+		wake_up_all(&src->queue);
+	}
+
+	return 0;
+}
+
+
 static int dvb_dvr_open(struct inode *inode, struct file *file)
 {
 	struct dvb_device *dvbdev = file->private_data;
@@ -540,6 +626,17 @@
 		dmxdev->demux->dvr_input.priv_handle = NULL;
 		dmxdev->demux->dvr_input.ringbuff = &dmxdev->dvr_input_buffer;
 		dvbdev->writers--;
+
+		dmxdev->dvr_input_thread =
+			kthread_run(
+				dvr_input_thread_entry,
+				(void *)dmxdev,
+				"dvr_input");
+
+		if (IS_ERR(dmxdev->dvr_input_thread)) {
+			mutex_unlock(&dmxdev->mutex);
+			return -ENOMEM;
+		}
 	}
 
 	dvbdev->users++;
@@ -599,11 +696,11 @@
 			dmxdev->demux->write_cancel(dmxdev->demux);
 
 		/*
-		 * Now flush dvr-in workqueue so that no one
+		 * Now stop dvr-input thread so that no one
 		 * would process data from dvr input buffer any more
 		 * before it gets freed.
 		 */
-		flush_workqueue(dmxdev->dvr_input_workqueue);
+		kthread_stop(dmxdev->dvr_input_thread);
 
 		dvbdev->writers++;
 		dmxdev->demux->disconnect_frontend(dmxdev->demux);
@@ -771,12 +868,7 @@
 		buf += ret;
 
 		mutex_unlock(&dmxdev->mutex);
-
 		wake_up_all(&src->queue);
-
-		if (!work_pending(&dmxdev->dvr_input_work))
-			queue_work(dmxdev->dvr_input_workqueue,
-						&dmxdev->dvr_input_work);
 	}
 
 	return (count - todo) ? (count - todo) : ret;
@@ -825,87 +917,6 @@
 	return res;
 }
 
-static void dvr_input_work_func(struct work_struct *worker)
-{
-	struct dmxdev *dmxdev =
-		container_of(worker, struct dmxdev, dvr_input_work);
-	struct dvb_ringbuffer *src = &dmxdev->dvr_input_buffer;
-	int ret;
-	size_t todo;
-	size_t split;
-
-	while (1) {
-		/* wait for input */
-		ret = wait_event_interruptible(src->queue,
-						   (!src->data) ||
-					       (dvb_ringbuffer_avail(src)) ||
-					       (src->error != 0) ||
-					       (dmxdev->dvr_in_exit));
-
-		if (ret < 0)
-			break;
-
-		spin_lock(&dmxdev->dvr_in_lock);
-
-		if (!src->data || dmxdev->exit || dmxdev->dvr_in_exit) {
-			spin_unlock(&dmxdev->dvr_in_lock);
-			break;
-		}
-
-		if (src->error) {
-			spin_unlock(&dmxdev->dvr_in_lock);
-			wake_up_all(&src->queue);
-			break;
-		}
-
-		dmxdev->dvr_processing_input = 1;
-
-		ret = dvb_ringbuffer_avail(src);
-		todo = ret;
-
-		split = (src->pread + ret > src->size) ?
-				src->size - src->pread :
-				0;
-
-		/*
-		 * In DVR PULL mode, write might block.
-		 * Lock on DVR buffer is released before calling to
-		 * write, if DVR was released meanwhile, dvr_in_exit is
-		 * prompted. Lock is aquired when updating the read pointer
-		 * again to preserve read/write pointers consistancy
-		 */
-		if (split > 0) {
-			spin_unlock(&dmxdev->dvr_in_lock);
-			dmxdev->demux->write(dmxdev->demux,
-						src->data + src->pread,
-						split);
-
-			if (dmxdev->dvr_in_exit)
-				break;
-
-			spin_lock(&dmxdev->dvr_in_lock);
-
-			todo -= split;
-			DVB_RINGBUFFER_SKIP(src, split);
-		}
-
-		spin_unlock(&dmxdev->dvr_in_lock);
-		dmxdev->demux->write(dmxdev->demux,
-					src->data + src->pread, todo);
-
-		if (dmxdev->dvr_in_exit)
-			break;
-
-		spin_lock(&dmxdev->dvr_in_lock);
-
-		DVB_RINGBUFFER_SKIP(src, todo);
-		dmxdev->dvr_processing_input = 0;
-		spin_unlock(&dmxdev->dvr_in_lock);
-
-		wake_up_all(&src->queue);
-	}
-}
-
 static int dvb_dvr_set_buffer_size(struct dmxdev *dmxdev,
 						unsigned int f_flags,
 						unsigned long size)
@@ -1203,10 +1214,6 @@
 
 	wake_up_all(&buffer->queue);
 
-	if (!work_pending(&dmxdev->dvr_input_work))
-		queue_work(dmxdev->dvr_input_workqueue,
-					&dmxdev->dvr_input_work);
-
 	return 0;
 }
 
@@ -1329,6 +1336,21 @@
 	return 0;
 }
 
+static int dvb_dmxdev_set_tsp_out_format(struct dmxdev_filter *dmxdevfilter,
+				enum dmx_tsp_format_t dmx_tsp_format)
+{
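+	/* Changing the packet output format is only allowed before the filter is started */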
+	if (dmxdevfilter->state >= DMXDEV_STATE_GO)
+		return -EBUSY;
+
+	if ((dmx_tsp_format > DMX_TSP_FORMAT_192_HEAD) ||
+		(dmx_tsp_format < DMX_TSP_FORMAT_188))
+		return -EINVAL;
+
+	dmxdevfilter->dmx_tsp_format = dmx_tsp_format;
+
+	return 0;
+}
+
 static int dvb_dmxdev_set_pes_buffer_size(struct dmxdev_filter *dmxdevfilter,
 					unsigned long size)
 {
@@ -2253,6 +2275,9 @@
 		return ret;
 	}
 
+	if (tsfeed->set_tsp_out_format)
+		tsfeed->set_tsp_out_format(tsfeed, filter->dmx_tsp_format);
+
 	/* Support indexing for video PES */
 	if ((para->pes_type == DMX_PES_VIDEO0) ||
 	    (para->pes_type == DMX_PES_VIDEO1) ||
@@ -2463,6 +2488,8 @@
 
 	dmxdevfilter->pes_buffer_size = 32768;
 
+	dmxdevfilter->dmx_tsp_format = DMX_TSP_FORMAT_188;
+
 	dvbdev->users++;
 
 	mutex_unlock(&dmxdev->mutex);
@@ -2852,19 +2879,15 @@
 		break;
 
 	case DMX_SET_TS_OUT_FORMAT:
-		if (!dmxdev->demux->set_tsp_out_format) {
-			ret = -EINVAL;
-			break;
+		if (mutex_lock_interruptible(&dmxdevfilter->mutex)) {
+			mutex_unlock(&dmxdev->mutex);
+			return -ERESTARTSYS;
 		}
 
-		if (dmxdevfilter->state >= DMXDEV_STATE_GO) {
-			ret = -EBUSY;
-			break;
-		}
-
-		ret = dmxdev->demux->set_tsp_out_format(
-				dmxdev->demux,
+		ret = dvb_dmxdev_set_tsp_out_format(dmxdevfilter,
 				*(enum dmx_tsp_format_t *)parg);
+
+		mutex_unlock(&dmxdevfilter->mutex);
 		break;
 
 	case DMX_SET_DECODER_BUFFER_SIZE:
@@ -3164,6 +3187,73 @@
 	.fops = &dvb_dvr_fops
 };
 
+
+/**
+ * debugfs service to print information about the active filters.
+ */
+static int dvb_dmxdev_dbgfs_print(struct seq_file *s, void *p)
+{
+	int i;
+	struct dmxdev *dmxdev = s->private;
+	struct dmxdev_filter *filter;
+	int active_count = 0;
+	struct dmx_buffer_status buffer_status;
+	const char *pes_feeds[] = {"DEC", "PES", "DVR", "REC"};
+
+	if (!dmxdev)
+		return 0;
+
+	for (i = 0; i < dmxdev->filternum; i++) {
+		filter = &dmxdev->filter[i];
+		if (filter->state >= DMXDEV_STATE_GO) {
+			active_count++;
+
+			seq_printf(s, "filter_%02d - ", i);
+
+			if (filter->type == DMXDEV_TYPE_SEC) {
+				seq_printf(s, "type: SEC, ");
+				seq_printf(s, "PID: %04d ",
+						filter->params.sec.pid);
+			} else {
+				seq_printf(s, "type: %s, ",
+					pes_feeds[filter->params.pes.output]);
+				seq_printf(s, "PID: %04d ",
+						filter->params.pes.pid);
+			}
+
+			if (0 == dvb_dmxdev_get_buffer_status(
+						filter, &buffer_status)) {
+				seq_printf(s, "buffer size: %08d, ",
+					buffer_status.size);
+				seq_printf(s, "buffer fullness: %08d\n",
+					buffer_status.fullness);
+				seq_printf(s, "buffer error: %08d\n",
+					buffer_status.error);
+			}
+		}
+	}
+
+	if (!active_count)
+		seq_printf(s, "No active filters\n");
+
+	seq_printf(s, "\n");
+
+	return 0;
+}
+
+static int dvb_dmxdev_dbgfs_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, dvb_dmxdev_dbgfs_print, inode->i_private);
+}
+
+static const struct file_operations dbgfs_filters_fops = {
+	.open = dvb_dmxdev_dbgfs_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+	.owner = THIS_MODULE,
+};
+
 int dvb_dmxdev_init(struct dmxdev *dmxdev, struct dvb_adapter *dvb_adapter)
 {
 	int i;
@@ -3175,14 +3265,6 @@
 	if (!dmxdev->filter)
 		return -ENOMEM;
 
-	dmxdev->dvr_input_workqueue =
-		create_singlethread_workqueue("dvr_workqueue");
-
-	if (dmxdev->dvr_input_workqueue == NULL) {
-		vfree(dmxdev->filter);
-		return -ENOMEM;
-	}
-
 	dmxdev->playback_mode = DMX_PB_MODE_PUSH;
 
 	mutex_init(&dmxdev->mutex);
@@ -3203,8 +3285,10 @@
 	dvb_ringbuffer_init(&dmxdev->dvr_buffer, NULL, 8192);
 	dvb_ringbuffer_init(&dmxdev->dvr_input_buffer, NULL, 8192);
 
-	INIT_WORK(&dmxdev->dvr_input_work,
-			  dvr_input_work_func);
+	if (dmxdev->demux->debugfs_demux_dir)
+		debugfs_create_file("filters", S_IRUGO,
+			dmxdev->demux->debugfs_demux_dir, dmxdev,
+			&dbgfs_filters_fops);
 
 	return 0;
 }
@@ -3223,9 +3307,6 @@
 				dmxdev->dvr_dvbdev->users==1);
 	}
 
-	flush_workqueue(dmxdev->dvr_input_workqueue);
-	destroy_workqueue(dmxdev->dvr_input_workqueue);
-
 	dvb_unregister_device(dmxdev->dvbdev);
 	dvb_unregister_device(dmxdev->dvr_dvbdev);
 
diff --git a/drivers/media/dvb/dvb-core/dmxdev.h b/drivers/media/dvb/dvb-core/dmxdev.h
index e30c2c3..d1c1cc3 100644
--- a/drivers/media/dvb/dvb-core/dmxdev.h
+++ b/drivers/media/dvb/dvb-core/dmxdev.h
@@ -34,7 +34,7 @@
 #include <linux/string.h>
 #include <linux/mutex.h>
 #include <linux/slab.h>
-#include <linux/workqueue.h>
+#include <linux/kthread.h>
 #include <linux/dvb/dmx.h>
 
 #include "dvbdev.h"
@@ -122,6 +122,8 @@
 	/* relevent for decoder PES */
 	unsigned long pes_buffer_size;
 
+	/* for recording output */
+	enum dmx_tsp_format_t dmx_tsp_format;
 	u32 rec_chunk_size;
 
 	/* only for sections */
@@ -131,8 +133,6 @@
 };
 
 struct dmxdev {
-	struct work_struct dvr_input_work;
-
 	struct dvb_device *dvbdev;
 	struct dvb_device *dvr_dvbdev;
 
@@ -165,7 +165,7 @@
 
 	struct dvb_ringbuffer dvr_input_buffer;
 	enum dmx_buffer_mode dvr_input_buffer_mode;
-	struct workqueue_struct *dvr_input_workqueue;
+	struct task_struct *dvr_input_thread;
 
 #define DVR_BUFFER_SIZE (10*188*1024)
 
diff --git a/drivers/media/dvb/dvb-core/dvb_demux.c b/drivers/media/dvb/dvb-core/dvb_demux.c
index de7b28d..6d66d45 100644
--- a/drivers/media/dvb/dvb-core/dvb_demux.c
+++ b/drivers/media/dvb/dvb-core/dvb_demux.c
@@ -42,6 +42,8 @@
 */
 // #define DVB_DEMUX_SECTION_LOSS_LOG
 
+#define TIMESTAMP_LEN	4
+
 static int dvb_demux_tscheck;
 module_param(dvb_demux_tscheck, int, 0644);
 MODULE_PARM_DESC(dvb_demux_tscheck,
@@ -417,17 +419,16 @@
 
 static inline void dvb_dmx_swfilter_output_packet(
 	struct dvb_demux_feed *feed,
-	const u8 *buf)
+	const u8 *buf,
+	const u8 timestamp[TIMESTAMP_LEN])
 {
-	u8 time_stamp[4] = {0};
-	struct dvb_demux *demux = feed->demux;
-
 	/*
 	 * if we output 192 packet with timestamp at head of packet,
 	 * output the timestamp now before the 188 TS packet
 	 */
-	if (demux->tsp_out_format == DMX_TSP_FORMAT_192_HEAD)
-		feed->cb.ts(time_stamp, 4, NULL, 0, &feed->feed.ts, DMX_OK);
+	if (feed->tsp_out_format == DMX_TSP_FORMAT_192_HEAD)
+		feed->cb.ts(timestamp, TIMESTAMP_LEN, NULL,
+			0, &feed->feed.ts, DMX_OK);
 
 	feed->cb.ts(buf, 188, NULL, 0, &feed->feed.ts, DMX_OK);
 
@@ -435,8 +436,9 @@
 	 * if we output 192 packet with timestamp at tail of packet,
 	 * output the timestamp now after the 188 TS packet
 	 */
-	if (demux->tsp_out_format == DMX_TSP_FORMAT_192_TAIL)
-		feed->cb.ts(time_stamp, 4, NULL, 0, &feed->feed.ts, DMX_OK);
+	if (feed->tsp_out_format == DMX_TSP_FORMAT_192_TAIL)
+		feed->cb.ts(timestamp, TIMESTAMP_LEN, NULL,
+			0, &feed->feed.ts, DMX_OK);
 }
 
 static inline void dvb_dmx_configure_decoder_fullness(
@@ -571,7 +573,7 @@
 }
 
 static inline void dvb_dmx_swfilter_packet_type(struct dvb_demux_feed *feed,
-						const u8 *buf)
+			const u8 *buf, const u8 timestamp[TIMESTAMP_LEN])
 {
 	switch (feed->type) {
 	case DMX_TYPE_TS:
@@ -581,7 +583,8 @@
 			if (feed->ts_type & TS_PAYLOAD_ONLY)
 				dvb_dmx_swfilter_payload(feed, buf);
 			else
-				dvb_dmx_swfilter_output_packet(feed, buf);
+				dvb_dmx_swfilter_output_packet(feed,
+						buf, timestamp);
 		}
 		if (feed->ts_type & TS_DECODER)
 			if (feed->demux->write_to_decoder)
@@ -605,7 +608,8 @@
 	((f)->feed.ts.is_filtering) &&					\
 	(((f)->ts_type & (TS_PACKET | TS_DEMUX)) == TS_PACKET))
 
-static void dvb_dmx_swfilter_packet(struct dvb_demux *demux, const u8 *buf)
+static void dvb_dmx_swfilter_packet(struct dvb_demux *demux, const u8 *buf,
+				const u8 timestamp[TIMESTAMP_LEN])
 {
 	struct dvb_demux_feed *feed;
 	u16 pid = ts_pid(buf);
@@ -678,10 +682,10 @@
 			continue;
 
 		if (feed->pid == pid)
-			dvb_dmx_swfilter_packet_type(feed, buf);
+			dvb_dmx_swfilter_packet_type(feed, buf, timestamp);
 		else if ((feed->pid == 0x2000) &&
 			     (feed->feed.ts.is_filtering))
-			dvb_dmx_swfilter_output_packet(feed, buf);
+			dvb_dmx_swfilter_output_packet(feed, buf, timestamp);
 	}
 }
 
@@ -735,6 +739,7 @@
 			      size_t count)
 {
 	struct timespec pre_time;
+	u8 timestamp[TIMESTAMP_LEN] = {0};
 
 	if (dvb_demux_performancecheck)
 		pre_time = current_kernel_time();
@@ -746,7 +751,7 @@
 
 	while (count--) {
 		if (buf[0] == 0x47)
-			dvb_dmx_swfilter_packet(demux, buf);
+			dvb_dmx_swfilter_packet(demux, buf, timestamp);
 		buf += 188;
 	}
 
@@ -794,6 +799,7 @@
 	int p = 0, i, j;
 	const u8 *q;
 	struct timespec pre_time;
+	u8 timestamp[TIMESTAMP_LEN];
 
 	if (dvb_demux_performancecheck)
 		pre_time = current_kernel_time();
@@ -812,12 +818,23 @@
 			goto bailout;
 		}
 		memcpy(&demux->tsbuf[i], buf, j);
+
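+		/* For 192-byte packets, pick up the 4-byte timestamp carried alongside the TS packet */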
+		if (pktsize == 192) {
+			if (leadingbytes)
+				memcpy(timestamp, &buf[p], TIMESTAMP_LEN);
+			else
+				memcpy(timestamp, &buf[188], TIMESTAMP_LEN);
+		} else {
+			memset(timestamp, 0, TIMESTAMP_LEN);
+		}
+
 		if (pktsize == 192 &&
 			leadingbytes &&
 			demux->tsbuf[leadingbytes] == 0x47)  /* double check */
-			dvb_dmx_swfilter_packet(demux, demux->tsbuf+4);
+			dvb_dmx_swfilter_packet(demux,
+				demux->tsbuf + TIMESTAMP_LEN, timestamp);
 		else if (demux->tsbuf[0] == 0x47) /* double check */
-			dvb_dmx_swfilter_packet(demux, demux->tsbuf);
+			dvb_dmx_swfilter_packet(demux, demux->tsbuf, timestamp);
 		demux->tsbufp = 0;
 		p += j;
 	}
@@ -841,10 +858,18 @@
 			q = demux->tsbuf;
 		}
 
-		if (pktsize == 192 && leadingbytes)
-			q = &buf[p+leadingbytes];
+		if (pktsize == 192) {
+			if (leadingbytes) {
+				q = &buf[p+leadingbytes];
+				memcpy(timestamp, &buf[p], TIMESTAMP_LEN);
+			} else {
+				memcpy(timestamp, &buf[188], TIMESTAMP_LEN);
+			}
+		} else {
+			memset(timestamp, 0, TIMESTAMP_LEN);
+		}
 
-		dvb_dmx_swfilter_packet(demux, q);
+		dvb_dmx_swfilter_packet(demux, q, timestamp);
 		p += pktsize;
 	}
 
@@ -891,7 +916,7 @@
 		break;
 
 	case DMX_TSP_FORMAT_192_HEAD:
-		_dvb_dmx_swfilter(demux, buf, count, 192, 4);
+		_dvb_dmx_swfilter(demux, buf, count, 192, TIMESTAMP_LEN);
 		break;
 
 	case DMX_TSP_FORMAT_204:
@@ -1154,6 +1179,25 @@
 	return 0;
 }
 
+static int dmx_ts_set_tsp_out_format(
+	struct dmx_ts_feed *ts_feed,
+	enum dmx_tsp_format_t tsp_format)
+{
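+	/* The output packet format can only be changed while the feed is stopped */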
+	struct dvb_demux_feed *feed = (struct dvb_demux_feed *)ts_feed;
+	struct dvb_demux *dvbdmx = feed->demux;
+
+	mutex_lock(&dvbdmx->mutex);
+
+	if (feed->state == DMX_STATE_GO) {
+		mutex_unlock(&dvbdmx->mutex);
+		return -EINVAL;
+	}
+
+	feed->tsp_out_format = tsp_format;
+	mutex_unlock(&dvbdmx->mutex);
+	return 0;
+}
+
 static int dvbdmx_allocate_ts_feed(struct dmx_demux *dmx,
 				   struct dmx_ts_feed **ts_feed,
 				   dmx_ts_cb callback)
@@ -1175,6 +1219,7 @@
 	feed->pid = 0xffff;
 	feed->peslen = 0;
 	feed->buffer = NULL;
+	feed->tsp_out_format = DMX_TSP_FORMAT_188;
 	memset(&feed->indexing_params, 0,
 			sizeof(struct dmx_indexing_video_params));
 
@@ -1193,6 +1238,7 @@
 	(*ts_feed)->stop_filtering = dmx_ts_feed_stop_filtering;
 	(*ts_feed)->set = dmx_ts_feed_set;
 	(*ts_feed)->set_indexing_params = dmx_ts_set_indexing_params;
+	(*ts_feed)->set_tsp_out_format = dmx_ts_set_tsp_out_format;
 	(*ts_feed)->get_decoder_buff_status = dmx_ts_feed_decoder_buff_status;
 	(*ts_feed)->data_ready_cb = dmx_ts_feed_data_ready_cb;
 	(*ts_feed)->notify_data_read = NULL;
@@ -1689,23 +1735,6 @@
 	return 0;
 }
 
-static int dvbdmx_set_tsp_out_format(
-	struct dmx_demux *demux,
-	enum dmx_tsp_format_t tsp_format)
-{
-	struct dvb_demux *dvbdemux = (struct dvb_demux *)demux;
-
-	if ((tsp_format > DMX_TSP_FORMAT_192_HEAD) ||
-		(tsp_format < DMX_TSP_FORMAT_188))
-		return -EINVAL;
-
-	mutex_lock(&dvbdemux->mutex);
-
-	dvbdemux->tsp_out_format = tsp_format;
-	mutex_unlock(&dvbdemux->mutex);
-	return 0;
-}
-
 int dvb_dmx_init(struct dvb_demux *dvbdemux)
 {
 	int i;
@@ -1732,19 +1761,20 @@
 			"demux%d",
 			dvb_demux_index++);
 
-	dvbdemux->debugfs_demux_dir = debugfs_create_dir(dvbdemux->alias, NULL);
+	dvbdemux->dmx.debugfs_demux_dir =
+		debugfs_create_dir(dvbdemux->alias, NULL);
 
-	if (dvbdemux->debugfs_demux_dir != NULL) {
+	if (dvbdemux->dmx.debugfs_demux_dir != NULL) {
 		debugfs_create_u32(
 			"total_processing_time",
 			S_IRUGO|S_IWUGO,
-			dvbdemux->debugfs_demux_dir,
+			dvbdemux->dmx.debugfs_demux_dir,
 			&dvbdemux->total_process_time);
 
 		debugfs_create_u32(
 			"total_crc_time",
 			S_IRUGO|S_IWUGO,
-			dvbdemux->debugfs_demux_dir,
+			dvbdemux->dmx.debugfs_demux_dir,
 			&dvbdemux->total_crc_time);
 	}
 
@@ -1775,7 +1805,6 @@
 	dvbdemux->tsbufp = 0;
 
 	dvbdemux->tsp_format = DMX_TSP_FORMAT_188;
-	dvbdemux->tsp_out_format = DMX_TSP_FORMAT_188;
 
 	if (!dvbdemux->check_crc32)
 		dvbdemux->check_crc32 = dvb_dmx_crc32;
@@ -1805,7 +1834,6 @@
 	dmx->get_pes_pids = dvbdmx_get_pes_pids;
 
 	dmx->set_tsp_format = dvbdmx_set_tsp_format;
-	dmx->set_tsp_out_format = dvbdmx_set_tsp_out_format;
 
 	mutex_init(&dvbdemux->mutex);
 	spin_lock_init(&dvbdemux->lock);
@@ -1817,9 +1845,10 @@
 
 void dvb_dmx_release(struct dvb_demux *dvbdemux)
 {
-	if (dvbdemux->debugfs_demux_dir != NULL)
-		debugfs_remove_recursive(dvbdemux->debugfs_demux_dir);
+	if (dvbdemux->dmx.debugfs_demux_dir != NULL)
+		debugfs_remove_recursive(dvbdemux->dmx.debugfs_demux_dir);
 
+	dvb_demux_index--;
 	vfree(dvbdemux->cnt_storage);
 	vfree(dvbdemux->filter);
 	vfree(dvbdemux->feed);
diff --git a/drivers/media/dvb/dvb-core/dvb_demux.h b/drivers/media/dvb/dvb-core/dvb_demux.h
index 5a32363..4e6dfaf 100644
--- a/drivers/media/dvb/dvb-core/dvb_demux.h
+++ b/drivers/media/dvb/dvb-core/dvb_demux.h
@@ -90,6 +90,7 @@
 	u16 pid;
 	u8 *buffer;
 	int buffer_size;
+	enum dmx_tsp_format_t tsp_out_format;
 
 	struct timespec timeout;
 	struct dvb_demux_filter *filter;
@@ -155,7 +156,6 @@
 	uint32_t speed_pkts_cnt; /* for TS speed check */
 
 	enum dmx_tsp_format_t tsp_format;
-	enum dmx_tsp_format_t tsp_out_format;
 
 	enum dmx_playback_mode_t playback_mode;
 	int sw_filter_abort;
@@ -174,7 +174,6 @@
 
 	u32 total_process_time;
 	u32 total_crc_time;
-	struct dentry *debugfs_demux_dir;
 };
 
 int dvb_dmx_init(struct dvb_demux *dvbdemux);
diff --git a/drivers/media/dvb/mpq/demux/mpq_dmx_plugin_common.c b/drivers/media/dvb/mpq/demux/mpq_dmx_plugin_common.c
index 18c3767..51d66cd 100644
--- a/drivers/media/dvb/mpq/demux/mpq_dmx_plugin_common.c
+++ b/drivers/media/dvb/mpq/demux/mpq_dmx_plugin_common.c
@@ -477,29 +477,29 @@
 	mpq_demux->hw_notification_size = 0;
 	mpq_demux->decoder_tsp_drop_count = 0;
 
-	if (mpq_demux->demux.debugfs_demux_dir != NULL) {
+	if (mpq_demux->demux.dmx.debugfs_demux_dir != NULL) {
 		debugfs_create_u32(
 			"hw_notification_rate",
 			S_IRUGO|S_IWUGO,
-			mpq_demux->demux.debugfs_demux_dir,
+			mpq_demux->demux.dmx.debugfs_demux_dir,
 			&mpq_demux->hw_notification_rate);
 
 		debugfs_create_u32(
 			"hw_notification_count",
 			S_IRUGO|S_IWUGO,
-			mpq_demux->demux.debugfs_demux_dir,
+			mpq_demux->demux.dmx.debugfs_demux_dir,
 			&mpq_demux->hw_notification_count);
 
 		debugfs_create_u32(
 			"hw_notification_size",
 			S_IRUGO|S_IWUGO,
-			mpq_demux->demux.debugfs_demux_dir,
+			mpq_demux->demux.dmx.debugfs_demux_dir,
 			&mpq_demux->hw_notification_size);
 
 		debugfs_create_u32(
 			"decoder_tsp_drop_count",
 			S_IRUGO|S_IWUGO,
-			mpq_demux->demux.debugfs_demux_dir,
+			mpq_demux->demux.dmx.debugfs_demux_dir,
 			&mpq_demux->decoder_tsp_drop_count);
 	}
 }
diff --git a/drivers/media/radio/radio-iris.c b/drivers/media/radio/radio-iris.c
index fde7cb7..363b541 100644
--- a/drivers/media/radio/radio-iris.c
+++ b/drivers/media/radio/radio-iris.c
@@ -2535,7 +2535,6 @@
 		case SCAN_FOR_WEAK:
 			radio->srch_st_list.srch_list_dir = dir;
 			radio->srch_st_list.srch_list_mode = srch;
-			radio->srch_st_list.srch_list_max = 0;
 			retval = hci_fm_search_station_list(
 				&radio->srch_st_list, radio->fm_hdev);
 			break;
@@ -3170,6 +3169,7 @@
 		radio->srch_rds.srch_pi = ctrl->value;
 		break;
 	case V4L2_CID_PRIVATE_IRIS_SRCH_CNT:
+		radio->srch_st_list.srch_list_max = ctrl->value;
 		break;
 	case V4L2_CID_PRIVATE_IRIS_SPACING:
 		if (radio->mode == FM_RECV) {
diff --git a/drivers/media/video/msm/io/msm_io_8960.c b/drivers/media/video/msm/io/msm_io_8960.c
index 808cc32..1b56578 100644
--- a/drivers/media/video/msm/io/msm_io_8960.c
+++ b/drivers/media/video/msm/io/msm_io_8960.c
@@ -103,6 +103,14 @@
 		} else
 			CDBG("%s: Bus Client NOT Registered!!!\n", __func__);
 		break;
+	case S_ADV_VIDEO:
+		if (bus_perf_client) {
+			rc = msm_bus_scale_client_update_request(
+				bus_perf_client, 7);
+			CDBG("%s: S_ADV_VIDEO rc = %d\n", __func__, rc);
+		} else
+			CDBG("%s: Bus Client NOT Registered!!!\n", __func__);
+		break;
 	case S_DEFAULT:
 		break;
 	default:
diff --git a/drivers/media/video/msm/server/msm_cam_server.c b/drivers/media/video/msm/server/msm_cam_server.c
index 49543a4..b2a7f71 100644
--- a/drivers/media/video/msm/server/msm_cam_server.c
+++ b/drivers/media/video/msm/server/msm_cam_server.c
@@ -1645,6 +1645,7 @@
 
 static const struct v4l2_ioctl_ops msm_ioctl_ops_server = {
 	.vidioc_subscribe_event = msm_server_v4l2_subscribe_event,
+	.vidioc_unsubscribe_event = msm_server_v4l2_unsubscribe_event,
 	.vidioc_default = msm_ioctl_server,
 };
 
diff --git a/drivers/media/video/msm/server/msm_cam_server.h b/drivers/media/video/msm/server/msm_cam_server.h
index 5e39d25..387c254 100644
--- a/drivers/media/video/msm/server/msm_cam_server.h
+++ b/drivers/media/video/msm/server/msm_cam_server.h
@@ -17,7 +17,7 @@
 #include <linux/proc_fs.h>
 #include <linux/ioctl.h>
 #include <mach/camera.h>
-#include "msm.h"
+#include "../msm.h"
 
 uint32_t msm_cam_server_get_mctl_handle(void);
 struct iommu_domain *msm_cam_server_get_domain(void);
diff --git a/drivers/media/video/msm/vfe/msm_vfe32.c b/drivers/media/video/msm/vfe/msm_vfe32.c
index db0db36..a382d53 100644
--- a/drivers/media/video/msm/vfe/msm_vfe32.c
+++ b/drivers/media/video/msm/vfe/msm_vfe32.c
@@ -1464,8 +1464,10 @@
 	CDBG("VFE operation mode = 0x%x, output mode = 0x%x\n",
 		vfe32_ctrl->share_ctrl->operation_mode,
 		vfe32_ctrl->share_ctrl->outpath.output_mode);
-		msm_camera_io_w_mb(1, vfe32_ctrl->share_ctrl->vfebase +
-			VFE_CAMIF_COMMAND);
+	msm_camera_io_w_mb(1, vfe32_ctrl->share_ctrl->vfebase +
+		VFE_CAMIF_COMMAND);
+	msm_camera_io_w_mb(VFE_AXI_CFG_MASK,
+		vfe32_ctrl->share_ctrl->vfebase + VFE_AXI_CFG);
 }
 
 static int vfe32_start_recording(
@@ -5789,7 +5791,7 @@
 void axi_start(struct msm_cam_media_controller *pmctl,
 	struct axi_ctrl_t *axi_ctrl, struct msm_camera_vfe_params_t vfe_params)
 {
-	int rc = 0;
+	int rc = 0, bus_vector_idx = 0;
 	uint32_t reg_update = 0;
 	uint32_t vfe_mode =
 		(axi_ctrl->share_ctrl->current_mode &
@@ -5811,9 +5813,22 @@
 			pmctl->sdata->pdata->cam_bus_scale_table, S_CAPTURE);
 		break;
 	case AXI_CMD_RECORD:
+		if (cpu_is_msm8930() || cpu_is_msm8930aa() ||
+			cpu_is_msm8930ab()) {
+			if (axi_ctrl->share_ctrl->current_mode &
+				VFE_OUTPUTS_PREVIEW_AND_VIDEO
+			|| axi_ctrl->share_ctrl->current_mode &
+				VFE_OUTPUTS_VIDEO_AND_PREVIEW)
+				bus_vector_idx = S_VIDEO;
+			else
+				bus_vector_idx = S_ADV_VIDEO;
+		} else {
+			bus_vector_idx = S_VIDEO;
+		}
 		if (!axi_ctrl->share_ctrl->dual_enabled)
 			msm_camio_bus_scale_cfg(
-			pmctl->sdata->pdata->cam_bus_scale_table, S_VIDEO);
+			pmctl->sdata->pdata->cam_bus_scale_table,
+			bus_vector_idx);
 		return;
 	case AXI_CMD_ZSL:
 		if (!axi_ctrl->share_ctrl->dual_enabled)
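
The recording and liveshot paths in the hunks above and below now distinguish
MSM8930-class targets: plain recording requests the new S_ADV_VIDEO bus vector
unless preview and video outputs run concurrently, liveshot on those chips
always requests it, and other targets keep S_VIDEO (msm_io_8960.c above maps
S_ADV_VIDEO to bus-scale usecase index 7). The selection reduces to a small
predicate; the enum values here are illustrative stand-ins, not the driver's:

enum bus_vector { BUS_VEC_VIDEO, BUS_VEC_ADV_VIDEO };

enum bus_vector pick_record_bus_vector(int is_8930_class,
				       int concurrent_preview_video)
{
	if (is_8930_class && !concurrent_preview_video)
		return BUS_VEC_ADV_VIDEO;	/* higher-bandwidth vector */
	return BUS_VEC_VIDEO;
}
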
@@ -6046,6 +6061,8 @@
 	uint32_t vfe_mode =
 	axi_ctrl->share_ctrl->current_mode & ~(VFE_OUTPUTS_RDI0|
 		VFE_OUTPUTS_RDI1);
+	int bus_vector_idx = 0;
+
 	switch (vfe_params.cmd_type) {
 	case AXI_CMD_PREVIEW:
 	case AXI_CMD_CAPTURE:
@@ -6059,9 +6076,17 @@
 			pmctl->sdata->pdata->cam_bus_scale_table, S_PREVIEW);
 		return;
 	case AXI_CMD_LIVESHOT:
-		if (!axi_ctrl->share_ctrl->dual_enabled)
+		if (!axi_ctrl->share_ctrl->dual_enabled) {
+			bus_vector_idx = S_VIDEO;
+
+			if (cpu_is_msm8930() || cpu_is_msm8930aa() ||
+				cpu_is_msm8930ab())
+				bus_vector_idx = S_ADV_VIDEO;
+
 			msm_camio_bus_scale_cfg(
-			pmctl->sdata->pdata->cam_bus_scale_table, S_VIDEO);
+			pmctl->sdata->pdata->cam_bus_scale_table,
+			bus_vector_idx);
+		}
 		return;
 	default:
 		return;
diff --git a/drivers/media/video/msm/vfe/msm_vfe32.h b/drivers/media/video/msm/vfe/msm_vfe32.h
index f985221..169c34e 100644
--- a/drivers/media/video/msm/vfe/msm_vfe32.h
+++ b/drivers/media/video/msm/vfe/msm_vfe32.h
@@ -913,6 +913,7 @@
 #define VFE_DMI_ADDR                    0x0000059C
 #define VFE_DMI_DATA_HI                 0x000005A0
 #define VFE_DMI_DATA_LO                 0x000005A4
+#define VFE_AXI_CFG                     0x00000600
 #define VFE_BUS_IO_FORMAT_CFG           0x000006F8
 #define VFE_PIXEL_IF_CFG                0x000006FC
 #define VFE_RDI0_CFG                    0x00000734
@@ -923,6 +924,8 @@
 #define VFE33_DMI_DATA_HI               0x000005A0
 #define VFE33_DMI_DATA_LO               0x000005A4
 
+#define VFE_AXI_CFG_MASK                0xFFFFFFFF
+
 #define VFE32_OUTPUT_MODE_PT			BIT(0)
 #define VFE32_OUTPUT_MODE_S			BIT(1)
 #define VFE32_OUTPUT_MODE_V			BIT(2)
diff --git a/drivers/media/video/msm_vidc/msm_smem.c b/drivers/media/video/msm_vidc/msm_smem.c
index 3dd2193..83f33a1 100644
--- a/drivers/media/video/msm_vidc/msm_smem.c
+++ b/drivers/media/video/msm_vidc/msm_smem.c
@@ -24,7 +24,7 @@
 static int get_device_address(struct ion_client *clnt,
 		struct ion_handle *hndl, int domain_num, int partition_num,
 		unsigned long align, unsigned long *iova,
-		unsigned long *buffer_size)
+		unsigned long *buffer_size, int flags)
 {
 	int rc;
 	if (!iova || !buffer_size || !hndl || !clnt) {
@@ -36,25 +36,39 @@
 		align = 4096;
 	dprintk(VIDC_DBG, "domain: %d, partition: %d\n",
 		domain_num, partition_num);
+	if (flags & SMEM_SECURE) {
+		if (flags & SMEM_INPUT)
+			rc = msm_ion_secure_buffer(clnt, hndl, 0x1, 0);
+		else
+			rc = msm_ion_secure_buffer(clnt, hndl, 0x2, 0);
+		if (rc) {
+			dprintk(VIDC_ERR, "Failed to secure memory\n");
+			goto mem_secure_failed;
+		}
+	}
 	rc = ion_map_iommu(clnt, hndl, domain_num, partition_num, align,
 			0, iova, buffer_size, 0, 0);
 	if (rc)
 		dprintk(VIDC_ERR,
 		"ion_map_iommu failed(%d).domain: %d,partition: %d\n",
 		rc, domain_num, partition_num);
-
+mem_secure_failed:
 	return rc;
 }
 
 static void put_device_address(struct ion_client *clnt,
-		struct ion_handle *hndl, int domain_num, int partition_num)
+	struct ion_handle *hndl, int domain_num, int partition_num, int flags)
 {
 	ion_unmap_iommu(clnt, hndl, domain_num, partition_num);
+	if (flags & SMEM_SECURE) {
+		if (msm_ion_unsecure_buffer(clnt, hndl))
+			dprintk(VIDC_ERR, "Failed to unsecure memory\n");
+	}
 }
 
 static int ion_user_to_kernel(struct smem_client *client,
 			int fd, u32 offset, int domain, int partition,
-			struct msm_smem *mem)
+			struct msm_smem *mem, int flags)
 {
 	struct ion_handle *hndl;
 	unsigned long iova = 0;
@@ -70,8 +84,9 @@
 	mem->kvaddr = NULL;
 	mem->domain = domain;
 	mem->partition_num = partition;
+	mem->flags = flags;
 	rc = get_device_address(client->clnt, hndl, mem->domain,
-		mem->partition_num, 4096, &iova, &buffer_size);
+		mem->partition_num, 4096, &iova, &buffer_size, flags);
 	if (rc) {
 		dprintk(VIDC_ERR, "Failed to get device address: %d\n", rc);
 		goto fail_device_address;
@@ -101,11 +116,16 @@
 	unsigned long ionflags = 0;
 	unsigned long heap_mask = 0;
 	int rc = 0;
-	if (flags == SMEM_CACHED)
+	if (flags & SMEM_CACHED)
 		ionflags = ION_SET_CACHED(ionflags);
 	else
 		ionflags = ION_SET_UNCACHED(ionflags);
 
+	if (flags & SMEM_SECURE) {
+		ionflags |= ION_SECURE;
+		size = (size + 0xfffff) & (~0xfffff);
+	}
+
 	heap_mask = ION_HEAP(ION_CP_MM_HEAP_ID);
 	if (align < 4096)
 		align = 4096;
@@ -124,6 +144,7 @@
 	mem->smem_priv = hndl;
 	mem->domain = domain;
 	mem->partition_num = partition;
+	mem->flags = flags;
 	if (map_kernel) {
 		mem->kvaddr = ion_map_kernel(client->clnt, hndl);
 		if (!mem->kvaddr) {
@@ -136,7 +157,7 @@
 		mem->kvaddr = NULL;
 
 	rc = get_device_address(client->clnt, hndl, mem->domain,
-		mem->partition_num, align, &iova, &buffer_size);
+		mem->partition_num, align, &iova, &buffer_size, flags);
 	if (rc) {
 		dprintk(VIDC_ERR, "Failed to get device address: %d\n",
 			rc);
@@ -160,7 +181,8 @@
 {
 	if (mem->device_addr)
 		put_device_address(client->clnt,
-			mem->smem_priv, mem->domain, mem->partition_num);
+			mem->smem_priv, mem->domain,
+			mem->partition_num, mem->flags);
 	if (mem->kvaddr)
 		ion_unmap_kernel(client->clnt, mem->smem_priv);
 	if (mem->smem_priv)
@@ -182,7 +204,7 @@
 }
 
 struct msm_smem *msm_smem_user_to_kernel(void *clt, int fd, u32 offset,
-	int domain, int partition)
+	int domain, int partition, int flags)
 {
 	struct smem_client *client = clt;
 	int rc = 0;
@@ -199,7 +221,7 @@
 	switch (client->mem_type) {
 	case SMEM_ION:
 		rc = ion_user_to_kernel(clt, fd, offset,
-			domain, partition, mem);
+			domain, partition, mem, flags);
 		break;
 	default:
 		dprintk(VIDC_ERR, "Mem type not supported\n");
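
In msm_smem.c the buffer property argument becomes a bitmask (see the enum
change in msm_smem.h below): SMEM_CACHED can now be combined with SMEM_SECURE
and SMEM_INPUT, secure allocations are rounded up to a 1 MB multiple and
tagged ION_SECURE, and get_device_address()/put_device_address() bracket the
IOMMU map/unmap with msm_ion_secure_buffer()/msm_ion_unsecure_buffer(). The
subtle consequence is that the old equality test on the cache property no
longer works; a small compilable illustration of why the check switched from
== to & (the constants mirror the new msm_smem.h values):

#include <stdio.h>

enum smem_prop {
	SMEM_CACHED = 0x1,
	SMEM_SECURE = 0x2,
	SMEM_INPUT  = 0x4,
};

int main(void)
{
	int flags = SMEM_CACHED | SMEM_SECURE;

	/* equality would report this cached-and-secure buffer as uncached */
	printf("equality test: %s\n",
	       flags == SMEM_CACHED ? "cached" : "uncached");
	/* bitwise test still sees the cached bit */
	printf("bitmask test:  %s\n",
	       flags & SMEM_CACHED ? "cached" : "uncached");
	return 0;
}
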
diff --git a/drivers/media/video/msm_vidc/msm_smem.h b/drivers/media/video/msm_vidc/msm_smem.h
index c109abd..8241fdd 100644
--- a/drivers/media/video/msm_vidc/msm_smem.h
+++ b/drivers/media/video/msm_vidc/msm_smem.h
@@ -20,9 +20,10 @@
 	SMEM_ION,
 };
 
-enum smem_cache_prop {
-	SMEM_CACHED,
-	SMEM_UNCACHED,
+enum smem_prop {
+	SMEM_CACHED = 0x1,
+	SMEM_SECURE = 0x2,
+	SMEM_INPUT = 0x4,
 };
 
 struct msm_smem {
@@ -32,6 +33,7 @@
 	unsigned long device_addr;
 	int domain;
 	int partition_num;
+	int flags;
 	void *smem_priv;
 };
 
@@ -41,6 +43,6 @@
 void msm_smem_free(void *clt, struct msm_smem *mem);
 void msm_smem_delete_client(void *clt);
 struct msm_smem *msm_smem_user_to_kernel(void *clt, int fd, u32 offset, int
-		domain, int partition);
+		domain, int partition, int flags);
 int msm_smem_clean_invalidate(void *clt, struct msm_smem *mem);
 #endif
diff --git a/drivers/media/video/msm_vidc/msm_v4l2_vidc.c b/drivers/media/video/msm_vidc/msm_v4l2_vidc.c
index 912fad4..4f2373e 100644
--- a/drivers/media/video/msm_vidc/msm_v4l2_vidc.c
+++ b/drivers/media/video/msm_vidc/msm_v4l2_vidc.c
@@ -756,6 +756,8 @@
 	struct msm_v4l2_vid_inst *v4l2_inst;
 	int plane = 0;
 	int i, rc = 0;
+	int smem_flags = 0;
+	int domain;
 	vidc_inst = get_vidc_inst(file, fh);
 	v4l2_inst = get_v4l2_inst(file, fh);
 	if (!v4l2_inst->mem_client) {
@@ -776,6 +778,7 @@
 		goto exit;
 	}
 	for (i = 0; i < b->length; ++i) {
+		smem_flags = 0;
 		if (EXTRADATA_IDX(b->length) &&
 			(i == EXTRADATA_IDX(b->length)) &&
 			!b->m.planes[i].length) {
@@ -792,8 +795,22 @@
 			kfree(binfo);
 			goto exit;
 		}
+		if ((vidc_inst->mode == VIDC_SECURE)
+				&& (!EXTRADATA_IDX(b->length)
+					|| (i != EXTRADATA_IDX(b->length)))) {
+			smem_flags |= SMEM_SECURE;
+			domain =
+			vidc_inst->core->resources.io_map[CP_MAP].domain;
+		} else
+			domain =
+			vidc_inst->core->resources.io_map[NS_MAP].domain;
+
+		if (b->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+			smem_flags |= SMEM_INPUT;
+
 		temp = get_same_fd_buffer(&v4l2_inst->registered_bufs,
 				b->m.planes[i].reserved[0], &plane);
+
 		if (temp) {
 			binfo->type = b->type;
 			binfo->fd[i] = b->m.planes[i].reserved[0];
@@ -807,8 +824,7 @@
 			handle = msm_smem_user_to_kernel(v4l2_inst->mem_client,
 			b->m.planes[i].reserved[0],
 			b->m.planes[i].reserved[1],
-			vidc_inst->core->resources.io_map[NS_MAP].domain,
-			0);
+			domain, 0, smem_flags);
 			if (!handle) {
 				dprintk(VIDC_ERR,
 					"Failed to get device buffer address\n");
@@ -1119,9 +1135,11 @@
 					- SHARED_QSIZE;
 			partition[1].size = SHARED_QSIZE;
 			layout.npartitions = 2;
+			layout.is_secure = 0;
 		} else {
 			partition[0].size = io_map[i].addr_range[1];
 			layout.npartitions = 1;
+			layout.is_secure = 1;
 		}
 		layout.partitions = &partition[0];
 		layout.client_name = io_map[i].name;
diff --git a/drivers/media/video/msm_vidc/msm_vdec.c b/drivers/media/video/msm_vidc/msm_vdec.c
index 22063d4..a95a296 100644
--- a/drivers/media/video/msm_vidc/msm_vdec.c
+++ b/drivers/media/video/msm_vidc/msm_vdec.c
@@ -47,6 +47,27 @@
 	"Decode Order",
 	NULL
 };
+static const char *const mpeg_video_vidc_extradata[] = {
+	"Extradata none",
+	"Extradata MB Quantization",
+	"Extradata Interlace Video",
+	"Extradata VC1 Framedisp",
+	"Extradata VC1 Seqdisp",
+	"Extradata timestamp",
+	"Extradata S3D Frame Packing",
+	"Extradata Frame Rate",
+	"Extradata Panscan Window",
+	"Extradata Recovery point SEI",
+	"Extradata Closed Caption UD",
+	"Extradata AFD UD",
+	"Extradata Multislice info",
+	"Extradata number of concealed MB",
+	"Extradata metadata filler",
+	"Extradata input crop",
+	"Extradata digital zoom",
+	"Extradata aspect ratio",
+};
+
 static const struct msm_vidc_ctrl msm_vdec_ctrls[] = {
 	{
 		.id = V4L2_CID_MPEG_VIDC_VIDEO_STREAM_FORMAT,
@@ -156,10 +177,48 @@
 		.minimum = V4L2_MPEG_VIDC_VIDEO_SYNC_FRAME_DECODE_DISABLE,
 		.maximum = V4L2_MPEG_VIDC_VIDEO_SYNC_FRAME_DECODE_ENABLE,
 		.default_value = V4L2_MPEG_VIDC_VIDEO_SYNC_FRAME_DECODE_DISABLE,
-		.step = 1,
+	},
+	{
+		.id = V4L2_CID_MPEG_VIDC_VIDEO_SECURE,
+		.name = "Secure mode",
+		.type = V4L2_CTRL_TYPE_BUTTON,
+		.minimum = 0,
+		.maximum = 0,
+		.default_value = 0,
+		.step = 0,
 		.menu_skip_mask = 0,
 		.qmenu = NULL,
 	},
+	{
+		.id = V4L2_CID_MPEG_VIDC_VIDEO_EXTRADATA,
+		.name = "Extradata Type",
+		.type = V4L2_CTRL_TYPE_MENU,
+		.minimum = V4L2_MPEG_VIDC_EXTRADATA_NONE,
+		.maximum = V4L2_MPEG_VIDC_INDEX_EXTRADATA_ASPECT_RATIO,
+		.default_value = V4L2_MPEG_VIDC_EXTRADATA_NONE,
+		.menu_skip_mask = ~(
+			(1 << V4L2_MPEG_VIDC_EXTRADATA_NONE) |
+			(1 << V4L2_MPEG_VIDC_EXTRADATA_MB_QUANTIZATION) |
+			(1 << V4L2_MPEG_VIDC_EXTRADATA_INTERLACE_VIDEO) |
+			(1 << V4L2_MPEG_VIDC_EXTRADATA_VC1_FRAMEDISP) |
+			(1 << V4L2_MPEG_VIDC_EXTRADATA_VC1_SEQDISP) |
+			(1 << V4L2_MPEG_VIDC_EXTRADATA_TIMESTAMP) |
+			(1 << V4L2_MPEG_VIDC_EXTRADATA_S3D_FRAME_PACKING) |
+			(1 << V4L2_MPEG_VIDC_EXTRADATA_FRAME_RATE) |
+			(1 << V4L2_MPEG_VIDC_EXTRADATA_PANSCAN_WINDOW) |
+			(1 << V4L2_MPEG_VIDC_EXTRADATA_RECOVERY_POINT_SEI) |
+			(1 << V4L2_MPEG_VIDC_EXTRADATA_CLOSED_CAPTION_UD) |
+			(1 << V4L2_MPEG_VIDC_EXTRADATA_AFD_UD) |
+			(1 << V4L2_MPEG_VIDC_EXTRADATA_MULTISLICE_INFO) |
+			(1 << V4L2_MPEG_VIDC_EXTRADATA_NUM_CONCEALED_MB) |
+			(1 << V4L2_MPEG_VIDC_EXTRADATA_METADATA_FILLER) |
+			(1 << V4L2_MPEG_VIDC_INDEX_EXTRADATA_INPUT_CROP) |
+			(1 << V4L2_MPEG_VIDC_INDEX_EXTRADATA_DIGITAL_ZOOM) |
+			(1 << V4L2_MPEG_VIDC_INDEX_EXTRADATA_ASPECT_RATIO)
+			),
+		.qmenu = mpeg_video_vidc_extradata,
+		.step = 0,
+	},
 };
 
 #define NUM_CTRLS ARRAY_SIZE(msm_vdec_ctrls)
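
The new V4L2_CID_MPEG_VIDC_VIDEO_EXTRADATA control above is a menu control
whose menu_skip_mask ORs together every defined item and then inverts the
result: in the V4L2 control framework a set bit in menu_skip_mask hides that
menu index, so the complement of the allowed set exposes exactly the listed
extradata types. A small numeric illustration of that convention (the index
values here are made up for the example):

#include <stdio.h>

int main(void)
{
	/* Suppose a menu control defines indices 0..4 and only 0, 2 and 3
	 * should be selectable: set bits mark items to *skip*, so the mask
	 * is the complement of the allowed set over the valid indices. */
	unsigned int allowed = (1u << 0) | (1u << 2) | (1u << 3);
	unsigned int skip_mask = ~allowed & 0x1Fu;	/* indices 0..4 */

	printf("skip_mask = 0x%x\n", skip_mask);	/* 0x12: skips 1 and 4 */
	return 0;
}
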
@@ -518,6 +577,9 @@
 					fmt->get_frame_size(i,
 						f->fmt.pix_mp.height,
 						f->fmt.pix_mp.width);
+				inst->bufq[OUTPUT_PORT].
+					vb2_bufq.plane_sizes[i] =
+					f->fmt.pix_mp.plane_fmt[i].sizeimage;
 			}
 		} else {
 			f->fmt.pix_mp.plane_fmt[0].sizeimage =
@@ -527,6 +589,11 @@
 				f->fmt.pix_mp.plane_fmt[extra_idx].sizeimage =
 		inst->buff_req.buffer[HAL_BUFFER_EXTRADATA_OUTPUT].buffer_size;
 			}
+			for (i = 0; i < fmt->num_planes; ++i)
+				inst->bufq[CAPTURE_PORT].
+					vb2_bufq.plane_sizes[i] =
+					f->fmt.pix_mp.plane_fmt[i].sizeimage;
+
 		}
 	} else {
 		dprintk(VIDC_ERR,
@@ -624,6 +691,10 @@
 			}
 		}
 		f->fmt.pix_mp.num_planes = fmt->num_planes;
+		for (i = 0; i < fmt->num_planes; ++i) {
+			inst->bufq[CAPTURE_PORT].vb2_bufq.plane_sizes[i] =
+				f->fmt.pix_mp.plane_fmt[i].sizeimage;
+		}
 	} else if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
 		inst->prop.width = f->fmt.pix_mp.width;
 		inst->prop.height = f->fmt.pix_mp.height;
@@ -652,6 +723,10 @@
 			fmt->get_frame_size(0, f->fmt.pix_mp.height,
 					f->fmt.pix_mp.width);
 		f->fmt.pix_mp.num_planes = fmt->num_planes;
+		for (i = 0; i < fmt->num_planes; ++i) {
+			inst->bufq[OUTPUT_PORT].vb2_bufq.plane_sizes[i] =
+				f->fmt.pix_mp.plane_fmt[i].sizeimage;
+		}
 	}
 err_invalid_fmt:
 	return rc;
@@ -933,7 +1008,8 @@
 	case V4L2_DEC_CMD_STOP:
 		rc = msm_comm_release_scratch_buffers(inst);
 		if (rc)
-			pr_err("Failed to release scratch buffers: %d\n", rc);
+			dprintk(VIDC_ERR,
+				"Failed to release scratch buffers: %d\n", rc);
 		rc = msm_comm_release_persist_buffers(inst);
 		if (rc)
 			pr_err("Failed to release persist buffers: %d\n", rc);
@@ -1002,17 +1078,8 @@
 	void *pdata;
 	struct msm_vidc_inst *inst = container_of(ctrl->handler,
 				struct msm_vidc_inst, ctrl_handler);
-	rc = msm_comm_try_state(inst, MSM_VIDC_OPEN_DONE);
-
-	if (rc) {
-		dprintk(VIDC_ERR,
-			"Failed to move inst: %p to start done state\n", inst);
-		goto failed_open_done;
-	}
-
 	control.id = ctrl->id;
 	control.value = ctrl->val;
-
 	switch (control.id) {
 	case V4L2_CID_MPEG_VIDC_VIDEO_STREAM_FORMAT:
 		property_id =
@@ -1067,10 +1134,29 @@
 		hal_property.enable = control.value;
 		pdata = &hal_property;
 		break;
+	case V4L2_CID_MPEG_VIDC_VIDEO_SECURE:
+		inst->mode = VIDC_SECURE;
+		dprintk(VIDC_DBG, "Setting secure mode to: %d\n", inst->mode);
+		break;
+	case V4L2_CID_MPEG_VIDC_VIDEO_EXTRADATA:
+	{
+		struct hal_extradata_enable extra;
+		property_id = HAL_PARAM_INDEX_EXTRADATA;
+		extra.index = msm_comm_get_hal_extradata_index(control.value);
+		extra.enable = 1;
+		pdata = &extra;
+		break;
+	}
 	default:
 		break;
-		}
+	}
 	if (property_id) {
+		rc = msm_comm_try_state(inst, MSM_VIDC_OPEN_DONE);
+		if (rc) {
+			dprintk(VIDC_ERR,
+			"Failed to move inst: %p to start done state\n", inst);
+			goto failed_open_done;
+		}
 		dprintk(VIDC_DBG,
 			"Control: HAL property=%d,ctrl_id=%d,ctrl_value=%d\n",
 			property_id,
@@ -1082,11 +1168,10 @@
 		}
 	if (rc)
 		dprintk(VIDC_ERR, "Failed to set hal property for framesize\n");
-
 failed_open_done:
-
 	return rc;
 }
+
 static int msm_vdec_op_g_volatile_ctrl(struct v4l2_ctrl *ctrl)
 {
 	return 0;
diff --git a/drivers/media/video/msm_vidc/msm_venc.c b/drivers/media/video/msm_vidc/msm_venc.c
index d53da9e..d01841d 100644
--- a/drivers/media/video/msm_vidc/msm_venc.c
+++ b/drivers/media/video/msm_vidc/msm_venc.c
@@ -23,7 +23,6 @@
 #define DEFAULT_WIDTH 1280
 #define MIN_NUM_OUTPUT_BUFFERS 4
 #define MAX_NUM_OUTPUT_BUFFERS 8
-#define MAX_INPUT_BUFFERS 32
 #define MIN_BIT_RATE 64000
 #define MAX_BIT_RATE 160000000
 #define DEFAULT_BIT_RATE 64000
@@ -594,11 +593,11 @@
 		spin_lock_irqsave(&inst->lock, flags);
 		*num_buffers = inst->buff_req.buffer[0].buffer_count_actual =
 			max(*num_buffers, inst->buff_req.buffer[0].
-			buffer_count_actual);
+				buffer_count_actual);
 		spin_unlock_irqrestore(&inst->lock, flags);
 		property_id = HAL_PARAM_BUFFER_COUNT_ACTUAL;
 		new_buf_count.buffer_type = HAL_BUFFER_INPUT;
-		new_buf_count.buffer_count_actual = MAX_INPUT_BUFFERS;
+		new_buf_count.buffer_count_actual = *num_buffers;
 		rc = vidc_hal_session_set_property(inst->session,
 					property_id, &new_buf_count);
 		dprintk(VIDC_DBG, "size = %d, alignment = %d, count = %d\n",
diff --git a/drivers/media/video/msm_vidc/msm_vidc.c b/drivers/media/video/msm_vidc/msm_vidc.c
index 1d0124f..64897c7 100644
--- a/drivers/media/video/msm_vidc/msm_vidc.c
+++ b/drivers/media/video/msm_vidc/msm_vidc.c
@@ -413,7 +413,7 @@
 
 	inst = kzalloc(sizeof(*inst), GFP_KERNEL);
 	if (!inst) {
-		pr_err("Failed to allocate memory\n")	;
+		dprintk(VIDC_ERR, "Failed to allocate memory\n");
 		rc = -ENOMEM;
 		goto err_invalid_core;
 	}
diff --git a/drivers/media/video/msm_vidc/msm_vidc_common.c b/drivers/media/video/msm_vidc/msm_vidc_common.c
index 02773ff..cda0ea8 100644
--- a/drivers/media/video/msm_vidc/msm_vidc_common.c
+++ b/drivers/media/video/msm_vidc/msm_vidc_common.c
@@ -18,6 +18,7 @@
 #include <mach/iommu.h>
 #include <mach/iommu_domains.h>
 #include <mach/subsystem_restart.h>
+#include <mach/scm.h>
 
 #include "msm_vidc_common.h"
 #include "vidc_hal_api.h"
@@ -50,6 +51,18 @@
 	__mbs;\
 })
 
+#define TZBSP_MEM_PROTECT_VIDEO_VAR 0x8
+struct tzbsp_memprot {
+	u32 cp_start;
+	u32 cp_size;
+	u32 cp_nonpixel_start;
+	u32 cp_nonpixel_size;
+};
+
+struct tzbsp_resp {
+	int ret;
+};
+
 static const u32 bus_table[] = {
 	0,
 	36000,
@@ -140,6 +153,31 @@
 	return rc;
 }
 
+static int protect_cp_mem(struct msm_vidc_core *core)
+{
+	struct tzbsp_memprot memprot;
+	unsigned int resp = 0;
+	int rc = 0;
+	struct msm_vidc_iommu_info *io_map = core->resources.io_map;
+	if (!io_map) {
+		dprintk(VIDC_ERR, "invalid params: %p\n", io_map);
+		return -EINVAL;
+	}
+	memprot.cp_start = 0x0;
+	memprot.cp_size = io_map[CP_MAP].addr_range[0] +
+			io_map[CP_MAP].addr_range[1];
+	memprot.cp_nonpixel_start = 0;
+	memprot.cp_nonpixel_size = 0;
+
+	rc = scm_call(SCM_SVC_CP, TZBSP_MEM_PROTECT_VIDEO_VAR, &memprot,
+			sizeof(memprot), &resp, sizeof(resp));
+	if (rc)
+		dprintk(VIDC_ERR,
+		"Failed to protect memory, rc: %d, response: %d\n",
+		rc, resp);
+	return rc;
+}
+
 struct msm_vidc_core *get_vidc_core(int core_id)
 {
 	struct msm_vidc_core *core;
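
Before attaching the IOMMU during firmware load, the core now asks TrustZone
to protect the content-protected range via scm_call(SCM_SVC_CP,
TZBSP_MEM_PROTECT_VIDEO_VAR, ...): the request spans address 0 up to the end
of the CP_MAP partition (start + size) and leaves the non-pixel window empty.
A sketch of how the request structure is filled, reusing the tzbsp_memprot
layout added above (the scm_call itself is kernel-only and omitted here):

#include <stdint.h>

struct tzbsp_memprot {
	uint32_t cp_start;
	uint32_t cp_size;
	uint32_t cp_nonpixel_start;
	uint32_t cp_nonpixel_size;
};

/* cp_map_start/cp_map_size mirror io_map[CP_MAP].addr_range[0] and [1]. */
void fill_memprot(struct tzbsp_memprot *m,
		  uint32_t cp_map_start, uint32_t cp_map_size)
{
	m->cp_start = 0;
	m->cp_size = cp_map_start + cp_map_size;	/* end of the CP range */
	m->cp_nonpixel_start = 0;
	m->cp_nonpixel_size = 0;
}
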
@@ -676,7 +714,6 @@
 		(u32)fill_buf_done->packet_buffer1);
 	if (vb) {
 		vb->v4l2_planes[0].bytesused = fill_buf_done->filled_len1;
-
 		if (!(fill_buf_done->flags1 &
 			HAL_BUFFERFLAG_TIMESTAMPINVALID)) {
 			int64_t time_usec = fill_buf_done->timestamp_hi;
@@ -714,6 +751,10 @@
 		default:
 			break;
 		}
+		inst->count.fbd++;
+		if (fill_buf_done->filled_len1)
+			msm_vidc_debugfs_update(inst,
+				MSM_VIDC_DEBUGFS_EVENT_FBD);
 
 		dprintk(VIDC_DBG, "Filled length = %d; flags %x\n",
 				vb->v4l2_planes[0].bytesused,
@@ -722,7 +763,6 @@
 		vb2_buffer_done(vb, VB2_BUF_STATE_DONE);
 		mutex_unlock(&inst->bufq[CAPTURE_PORT].lock);
 		wake_up(&inst->kernel_event_queue);
-		msm_vidc_debugfs_update(inst, MSM_VIDC_DEBUGFS_EVENT_FBD);
 	} else {
 		/*
 		 * FIXME:
@@ -925,13 +965,16 @@
 		rc = -ENOMEM;
 		goto fail_subsystem_get;
 	}
-
 	rc = msm_comm_enable_clks(core);
 	if (rc) {
 		dprintk(VIDC_ERR, "Failed to enable clocks: %d\n", rc);
 		goto fail_enable_clks;
 	}
-
+	rc = protect_cp_mem(core);
+	if (rc) {
+		dprintk(VIDC_ERR, "Failed to protect memory\n");
+		goto fail_iommu_attach;
+	}
 	rc = msm_comm_iommu_attach(core);
 	if (rc) {
 		dprintk(VIDC_ERR, "Failed to attach iommu");
@@ -954,10 +997,10 @@
 		return;
 	}
 	if (core->resources.fw.cookie) {
-		subsystem_put(core->resources.fw.cookie);
-		core->resources.fw.cookie = NULL;
 		msm_comm_iommu_detach(core);
 		msm_comm_disable_clks(core);
+		subsystem_put(core->resources.fw.cookie);
+		core->resources.fw.cookie = NULL;
 	}
 }
 
@@ -1432,6 +1475,25 @@
 	return rc;
 }
 
+static int get_flipped_state(int present_state,
+	int desired_state)
+{
+	int flipped_state = present_state;
+	if (flipped_state < MSM_VIDC_STOP
+			&& desired_state > MSM_VIDC_STOP) {
+		flipped_state = MSM_VIDC_STOP + (MSM_VIDC_STOP - flipped_state);
+		flipped_state &= 0xFFFE;
+		flipped_state = flipped_state - 1;
+	} else if (flipped_state > MSM_VIDC_STOP
+			&& desired_state < MSM_VIDC_STOP) {
+		flipped_state = MSM_VIDC_STOP -
+			(flipped_state - MSM_VIDC_STOP + 1);
+		flipped_state &= 0xFFFE;
+		flipped_state = flipped_state - 1;
+	}
+	return flipped_state;
+}
+
 int msm_comm_try_state(struct msm_vidc_inst *inst, int state)
 {
 	int rc = 0;
@@ -1458,89 +1520,77 @@
 				"Core is in bad state can't change the state");
 		goto exit;
 	}
-	flipped_state = inst->state;
-	if (flipped_state < MSM_VIDC_STOP
-			&& state > MSM_VIDC_STOP) {
-		flipped_state = MSM_VIDC_STOP + (MSM_VIDC_STOP - flipped_state);
-		flipped_state &= 0xFFFE;
-		flipped_state = flipped_state - 1;
-	} else if (flipped_state > MSM_VIDC_STOP
-			&& state < MSM_VIDC_STOP) {
-		flipped_state = MSM_VIDC_STOP -
-			(flipped_state - MSM_VIDC_STOP + 1);
-		flipped_state &= 0xFFFE;
-		flipped_state = flipped_state - 1;
-	}
+	flipped_state = get_flipped_state(inst->state, state);
 	dprintk(VIDC_DBG,
 			"flipped_state = 0x%x\n", flipped_state);
 	switch (flipped_state) {
 	case MSM_VIDC_CORE_UNINIT_DONE:
 	case MSM_VIDC_CORE_INIT:
 		rc = msm_comm_init_core(inst);
-		if (rc || state <= inst->state)
+		if (rc || state <= get_flipped_state(inst->state, state))
 			break;
 	case MSM_VIDC_CORE_INIT_DONE:
 		rc = msm_comm_init_core_done(inst);
-		if (rc || state <= inst->state)
+		if (rc || state <= get_flipped_state(inst->state, state))
 			break;
 	case MSM_VIDC_OPEN:
 		rc = msm_comm_session_init(flipped_state, inst);
-		if (rc || state <= inst->state)
+		if (rc || state <= get_flipped_state(inst->state, state))
 			break;
 	case MSM_VIDC_OPEN_DONE:
 		rc = wait_for_state(inst, flipped_state, MSM_VIDC_OPEN_DONE,
 			SESSION_INIT_DONE);
-		if (rc || state <= inst->state)
+		if (rc || state <= get_flipped_state(inst->state, state))
 			break;
 	case MSM_VIDC_LOAD_RESOURCES:
 		rc = msm_vidc_load_resources(flipped_state, inst);
-		if (rc || state <= inst->state)
+		if (rc || state <= get_flipped_state(inst->state, state))
 			break;
 	case MSM_VIDC_LOAD_RESOURCES_DONE:
 	case MSM_VIDC_START:
 		rc = msm_vidc_start(flipped_state, inst);
-		if (rc || state <= inst->state)
+		if (rc || state <= get_flipped_state(inst->state, state))
 			break;
 	case MSM_VIDC_START_DONE:
 		rc = wait_for_state(inst, flipped_state, MSM_VIDC_START_DONE,
 				SESSION_START_DONE);
-		if (rc || state <= inst->state)
+		if (rc || state <= get_flipped_state(inst->state, state))
 			break;
 	case MSM_VIDC_STOP:
 		rc = msm_vidc_stop(flipped_state, inst);
-		if (rc || state <= inst->state)
+		if (rc || state <= get_flipped_state(inst->state, state))
 			break;
 	case MSM_VIDC_STOP_DONE:
 		rc = wait_for_state(inst, flipped_state, MSM_VIDC_STOP_DONE,
 				SESSION_STOP_DONE);
-		if (rc || state <= inst->state)
+		if (rc || state <= get_flipped_state(inst->state, state))
 			break;
 		dprintk(VIDC_DBG, "Moving to Stop Done state\n");
 	case MSM_VIDC_RELEASE_RESOURCES:
 		rc = msm_vidc_release_res(flipped_state, inst);
-		if (rc || state <= inst->state)
+		if (rc || state <= get_flipped_state(inst->state, state))
 			break;
 	case MSM_VIDC_RELEASE_RESOURCES_DONE:
 		rc = wait_for_state(inst, flipped_state,
 			MSM_VIDC_RELEASE_RESOURCES_DONE,
 			SESSION_RELEASE_RESOURCE_DONE);
-		if (rc || state <= inst->state)
+		if (rc || state <= get_flipped_state(inst->state, state))
 			break;
 		dprintk(VIDC_DBG,
 				"Moving to release resources done state\n");
 	case MSM_VIDC_CLOSE:
 		rc = msm_comm_session_close(flipped_state, inst);
-		if (rc || state <= inst->state)
+		if (rc || state <= get_flipped_state(inst->state, state))
 			break;
 	case MSM_VIDC_CLOSE_DONE:
 		rc = wait_for_state(inst, flipped_state, MSM_VIDC_CLOSE_DONE,
 				SESSION_END_DONE);
-		if (rc || state <= inst->state)
+		if (rc || state <= get_flipped_state(inst->state, state))
 			break;
 	case MSM_VIDC_CORE_UNINIT:
 		dprintk(VIDC_DBG, "Sending core uninit\n");
 		rc = msm_vidc_deinit_core(inst);
-		if (rc || state == inst->state)
+		if (rc || state == get_flipped_state(inst->state, state))
 			break;
 	default:
 		dprintk(VIDC_ERR, "State not recognized\n");
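
msm_comm_try_state() keeps its fall-through switch, but the mirror-around-
MSM_VIDC_STOP computation is factored into get_flipped_state() and the
progress checks now compare the target against the flipped view of the
current state instead of the raw inst->state, so they stay on the same
(setup or teardown) side of the state machine as the requested state. The
arithmetic mirrors the state across STOP and then snaps it down to an odd
value; a stand-alone sketch, where the enum value is an illustrative stand-in
for MSM_VIDC_STOP:

#include <stdio.h>

enum { VIDC_STOP = 8 };	/* stand-in for MSM_VIDC_STOP */

static int get_flipped_state(int present, int desired)
{
	int flipped = present;

	if (flipped < VIDC_STOP && desired > VIDC_STOP) {
		flipped = VIDC_STOP + (VIDC_STOP - flipped);
		flipped &= 0xFFFE;
		flipped -= 1;
	} else if (flipped > VIDC_STOP && desired < VIDC_STOP) {
		flipped = VIDC_STOP - (flipped - VIDC_STOP + 1);
		flipped &= 0xFFFE;
		flipped -= 1;
	}
	return flipped;
}

int main(void)
{
	printf("%d\n", get_flipped_state(3, 12));	/* setup side, teardown target */
	printf("%d\n", get_flipped_state(12, 3));	/* teardown side, setup target */
	return 0;
}
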
@@ -1629,6 +1679,8 @@
 			struct vidc_seq_hdr seq_hdr;
 			int extra_idx = 0;
 			frame_data.filled_len = 0;
+			frame_data.offset = 0;
+			frame_data.alloc_len = vb->v4l2_planes[0].length;
 			frame_data.buffer_type = HAL_BUFFER_OUTPUT;
 			extra_idx =
 			EXTRADATA_IDX(inst->fmts[CAPTURE_PORT]->num_planes);
@@ -1840,6 +1892,8 @@
 	struct internal_buf *binfo;
 	struct vidc_buffer_addr_info buffer_info;
 	unsigned long flags;
+	int domain;
+	unsigned long smem_flags = 0;
 	struct hal_buffer_requirements *scratch_buf =
 		&inst->buff_req.buffer[HAL_BUFFER_INTERNAL_SCRATCH];
 	int i;
@@ -1849,14 +1903,18 @@
 		scratch_buf->buffer_size);
 	if (msm_comm_release_scratch_buffers(inst))
 		dprintk(VIDC_WARN, "Failed to release scratch buffers\n");
+	if (inst->mode == VIDC_SECURE) {
+		domain = inst->core->resources.io_map[CP_MAP].domain;
+		smem_flags |= SMEM_SECURE;
+	} else
+		domain = inst->core->resources.io_map[NS_MAP].domain;
 
 	if (scratch_buf->buffer_size) {
 		for (i = 0; i < scratch_buf->buffer_count_actual;
 				i++) {
 			handle = msm_smem_alloc(inst->mem_client,
-				scratch_buf->buffer_size, 1, SMEM_UNCACHED,
-				inst->core->resources.io_map[NS_MAP].domain,
-				0, 0);
+				scratch_buf->buffer_size, 1, smem_flags,
+				domain, 0, 0);
 			if (!handle) {
 				dprintk(VIDC_ERR,
 					"Failed to allocate scratch memory\n");
@@ -1902,6 +1960,8 @@
 	struct internal_buf *binfo;
 	struct vidc_buffer_addr_info buffer_info;
 	unsigned long flags;
+	unsigned long smem_flags = 0;
+	int domain;
 	struct hal_buffer_requirements *persist_buf =
 		&inst->buff_req.buffer[HAL_BUFFER_INTERNAL_PERSIST];
 	int i;
@@ -1915,12 +1975,17 @@
 		return rc;
 	}
 
+	if (inst->mode == VIDC_SECURE) {
+		domain = inst->core->resources.io_map[CP_MAP].domain;
+		smem_flags |= SMEM_SECURE;
+	} else
+		domain = inst->core->resources.io_map[NS_MAP].domain;
+
 	if (persist_buf->buffer_size) {
 		for (i = 0;	i <	persist_buf->buffer_count_actual; i++) {
 			handle = msm_smem_alloc(inst->mem_client,
-				persist_buf->buffer_size, 1, SMEM_UNCACHED,
-				inst->core->resources.io_map[NS_MAP].domain,
-				0, 0);
+				persist_buf->buffer_size, 1, smem_flags,
+				domain, 0, 0);
 			if (!handle) {
 				dprintk(VIDC_ERR,
 					"Failed to allocate persist memory\n");
@@ -2076,3 +2141,61 @@
 	mutex_unlock(&inst->sync_lock);
 	return rc;
 }
+
+
+enum hal_extradata_id msm_comm_get_hal_extradata_index(
+	enum v4l2_mpeg_vidc_extradata index)
+{
+	int ret = 0;
+	switch (index) {
+	case V4L2_MPEG_VIDC_EXTRADATA_NONE:
+		ret = HAL_EXTRADATA_NONE;
+		break;
+	case V4L2_MPEG_VIDC_EXTRADATA_MB_QUANTIZATION:
+		ret = HAL_EXTRADATA_MB_QUANTIZATION;
+		break;
+	case V4L2_MPEG_VIDC_EXTRADATA_INTERLACE_VIDEO:
+		ret = HAL_EXTRADATA_INTERLACE_VIDEO;
+		break;
+	case V4L2_MPEG_VIDC_EXTRADATA_VC1_FRAMEDISP:
+		ret = HAL_EXTRADATA_VC1_FRAMEDISP;
+		break;
+	case V4L2_MPEG_VIDC_EXTRADATA_VC1_SEQDISP:
+		ret = HAL_EXTRADATA_VC1_SEQDISP;
+		break;
+	case V4L2_MPEG_VIDC_EXTRADATA_TIMESTAMP:
+		ret = HAL_EXTRADATA_TIMESTAMP;
+		break;
+	case V4L2_MPEG_VIDC_EXTRADATA_S3D_FRAME_PACKING:
+		ret = HAL_EXTRADATA_S3D_FRAME_PACKING;
+		break;
+	case V4L2_MPEG_VIDC_EXTRADATA_FRAME_RATE:
+		ret = HAL_EXTRADATA_FRAME_RATE;
+		break;
+	case V4L2_MPEG_VIDC_EXTRADATA_PANSCAN_WINDOW:
+		ret = HAL_EXTRADATA_PANSCAN_WINDOW;
+		break;
+	case V4L2_MPEG_VIDC_EXTRADATA_RECOVERY_POINT_SEI:
+		ret = HAL_EXTRADATA_RECOVERY_POINT_SEI;
+		break;
+	case V4L2_MPEG_VIDC_EXTRADATA_CLOSED_CAPTION_UD:
+		ret = HAL_EXTRADATA_CLOSED_CAPTION_UD;
+		break;
+	case V4L2_MPEG_VIDC_EXTRADATA_AFD_UD:
+		ret = HAL_EXTRADATA_AFD_UD;
+		break;
+	case V4L2_MPEG_VIDC_EXTRADATA_MULTISLICE_INFO:
+		ret = HAL_EXTRADATA_MULTISLICE_INFO;
+		break;
+	case V4L2_MPEG_VIDC_EXTRADATA_NUM_CONCEALED_MB:
+		ret = HAL_EXTRADATA_NUM_CONCEALED_MB;
+		break;
+	case V4L2_MPEG_VIDC_EXTRADATA_METADATA_FILLER:
+		ret = HAL_EXTRADATA_METADATA_FILLER;
+		break;
+	default:
+		dprintk(VIDC_WARN, "Extradata not found: %d\n", index);
+		break;
+	}
+	return ret;
+}
diff --git a/drivers/media/video/msm_vidc/msm_vidc_common.h b/drivers/media/video/msm_vidc/msm_vidc_common.h
index 7562058..28bec97 100644
--- a/drivers/media/video/msm_vidc/msm_vidc_common.h
+++ b/drivers/media/video/msm_vidc/msm_vidc_common.h
@@ -41,6 +41,8 @@
 	enum instance_state state);
 int msm_comm_unset_ocmem(struct msm_vidc_core *core);
 int msm_comm_free_ocmem(struct msm_vidc_core *core);
+enum hal_extradata_id msm_comm_get_hal_extradata_index(
+	enum v4l2_mpeg_vidc_extradata index);
 #define IS_PRIV_CTRL(idx) (\
 		(V4L2_CTRL_ID2CLASS(idx) == V4L2_CTRL_CLASS_MPEG) && \
 		V4L2_CTRL_DRIVER_PRIV(idx))
diff --git a/drivers/media/video/msm_vidc/msm_vidc_debug.c b/drivers/media/video/msm_vidc/msm_vidc_debug.c
index 914c422..f91d0dd 100644
--- a/drivers/media/video/msm_vidc/msm_vidc_debug.c
+++ b/drivers/media/video/msm_vidc/msm_vidc_debug.c
@@ -202,28 +202,27 @@
 	switch (e) {
 	case MSM_VIDC_DEBUGFS_EVENT_ETB:
 		inst->count.etb++;
-		if (inst->count.ftb > inst->count.fbd) {
+		if (inst->count.ebd && inst->count.ftb > inst->count.fbd) {
 			d->pdata[FRAME_PROCESSING].name[0] = '\0';
 			tic(inst, FRAME_PROCESSING, a);
 		}
 	break;
 	case MSM_VIDC_DEBUGFS_EVENT_EBD:
 		inst->count.ebd++;
-		if (inst->count.ebd == inst->count.etb)
+		if (inst->count.ebd && inst->count.ebd == inst->count.etb)
 			toc(inst, FRAME_PROCESSING);
 	break;
 	case MSM_VIDC_DEBUGFS_EVENT_FTB: {
 		inst->count.ftb++;
-		if (inst->count.etb > inst->count.ebd) {
+		if (inst->count.ebd && inst->count.etb > inst->count.ebd) {
 			d->pdata[FRAME_PROCESSING].name[0] = '\0';
 			tic(inst, FRAME_PROCESSING, a);
 		}
 	}
 	break;
 	case MSM_VIDC_DEBUGFS_EVENT_FBD:
-		inst->count.fbd++;
-		inst->debug.counter++;
-		if (inst->count.fbd == inst->count.ftb)
+		inst->debug.samples++;
+		if (inst->count.ebd && inst->count.fbd == inst->count.ftb)
 			toc(inst, FRAME_PROCESSING);
 		break;
 	default:
diff --git a/drivers/media/video/msm_vidc/msm_vidc_debug.h b/drivers/media/video/msm_vidc/msm_vidc_debug.h
index 1a51173..995daf0 100644
--- a/drivers/media/video/msm_vidc/msm_vidc_debug.h
+++ b/drivers/media/video/msm_vidc/msm_vidc_debug.h
@@ -81,23 +81,8 @@
 		do_gettimeofday(&__ddl_tv);
 		i->debug.pdata[p].stop = (__ddl_tv.tv_sec * 1000)
 		+ (__ddl_tv.tv_usec / 1000);
-		i->debug.pdata[p].cumulative =
+		i->debug.pdata[p].cumulative +=
 		(i->debug.pdata[p].stop - i->debug.pdata[p].start);
-		if (i->count.fbd) {
-			if (i->debug.pdata[p].average != 0) {
-				i->debug.pdata[p].average = ((i->debug.pdata[p].
-					average * (i->count.fbd -
-					i->debug.counter) +
-					i->debug.pdata[p].cumulative)
-					/ i->count.fbd);
-			} else {
-				i->debug.pdata[p].average =
-					i->debug.pdata[p].cumulative
-					/ i->count.fbd;
-			}
-		}
-		i->debug.counter = 0;
-		i->debug.pdata[p].cumulative = 0;
 		i->debug.pdata[p].sampling = true;
 	}
 }
@@ -110,9 +95,11 @@
 			(msm_vidc_debug & VIDC_PROF)) {
 			dprintk(VIDC_PROF, "%s averaged %d ms/sample\n",
 				i->debug.pdata[x].name,
-				i->debug.pdata[x].average);
+				i->debug.pdata[x].cumulative /
+					i->debug.samples);
 			dprintk(VIDC_PROF, "%s Samples: %d",
-					i->debug.pdata[x].name, i->count.fbd);
+					i->debug.pdata[x].name,
+					i->debug.samples);
 		}
 	}
 }
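
The frame-processing profiling is simplified here: instead of maintaining a
running average with a correction counter, the driver accumulates total time
in pdata[].cumulative, counts samples in debug.samples, and divides only when
the statistic is printed. The bookkeeping reduces to the following user-space
sketch, which also guards the division for the no-sample case:

#include <stdio.h>

struct prof {
	unsigned long long cumulative_ms;
	unsigned int samples;
};

static void prof_add(struct prof *p, unsigned int ms)
{
	p->cumulative_ms += ms;
	p->samples++;
}

static unsigned int prof_avg(const struct prof *p)
{
	return p->samples ?
		(unsigned int)(p->cumulative_ms / p->samples) : 0;
}

int main(void)
{
	struct prof p = { 0, 0 };

	prof_add(&p, 16);
	prof_add(&p, 18);
	printf("avg %u ms over %u samples\n", prof_avg(&p), p.samples);
	return 0;
}
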
diff --git a/drivers/media/video/msm_vidc/msm_vidc_internal.h b/drivers/media/video/msm_vidc/msm_vidc_internal.h
index 8e1a99e..e9295a6 100644
--- a/drivers/media/video/msm_vidc/msm_vidc_internal.h
+++ b/drivers/media/video/msm_vidc/msm_vidc_internal.h
@@ -200,7 +200,7 @@
 struct msm_vidc_debug {
 	struct profile_data pdata[MAX_PROFILING_POINTS];
 	int profile;
-	int counter;
+	int samples;
 };
 
 struct msm_vidc_ssr_info {
@@ -210,6 +210,11 @@
 	bool ssr_in_progress;
 };
 
+enum msm_vidc_mode {
+	VIDC_NON_SECURE,
+	VIDC_SECURE,
+};
+
 struct msm_vidc_core {
 	struct list_head list;
 	struct mutex sync_lock;
@@ -260,6 +265,7 @@
 	void *priv;
 	struct msm_vidc_debug debug;
 	struct buf_count count;
+	enum msm_vidc_mode mode;
 };
 
 extern struct msm_vidc_drv *vidc_driver;
diff --git a/drivers/media/video/msm_vidc/vidc_hal.c b/drivers/media/video/msm_vidc/vidc_hal.c
index 2f9d8d4..e449821 100644
--- a/drivers/media/video/msm_vidc/vidc_hal.c
+++ b/drivers/media/video/msm_vidc/vidc_hal.c
@@ -501,18 +501,27 @@
 static void vidc_hal_interface_queues_release(struct hal_device *device)
 {
 	int i;
+
+	vidc_hal_free(device->hal_client, device->mem_addr.mem_data);
+
 	for (i = 0; i < VIDC_IFACEQ_NUMQ; i++) {
-		vidc_hal_free(device->hal_client,
-			device->iface_queues[i].q_array.mem_data);
 		device->iface_queues[i].q_hdr = NULL;
 		device->iface_queues[i].q_array.mem_data = NULL;
 		device->iface_queues[i].q_array.align_virtual_addr = NULL;
 		device->iface_queues[i].q_array.align_device_addr = NULL;
 	}
-	vidc_hal_free(device->hal_client,
-				device->iface_q_table.mem_data);
 	device->iface_q_table.align_virtual_addr = NULL;
 	device->iface_q_table.align_device_addr = NULL;
+
+	device->qdss.align_virtual_addr = NULL;
+	device->qdss.align_device_addr = NULL;
+
+	device->sfr.align_virtual_addr = NULL;
+	device->sfr.align_device_addr = NULL;
+
+	device->mem_addr.align_virtual_addr = NULL;
+	device->mem_addr.align_device_addr = NULL;
+
 	msm_smem_delete_client(device->hal_client);
 	device->hal_client = NULL;
 }
@@ -524,14 +533,51 @@
 	u8 i;
 	int rc = 0;
 	struct vidc_iface_q_info *iface_q;
-
-	rc = vidc_hal_alloc((void *) &dev->iface_q_table,
-					dev->hal_client,
-			VIDC_IFACEQ_TABLE_SIZE, 1, SMEM_UNCACHED, domain);
+	struct hfi_sfr_struct *vsfr;
+	struct vidc_mem_addr *mem_addr;
+	int offset = 0;
+	int size_1m = 1024 * 1024;
+	int uc_size = (UC_SIZE + size_1m - 1) & (~(size_1m - 1));
+	mem_addr = &dev->mem_addr;
+	rc = vidc_hal_alloc((void *) mem_addr,
+			dev->hal_client, uc_size, 1,
+			0, domain);
 	if (rc) {
 		dprintk(VIDC_ERR, "iface_q_table_alloc_fail");
 		return -ENOMEM;
 	}
+	dev->iface_q_table.align_virtual_addr = mem_addr->align_virtual_addr;
+	dev->iface_q_table.align_device_addr = mem_addr->align_device_addr;
+	dev->iface_q_table.mem_size = VIDC_IFACEQ_TABLE_SIZE;
+	dev->iface_q_table.mem_data = NULL;
+	offset += dev->iface_q_table.mem_size;
+
+	for (i = 0; i < VIDC_IFACEQ_NUMQ; i++) {
+		iface_q = &dev->iface_queues[i];
+		iface_q->q_array.align_device_addr =
+			mem_addr->align_device_addr + offset;
+		iface_q->q_array.align_virtual_addr =
+			mem_addr->align_virtual_addr + offset;
+		iface_q->q_array.mem_size = VIDC_IFACEQ_QUEUE_SIZE;
+		iface_q->q_array.mem_data = NULL;
+		offset += iface_q->q_array.mem_size;
+		iface_q->q_hdr = VIDC_IFACEQ_GET_QHDR_START_ADDR(
+				dev->iface_q_table.align_virtual_addr, i);
+		vidc_hal_set_queue_hdr_defaults(iface_q->q_hdr);
+	}
+
+	dev->qdss.align_device_addr = mem_addr->align_device_addr + offset;
+	dev->qdss.align_virtual_addr = mem_addr->align_virtual_addr + offset;
+	dev->qdss.mem_size = QDSS_SIZE;
+	dev->qdss.mem_data = NULL;
+	offset += dev->qdss.mem_size;
+
+	dev->sfr.align_device_addr = mem_addr->align_device_addr + offset;
+	dev->sfr.align_virtual_addr = mem_addr->align_virtual_addr + offset;
+	dev->sfr.mem_size = SFR_SIZE;
+	dev->sfr.mem_data = NULL;
+	offset += dev->sfr.mem_size;
+
 	q_tbl_hdr = (struct hfi_queue_table_header *)
 			dev->iface_q_table.align_virtual_addr;
 	q_tbl_hdr->qtbl_version = 0;
@@ -543,23 +589,6 @@
 	q_tbl_hdr->qtbl_num_q = VIDC_IFACEQ_NUMQ;
 	q_tbl_hdr->qtbl_num_active_q = VIDC_IFACEQ_NUMQ;
 
-	for (i = 0; i < VIDC_IFACEQ_NUMQ; i++) {
-		iface_q = &dev->iface_queues[i];
-		rc = vidc_hal_alloc((void *) &iface_q->q_array,
-				dev->hal_client, VIDC_IFACEQ_QUEUE_SIZE,
-				1, SMEM_UNCACHED, domain);
-		if (rc) {
-			dprintk(VIDC_ERR, "iface_q_table_alloc[%d]_fail", i);
-			vidc_hal_interface_queues_release(dev);
-			return -ENOMEM;
-		} else {
-			iface_q->q_hdr =
-				VIDC_IFACEQ_GET_QHDR_START_ADDR(
-			dev->iface_q_table.align_virtual_addr, i);
-			vidc_hal_set_queue_hdr_defaults(iface_q->q_hdr);
-		}
-	}
-
 	iface_q = &dev->iface_queues[VIDC_IFACEQ_CMDQ_IDX];
 	q_hdr = iface_q->q_hdr;
 	q_hdr->qhdr_start_addr = (u32)
@@ -577,6 +606,12 @@
 	q_hdr->qhdr_start_addr = (u32)
 		iface_q->q_array.align_device_addr;
 	q_hdr->qhdr_type |= HFI_Q_ID_CTRL_TO_HOST_DEBUG_Q;
+
+	write_register(dev->hal_data->register_base_addr,
+			VIDC_UC_REGION_ADDR,
+			(u32) mem_addr->align_device_addr, 0);
+	write_register(dev->hal_data->register_base_addr,
+			VIDC_UC_REGION_SIZE, mem_addr->mem_size, 0);
 	write_register(dev->hal_data->register_base_addr,
 		VIDC_CPU_CS_SCIACMDARG2,
 		(u32) dev->iface_q_table.align_device_addr,
@@ -584,6 +619,15 @@
 	write_register(dev->hal_data->register_base_addr,
 		VIDC_CPU_CS_SCIACMDARG1, 0x01,
 		dev->iface_q_table.align_virtual_addr);
+	write_register(dev->hal_data->register_base_addr,
+			VIDC_MMAP_ADDR,
+			(u32) dev->qdss.align_device_addr, 0);
+
+	vsfr = (struct hfi_sfr_struct *) dev->sfr.align_virtual_addr;
+	vsfr->bufSize = SFR_SIZE;
+
+	write_register(dev->hal_data->register_base_addr,
+			VIDC_SFR_ADDR, (u32)dev->sfr.align_device_addr, 0);
 	return 0;
 }
 
@@ -595,10 +639,15 @@
 			VIDC_WRAPPER_INTR_MASK, 0x8, 0);
 	write_register(device->hal_data->register_base_addr,
 			VIDC_CPU_CS_SCIACMDARG3, 1, 0);
+
 	while (!ctrl_status && count < max_tries) {
 		ctrl_status = read_register(
 		device->hal_data->register_base_addr,
 		VIDC_CPU_CS_SCIACMDARG0);
+		if ((ctrl_status & 0xFE) == 0x4) {
+			dprintk(VIDC_ERR, "invalid setting for UC_REGION\n");
+			break;
+		}
 		usleep_range(500, 1000);
 		count++;
 	}
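
The queue setup above replaces the per-queue allocations with a single
allocation of UC_SIZE rounded up to 1 MB: the queue table, the three
interface queues, the QDSS buffer and the SFR buffer are carved out of it at
successive offsets, the region base and size are written to
VIDC_UC_REGION_ADDR/SIZE, and the boot poll now bails out if the firmware
reports an error status indicating a bad UC region setting. A user-space
sketch of the round-up-and-carve pattern (sizes below are illustrative, not
the real VIDC_IFACEQ_*/QDSS_SIZE/SFR_SIZE values):

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	size_t one_mb = 1024 * 1024;
	size_t q_tbl = 8 * 1024, queue = 56 * 1024, qdss = 4096, sfr = 4096;
	size_t need = q_tbl + 3 * queue + qdss + sfr;
	size_t total = (need + one_mb - 1) & ~(one_mb - 1); /* round up to 1 MB */
	unsigned char *base = malloc(total);
	size_t off = 0;

	if (!base)
		return 1;

	unsigned char *tbl  = base + off; off += q_tbl;	/* queue table   */
	unsigned char *cmdq = base + off; off += queue;	/* command queue */
	unsigned char *msgq = base + off; off += queue;	/* message queue */
	unsigned char *dbgq = base + off; off += queue;	/* debug queue   */
	unsigned char *qds  = base + off; off += qdss;	/* QDSS slice    */
	unsigned char *sfrb = base + off; off += sfr;	/* SFR slice     */

	printf("carved %zu bytes out of a %zu byte region\n", off, total);
	(void)tbl; (void)cmdq; (void)msgq; (void)dbgq; (void)qds; (void)sfrb;
	free(base);
	return 0;
}
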
@@ -685,6 +734,7 @@
 	spin_lock_init(&dev->read_lock);
 	spin_lock_init(&dev->write_lock);
 	set_vbif_registers(dev);
+
 	if (!dev->hal_client) {
 		dev->hal_client = msm_smem_new_client(SMEM_ION);
 		if (dev->hal_client == NULL) {
@@ -693,8 +743,7 @@
 			goto err_no_mem;
 		}
 
-		dprintk(VIDC_DBG, "Device_Virt_Address : 0x%x,"
-		"Register_Virt_Addr: 0x%x",
+		dprintk(VIDC_DBG, "Dev_Virt: 0x%x, Reg_Virt: 0x%x",
 		dev->hal_data->device_base_addr,
 		(u32) dev->hal_data->register_base_addr);
 
@@ -709,12 +758,15 @@
 		rc = -EEXIST;
 		goto err_no_mem;
 	}
+	write_register(dev->hal_data->register_base_addr,
+		VIDC_CTRL_INIT, 0x1, 0);
 	rc = vidc_hal_core_start_cpu(dev);
 	if (rc) {
 		dprintk(VIDC_ERR, "Failed to start core");
 		rc = -ENODEV;
 		goto err_no_dev;
 	}
+
 	pkt.size = sizeof(struct hfi_cmd_sys_init_packet);
 	pkt.packet_type = HFI_CMD_SYS_INIT;
 	pkt.arch_type = HFI_ARCH_OX_OFFSET;
@@ -920,6 +972,61 @@
 	}
 	return buffer;
 }
+
+
+static int get_hfi_extradata_index(enum hal_extradata_id index)
+{
+	int ret = 0;
+	switch (index) {
+	case HAL_EXTRADATA_MB_QUANTIZATION:
+		ret = HFI_PROPERTY_PARAM_VDEC_MB_QUANTIZATION;
+		break;
+	case HAL_EXTRADATA_INTERLACE_VIDEO:
+		ret = HFI_PROPERTY_PARAM_VDEC_INTERLACE_VIDEO_EXTRADATA;
+		break;
+	case HAL_EXTRADATA_VC1_FRAMEDISP:
+		ret = HFI_PROPERTY_PARAM_VDEC_VC1_FRAMEDISP_EXTRADATA;
+		break;
+	case HAL_EXTRADATA_VC1_SEQDISP:
+		ret = HFI_PROPERTY_PARAM_VDEC_VC1_SEQDISP_EXTRADATA;
+		break;
+	case HAL_EXTRADATA_TIMESTAMP:
+		ret = HFI_PROPERTY_PARAM_VDEC_TIMESTAMP_EXTRADATA;
+		break;
+	case HAL_EXTRADATA_S3D_FRAME_PACKING:
+		ret = HFI_PROPERTY_PARAM_S3D_FRAME_PACKING_EXTRADATA;
+		break;
+	case HAL_EXTRADATA_FRAME_RATE:
+		ret = HFI_PROPERTY_PARAM_VDEC_FRAME_RATE_EXTRADATA;
+		break;
+	case HAL_EXTRADATA_PANSCAN_WINDOW:
+		ret = HFI_PROPERTY_PARAM_VDEC_PANSCAN_WNDW_EXTRADATA;
+		break;
+	case HAL_EXTRADATA_RECOVERY_POINT_SEI:
+		ret = HFI_PROPERTY_PARAM_VDEC_RECOVERY_POINT_SEI_EXTRADATA;
+		break;
+	case HAL_EXTRADATA_CLOSED_CAPTION_UD:
+		ret = HFI_PROPERTY_PARAM_VDEC_CLOSED_CAPTION_EXTRADATA;
+		break;
+	case HAL_EXTRADATA_AFD_UD:
+		ret = HFI_PROPERTY_PARAM_VDEC_AFD_EXTRADATA;
+		break;
+	case HAL_EXTRADATA_MULTISLICE_INFO:
+		ret = HFI_PROPERTY_PARAM_VENC_MULTI_SLICE_INFO;
+		break;
+	case HAL_EXTRADATA_NUM_CONCEALED_MB:
+		ret = HFI_PROPERTY_PARAM_VDEC_NUM_CONCEALED_MB;
+		break;
+	case HAL_EXTRADATA_INDEX:
+		ret = HFI_PROPERTY_PARAM_EXTRA_DATA_HEADER_CONFIG;
+		break;
+	default:
+		dprintk(VIDC_WARN, "Extradata index not found: %d\n", index);
+		break;
+	}
+	return ret;
+}
+
 int vidc_hal_session_set_property(void *sess,
 	enum hal_property ptype, void *pdata)
 {
@@ -927,6 +1034,7 @@
 	struct hfi_cmd_session_set_property_packet *pkt =
 		(struct hfi_cmd_session_set_property_packet *) &packet;
 	struct hal_session *session;
+	int rc = 0;
 
 	if (!sess || !pdata) {
 		dprintk(VIDC_ERR, "Invalid Params");
@@ -1208,7 +1316,7 @@
 	{
 		struct hfi_enable *hfi;
 		pkt->rg_property_data[0] =
-			HFI_PROPERTY_PARAM_VENC_SYNC_FRAME_SEQUENCE_HEADER;
+			HFI_PROPERTY_CONFIG_VENC_SYNC_FRAME_SEQUENCE_HEADER;
 		hfi = (struct hfi_enable *) &pkt->rg_property_data[1];
 		hfi->enable = ((struct hfi_enable *) pdata)->enable;
 		pkt->size += sizeof(u32) * 2;
@@ -1476,6 +1584,30 @@
 					hfi_multi_slice_control);
 		break;
 	}
+	case HAL_PARAM_INDEX_EXTRADATA:
+	{
+		struct hfi_index_extradata_config *hfi;
+		struct hal_extradata_enable *extra = pdata;
+		int index = 0;
+		pkt->rg_property_data[0] =
+			get_hfi_extradata_index(extra->index);
+		hfi =
+			(struct hfi_index_extradata_config *)
+			&pkt->rg_property_data[1];
+		hfi->enable = extra->enable;
+		index = get_hfi_extradata_index(extra->index);
+		if (index)
+			hfi->index_extra_data_id = index;
+		else {
+			dprintk(VIDC_WARN,
+				"Failed to find extradata index: %d\n",
+				index);
+			rc = -EINVAL;
+		}
+		pkt->size += sizeof(u32) +
+			sizeof(struct hfi_index_extradata_config);
+		break;
+	}
 	case HAL_CONFIG_VPE_DEINTERLACE:
 		break;
 	/* FOLLOWING PROPERTIES ARE NOT IMPLEMENTED IN CORE YET */
@@ -1510,9 +1642,10 @@
 		dprintk(VIDC_INFO, "DEFAULT: Calling 0x%x", ptype);
 		break;
 	}
-	if (vidc_hal_iface_cmdq_write(session->device, pkt))
-		return -ENOTEMPTY;
-	return 0;
+	if (!rc)
+		rc = vidc_hal_iface_cmdq_write(session->device, pkt);
+
+	return rc;
 }
 
 int vidc_hal_session_get_property(void *sess,
@@ -1734,7 +1867,6 @@
 		((buffer_info->num_buffers - 1) * sizeof(u32));
 	pkt->packet_type = HFI_CMD_SESSION_SET_BUFFERS;
 	pkt->session_id = (u32) session;
-	pkt->buffer_mode = HFI_BUFFER_MODE_STATIC;
 	pkt->buffer_size = buffer_info->buffer_size;
 	pkt->min_buffer_size = buffer_info->buffer_size;
 	pkt->num_buffers = buffer_info->num_buffers;
@@ -1949,8 +2081,11 @@
 	pkt.packet_buffer = (u8 *) output_frame->device_addr;
 	pkt.extra_data_buffer =
 		(u8 *) output_frame->extradata_addr;
-
-	dprintk(VIDC_INFO, "### Q OUTPUT BUFFER ###");
+	pkt.alloc_len = output_frame->alloc_len;
+	pkt.filled_len = output_frame->filled_len;
+	pkt.offset = output_frame->offset;
+	dprintk(VIDC_DBG, "### Q OUTPUT BUFFER ###: %d, %d, %d\n",
+			pkt.alloc_len, pkt.filled_len, pkt.offset);
 	if (vidc_hal_iface_cmdq_write(session->device, &pkt))
 		rc = -ENOTEMPTY;
 	return rc;
@@ -2127,8 +2262,8 @@
 
 	dprintk(VIDC_INFO, " GOT INTERRUPT () ");
 	if (!device->callback) {
-		dprintk(VIDC_ERR, "No callback function	"
-					  "to process interrupt: %p\n", device);
+		dprintk(VIDC_ERR, "No interrupt callback function: %p\n",
+				device);
 		return;
 	}
 	vidc_hal_core_clear_interrupt(device);
diff --git a/drivers/media/video/msm_vidc/vidc_hal.h b/drivers/media/video/msm_vidc/vidc_hal.h
index 0e70e30..57d685b 100644
--- a/drivers/media/video/msm_vidc/vidc_hal.h
+++ b/drivers/media/video/msm_vidc/vidc_hal.h
@@ -81,6 +81,12 @@
 	(void *)((((u32)ptr) + sizeof(struct hfi_queue_table_header)) + \
 		(i * sizeof(struct hfi_queue_header)))
 
+#define QDSS_SIZE 4096
+#define SFR_SIZE 4096
+
+#define UC_SIZE (VIDC_IFACEQ_TABLE_SIZE + \
+	(VIDC_IFACEQ_QUEUE_SIZE * VIDC_IFACEQ_NUMQ) + QDSS_SIZE + SFR_SIZE)
+
 enum vidc_hw_reg {
 	VIDC_HWREG_CTRL_STATUS =  0x1,
 	VIDC_HWREG_QTBL_INFO =  0x2,
@@ -113,6 +119,9 @@
 #define HFI_BUFFERFLAG_TIMESTAMPINVALID	0x00000100
 #define HFI_BUFFERFLAG_READONLY			0x00000200
 #define HFI_BUFFERFLAG_ENDOFSUBFRAME	0x00000400
+#define HFI_BUFFERFLAG_EOSEQ			0x00200000
+#define HFI_BUFFERFLAG_DISCONTINUITY	0x80000000
+#define HFI_BUFFERFLAG_TEI				0x40000000
 
 #define HFI_ERR_SESSION_EMPTY_BUFFER_DONE_OUTPUT_PENDING	\
 	(HFI_OX_BASE + 0x1001)
@@ -120,6 +129,8 @@
 	(HFI_OX_BASE + 0x1002)
 #define HFI_ERR_SESSION_SYNC_FRAME_NOT_DETECTED		\
 	(HFI_OX_BASE + 0x1003)
+#define  HFI_ERR_SESSION_START_CODE_NOT_FOUND		\
+	(HFI_OX_BASE + 0x1004)
 
 #define HFI_BUFFER_INTERNAL_SCRATCH (HFI_OX_BASE + 0x1)
 #define HFI_BUFFER_EXTRADATA_INPUT (HFI_OX_BASE + 0x2)
@@ -129,6 +140,11 @@
 #define HFI_BUFFER_MODE_STATIC (HFI_OX_BASE + 0x1)
 #define HFI_BUFFER_MODE_RING (HFI_OX_BASE + 0x2)
 
+struct hfi_buffer_alloc_mode {
+	u32 buffer_type;
+	u32 buffer_mode;
+};
+
 #define HFI_FLUSH_INPUT (HFI_OX_BASE + 0x1)
 #define HFI_FLUSH_OUTPUT (HFI_OX_BASE + 0x2)
 #define HFI_FLUSH_OUTPUT2 (HFI_OX_BASE + 0x3)
@@ -141,7 +157,11 @@
 #define HFI_EXTRADATA_VC1_SEQDISP			0x00000004
 #define HFI_EXTRADATA_TIMESTAMP				0x00000005
 #define HFI_EXTRADATA_S3D_FRAME_PACKING		0x00000006
-#define  HFI_EXTRADATA_EOSNAL_DETECTED      0x00000007
+#define HFI_EXTRADATA_FRAME_RATE			0x00000007
+#define HFI_EXTRADATA_PANSCAN_WINDOW		0x00000008
+#define HFI_EXTRADATA_RECOVERY_POINT_SEI	0x00000009
+#define HFI_EXTRADATA_CLOSED_CAPTION_UD		0x0000000A
+#define HFI_EXTRADATA_AFD_UD				0x0000000B
 #define HFI_EXTRADATA_MULTISLICE_INFO		0x7F100000
 #define HFI_EXTRADATA_NUM_CONCEALED_MB		0x7F100001
 #define HFI_EXTRADATA_INDEX					0x7F100002
@@ -151,7 +171,7 @@
 #define HFI_INDEX_EXTRADATA_DIGITAL_ZOOM	0x07000010
 #define HFI_INDEX_EXTRADATA_ASPECT_RATIO	0x7F100003
 
-struct HFI_INDEX_EXTRADATA_CONFIG_TYPE {
+struct hfi_index_extradata_config {
 	int enable;
 	u32 index_extra_data_id;
 };
@@ -188,10 +208,14 @@
 (HFI_PROPERTY_PARAM_OX_START + 0x004)
 #define HFI_PROPERTY_PARAM_EXTRA_DATA_HEADER_CONFIG		\
 	(HFI_PROPERTY_PARAM_OX_START + 0x005)
-#define  HFI_PROPERTY_PARAM_INDEX_EXTRADATA             \
+#define HFI_PROPERTY_PARAM_INDEX_EXTRADATA             \
 	(HFI_PROPERTY_PARAM_OX_START + 0x006)
 #define HFI_PROPERTY_PARAM_DIVX_FORMAT					\
 	(HFI_PROPERTY_PARAM_OX_START + 0x007)
+#define HFI_PROPERTY_PARAM_BUFFER_ALLOC_MODE			\
+	(HFI_PROPERTY_PARAM_OX_START + 0x008)
+#define HFI_PROPERTY_PARAM_S3D_FRAME_PACKING_EXTRADATA	\
+	(HFI_PROPERTY_PARAM_OX_START + 0x009)
 
 #define HFI_PROPERTY_CONFIG_OX_START					\
 	(HFI_DOMAIN_BASE_COMMON + HFI_ARCH_OX_OFFSET + 0x02000)
@@ -233,6 +257,20 @@
 #define HFI_PROPERTY_PARAM_VDEC_THUMBNAIL_MODE   \
 	(HFI_PROPERTY_PARAM_VDEC_OX_START + 0x00D)
 
+#define HFI_PROPERTY_PARAM_VDEC_FRAME_ASSEMBLY		\
+	(HFI_PROPERTY_PARAM_VDEC_OX_START + 0x00E)
+#define HFI_PROPERTY_PARAM_VDEC_CLOSED_CAPTION_EXTRADATA	\
+	(HFI_PROPERTY_PARAM_VDEC_OX_START + 0x00F)
+#define HFI_PROPERTY_PARAM_VDEC_AFD_EXTRADATA		\
+	(HFI_PROPERTY_PARAM_VDEC_OX_START + 0x010)
+#define HFI_PROPERTY_PARAM_VDEC_VC1_FRAMEDISP_EXTRADATA		\
+	(HFI_PROPERTY_PARAM_VDEC_OX_START + 0x011)
+#define HFI_PROPERTY_PARAM_VDEC_VC1_SEQDISP_EXTRADATA		\
+	(HFI_PROPERTY_PARAM_VDEC_OX_START + 0x012)
+#define HFI_PROPERTY_PARAM_VDEC_TIMESTAMP_EXTRADATA			\
+	(HFI_PROPERTY_PARAM_VDEC_OX_START + 0x013)
+#define HFI_PROPERTY_PARAM_VDEC_INTERLACE_VIDEO_EXTRADATA	\
+	(HFI_PROPERTY_PARAM_VDEC_OX_START + 0x014)
 
 #define HFI_PROPERTY_CONFIG_VDEC_OX_START				\
 	(HFI_DOMAIN_BASE_VDEC + HFI_ARCH_OX_OFFSET + 0x0000)
@@ -249,8 +287,11 @@
 	(HFI_PROPERTY_PARAM_VENC_OX_START + 0x001)
 #define  HFI_PROPERTY_PARAM_VENC_H264_IDR_S3D_FRAME_PACKING_NAL \
 	(HFI_PROPERTY_PARAM_VENC_OX_START + 0x002)
+
 #define HFI_PROPERTY_CONFIG_VENC_OX_START				\
 	(HFI_DOMAIN_BASE_VENC + HFI_ARCH_OX_OFFSET + 0x6000)
+#define  HFI_PROPERTY_CONFIG_VENC_FRAME_QP				\
+	(HFI_PROPERTY_CONFIG_VENC_OX_START + 0x001)
 
 #define HFI_PROPERTY_PARAM_VPE_OX_START					\
 	(HFI_DOMAIN_BASE_VPE + HFI_ARCH_OX_OFFSET + 0x7000)
@@ -346,12 +387,12 @@
 };
 
 #define HFI_CMD_SYS_OX_START		\
-	(HFI_DOMAIN_BASE_COMMON + HFI_ARCH_OX_OFFSET + 0x0000)
+(HFI_DOMAIN_BASE_COMMON + HFI_ARCH_OX_OFFSET + HFI_CMD_START_OFFSET + 0x0000)
 #define HFI_CMD_SYS_SESSION_ABORT	(HFI_CMD_SYS_OX_START + 0x001)
 #define HFI_CMD_SYS_PING		(HFI_CMD_SYS_OX_START + 0x002)
 
 #define HFI_CMD_SESSION_OX_START	\
-	(HFI_DOMAIN_BASE_COMMON + HFI_ARCH_OX_OFFSET + 0x1000)
+(HFI_DOMAIN_BASE_COMMON + HFI_ARCH_OX_OFFSET + HFI_CMD_START_OFFSET + 0x1000)
 #define HFI_CMD_SESSION_LOAD_RESOURCES	(HFI_CMD_SESSION_OX_START + 0x001)
 #define HFI_CMD_SESSION_START		(HFI_CMD_SESSION_OX_START + 0x002)
 #define HFI_CMD_SESSION_STOP		(HFI_CMD_SESSION_OX_START + 0x003)
@@ -369,14 +410,14 @@
 	(HFI_CMD_SESSION_OX_START + 0x00C)
 
 #define HFI_MSG_SYS_OX_START			\
-	(HFI_DOMAIN_BASE_COMMON + HFI_ARCH_OX_OFFSET + 0x0000)
+(HFI_DOMAIN_BASE_COMMON + HFI_ARCH_OX_OFFSET + HFI_MSG_START_OFFSET + 0x0000)
 #define HFI_MSG_SYS_IDLE		(HFI_MSG_SYS_OX_START + 0x1)
 #define HFI_MSG_SYS_PING_ACK	(HFI_MSG_SYS_OX_START + 0x2)
 #define HFI_MSG_SYS_PROPERTY_INFO	(HFI_MSG_SYS_OX_START + 0x3)
 #define HFI_MSG_SYS_SESSION_ABORT_DONE	(HFI_MSG_SYS_OX_START + 0x4)
 
 #define HFI_MSG_SESSION_OX_START		\
-	(HFI_DOMAIN_BASE_COMMON + HFI_ARCH_OX_OFFSET + 0x1000)
+(HFI_DOMAIN_BASE_COMMON + HFI_ARCH_OX_OFFSET + HFI_MSG_START_OFFSET + 0x1000)
 #define HFI_MSG_SESSION_LOAD_RESOURCES_DONE	(HFI_MSG_SESSION_OX_START + 0x1)
 #define HFI_MSG_SESSION_START_DONE		(HFI_MSG_SESSION_OX_START + 0x2)
 #define HFI_MSG_SESSION_STOP_DONE		(HFI_MSG_SESSION_OX_START + 0x3)
@@ -386,9 +427,12 @@
 #define HFI_MSG_SESSION_EMPTY_BUFFER_DONE	(HFI_MSG_SESSION_OX_START + 0x7)
 #define HFI_MSG_SESSION_FILL_BUFFER_DONE	(HFI_MSG_SESSION_OX_START + 0x8)
 #define HFI_MSG_SESSION_PROPERTY_INFO		(HFI_MSG_SESSION_OX_START + 0x9)
-#define HFI_MSG_SESSION_RELEASE_RESOURCES_DONE	(HFI_MSG_SESSION_OX_START + 0xA)
+#define HFI_MSG_SESSION_RELEASE_RESOURCES_DONE	\
+	(HFI_MSG_SESSION_OX_START + 0xA)
 #define HFI_MSG_SESSION_PARSE_SEQUENCE_HEADER_DONE		\
 	(HFI_MSG_SESSION_OX_START + 0xB)
+#define  HFI_MSG_SESSION_RELEASE_BUFFERS_DONE			\
+	(HFI_MSG_SESSION_OX_START + 0xC)
 
 struct hfi_cmd_sys_session_abort_packet {
 	u32 size;
@@ -476,6 +520,9 @@
 	u32 packet_type;
 	u32 session_id;
 	u32 stream_id;
+	u32 offset;
+	u32 alloc_len;
+	u32 filled_len;
 	u32 output_tag;
 	u8 *packet_buffer;
 	u8 *extra_data_buffer;
@@ -515,6 +562,7 @@
 	u32 buffer_type;
 	u32 buffer_size;
 	u32 extra_data_size;
+	int response_req;
 	u32 num_buffers;
 	u32 rg_buffer_info[1];
 };
@@ -702,6 +750,15 @@
 	u32 error_type;
 };
 
+struct hfi_msg_session_release_buffers_done_packet {
+	u32 size;
+	u32 packet_type;
+	u32 session_id;
+	u32 error_type;
+	u32 num_buffers;
+	u32 rg_buffer_info[1];
+};
+
 struct hfi_extradata_mb_quantization_payload {
 	u8 rg_mb_qp[1];
 };
@@ -740,41 +797,26 @@
 	u32 time_stamp_high;
 };
 
-enum HFI_S3D_FP_LAYOUT {
-	HFI_S3D_FP_LAYOUT_NONE,
-	HFI_S3D_FP_LAYOUT_INTRLV_CHECKERBOARD,
-	HFI_S3D_FP_LAYOUT_INTRLV_COLUMN,
-	HFI_S3D_FP_LAYOUT_INTRLV_ROW,
-	HFI_S3D_FP_LAYOUT_SIDEBYSIDE,
-	HFI_S3D_FP_LAYOUT_TOPBOTTOM,
-	HFI_S3D_FP_LAYOUT_UNUSED = 0x10000000
-};
-
-enum HFI_S3D_FP_VIEW_ORDER {
-	HFI_S3D_FP_LEFTVIEW_FIRST,
-	HFI_S3D_FP_RIGHTVIEW_FIRST,
-	HFI_S3D_FP_UNKNOWN,
-	HFI_S3D_FP_VIEWORDER_UNUSED = 0x10000000
-};
-
-enum HFI_S3D_FP_FLIP {
-	HFI_S3D_FP_FLIP_NONE,
-	HFI_S3D_FP_FLIP_LEFT_HORIZ,
-	HFI_S3D_FP_FLIP_LEFT_VERT,
-	HFI_S3D_FP_FLIP_RIGHT_HORIZ,
-	HFI_S3D_FP_FLIP_RIGHT_VERT,
-	HFI_S3D_FP_FLIP_UNUSED = 0x10000000
-};
 
 struct hfi_extradata_s3d_frame_packing_payload {
-	enum HFI_S3D_FP_LAYOUT layout;
-	enum HFI_S3D_FP_VIEW_ORDER order;
-	enum HFI_S3D_FP_FLIP flip;
-	int quin_cunx;
-	u32 left_view_luma_site_x;
-	u32 left_view_luma_site_y;
-	u32 right_view_luma_site_x;
-	u32 right_view_luma_site_y;
+	u32 fpa_id;
+	int cancel_flag;
+	u32 fpa_type;
+	int quin_cunx_flag;
+	u32 content_interprtation_type;
+	int spatial_flipping_flag;
+	int frame0_flipped_flag;
+	int field_views_flag;
+	int current_frame_isFrame0_flag;
+	int frame0_self_contained_flag;
+	int frame1_self_contained_flag;
+	u32 frame0_graid_pos_x;
+	u32 frame0_graid_pos_y;
+	u32 frame1_graid_pos_x;
+	u32 frame1_graid_pos_y;
+	u32 fpa_reserved_byte;
+	u32 fpa_repetition_period;
+	int fpa_extension_flag;
 };
 
 struct hfi_extradata_interlace_video_payload {
@@ -813,6 +855,26 @@
 	int height;
 };
 
+struct hfi_index_extradata_aspect_ratio_payload {
+	u32 size;
+	u32 version;
+	u32 port_index;
+	u32 aspect_width;
+	u32 aspect_height;
+};
+struct hfi_extradata_panscan_wndw_payload {
+	u32 num_window;
+	struct hfi_extradata_vc1_pswnd wnd[1];
+};
+
+struct hfi_extradata_frame_type_payload {
+	u32 frame_rate;
+};
+
+struct hfi_extradata_recovery_point_sei_payload {
+	u32 flag;
+};
+
 struct vidc_mem_addr {
 	u8 *align_device_addr;
 	u8 *align_virtual_addr;
@@ -842,6 +904,9 @@
 	spinlock_t write_lock;
 	void (*callback) (u32 response, void *callback);
 	struct vidc_mem_addr iface_q_table;
+	struct vidc_mem_addr qdss;
+	struct vidc_mem_addr sfr;
+	struct vidc_mem_addr mem_addr;
 	struct vidc_iface_q_info iface_queues[VIDC_IFACEQ_NUMQ];
 	struct smem_client *hal_client;
 	struct hal_data *hal_data;
@@ -862,14 +927,6 @@
 	int dev_count;
 };
 
-struct hfi_index_extradata_aspect_ratio_payload {
-	u32 size;
-	u32 version;
-	u32 port_index;
-	u32 saspect_width;
-	u32  saspect_height;
-};
-
 extern struct hal_device_data hal_ctxt;
 
 int vidc_hal_iface_msgq_read(struct hal_device *device, void *pkt);
diff --git a/drivers/media/video/msm_vidc/vidc_hal_api.h b/drivers/media/video/msm_vidc/vidc_hal_api.h
index 8aff5af..9d20a31 100644
--- a/drivers/media/video/msm_vidc/vidc_hal_api.h
+++ b/drivers/media/video/msm_vidc/vidc_hal_api.h
@@ -72,12 +72,32 @@
 	VIDC_ERR_UNUSED = 0x10000000
 };
 
+enum hal_extradata_id {
+	HAL_EXTRADATA_NONE,
+	HAL_EXTRADATA_MB_QUANTIZATION,
+	HAL_EXTRADATA_INTERLACE_VIDEO,
+	HAL_EXTRADATA_VC1_FRAMEDISP,
+	HAL_EXTRADATA_VC1_SEQDISP,
+	HAL_EXTRADATA_TIMESTAMP,
+	HAL_EXTRADATA_S3D_FRAME_PACKING,
+	HAL_EXTRADATA_FRAME_RATE,
+	HAL_EXTRADATA_PANSCAN_WINDOW,
+	HAL_EXTRADATA_RECOVERY_POINT_SEI,
+	HAL_EXTRADATA_CLOSED_CAPTION_UD,
+	HAL_EXTRADATA_AFD_UD,
+	HAL_EXTRADATA_MULTISLICE_INFO,
+	HAL_EXTRADATA_INDEX,
+	HAL_EXTRADATA_NUM_CONCEALED_MB,
+	HAL_EXTRADATA_METADATA_FILLER,
+};
+
 enum hal_property {
 	HAL_CONFIG_FRAME_RATE = 0x04000001,
 	HAL_PARAM_UNCOMPRESSED_FORMAT_SELECT,
 	HAL_PARAM_UNCOMPRESSED_PLANE_ACTUAL_CONSTRAINTS_INFO,
 	HAL_PARAM_UNCOMPRESSED_PLANE_ACTUAL_INFO,
 	HAL_PARAM_EXTRA_DATA_HEADER_CONFIG,
+	HAL_PARAM_INDEX_EXTRADATA,
 	HAL_PARAM_FRAME_SIZE,
 	HAL_CONFIG_REALTIME,
 	HAL_PARAM_BUFFER_COUNT_ACTUAL,
@@ -460,6 +480,11 @@
 	HAL_UNUSED_PICT = 0x10000000,
 };
 
+struct hal_extradata_enable {
+	u32 enable;
+	enum hal_extradata_id index;
+};
+
 struct hal_enable_picture {
 	u32 picture_type;
 };
diff --git a/drivers/media/video/msm_vidc/vidc_hal_helper.h b/drivers/media/video/msm_vidc/vidc_hal_helper.h
index 43995eb..9412eed 100644
--- a/drivers/media/video/msm_vidc/vidc_hal_helper.h
+++ b/drivers/media/video/msm_vidc/vidc_hal_helper.h
@@ -14,55 +14,6 @@
 #ifndef __H_VIDC_HAL_HELPER_H__
 #define __H_VIDC_HAL_HELPER_H__
 
-#define HFI_NV12_IL_CALC_Y_STRIDE(stride, frame_width, stride_multiple) \
-	{ stride = (frame_width + stride_multiple - 1) & \
-	(0xffffffff - (stride_multiple - 1))}
-
-#define HFI_NV12_IL_CALC_Y_BUFHEIGHT(buf_height, frame_height,\
-	min_buf_height_multiple) \
-	{ buf_height = (frame_height + min_buf_height_multiple - 1) & \
-	(0xffffffff - (min_buf_height_multiple - 1)) }
-
-#define HFI_NV12_IL_CALC_UV_STRIDE(stride, frame_width, stride_multiple) \
-	{ stride = ((((frame_width + 1) >> 1) + stride_multiple - 1) & \
-	(0xffffffff - (stride_multiple - 1))) << 1 }
-
-#define HFI_NV12_IL_CALC_UV_BUFHEIGHT(buf_height, frame_height,\
-	min_buf_height_multiple) \
-	{ buf_height = ((((frame_height + 1) >> 1) + \
-	min_buf_height_multiple - 1) & (0xffffffff - \
-	(min_buf_height_multiple - 1))) }
-
-#define HFI_NV12_IL_CALC_BUF_SIZE(buf_size, y_buf_size, y_stride, \
-	y_buf_height, uv_buf_size, uv_stride, uv_buf_height, uv_alignment) \
-	{ y_buf_size = (y_stride * y_buf_height); \
-	uv_buf_size = (uv_stride * uv_buf_height) + uv_alignment; \
-	buf_size = y_buf_size + uv_buf_size }
-
-#define HFI_YUYV_CALC_STRIDE(stride, frame_width, stride_multiple) \
-	{ stride = ((frame_width << 1) + stride_multiple - 1) & \
-	(0xffffffff - (stride_multiple - 1)) }
-
-#define HFI_YUYV_CALC_BUFHEIGHT(buf_height, frame_height,\
-	min_buf_height_multiple) \
-	{ buf_height = ((frame_height + min_buf_height_multiple - 1) & \
-	(0xffffffff - (min_buf_height_multiple - 1))) }
-
-#define HFI_YUYV_CALC_BUF_SIZE(buf_size, stride, buf_height) \
-	{ buf_size = stride * buf_height }
-
-#define HFI_RGB888_CALC_STRIDE(stride, frame_width, stride_multiple) \
-	{ stride = ((frame_width * 3) + stride_multiple - 1) & \
-	(0xffffffff - (stride_multiple - 1)) }
-
-#define HFI_RGB888_CALC_BUFHEIGHT(buf_height, frame_height,\
-	min_buf_height_multiple) \
-	{ buf_height = ((frame_height + min_buf_height_multiple - 1) & \
-	(0xffffffff - (min_buf_height_multiple - 1))) }
-
-#define HFI_RGB888_CALC_BUF_SIZE(buf_size, stride, buf_height) \
-	{ buf_size = (stride * buf_height) }
-
 #define HFI_COMMON_BASE				(0)
 #define HFI_OX_BASE					(0x01000000)
 
@@ -81,6 +32,9 @@
 #define HFI_ARCH_COMMON_OFFSET		(0)
 #define HFI_ARCH_OX_OFFSET			(0x00200000)
 
+#define  HFI_CMD_START_OFFSET		(0x00010000)
+#define  HFI_MSG_START_OFFSET		(0x00020000)
+
 #define HFI_ERR_NONE						HFI_COMMON_BASE
 #define HFI_ERR_SYS_FATAL				(HFI_COMMON_BASE + 0x1)
 #define HFI_ERR_SYS_INVALID_PARAMETER		(HFI_COMMON_BASE + 0x2)
@@ -110,6 +64,7 @@
 
 #define HFI_ERR_SESSION_STREAM_CORRUPT		(HFI_COMMON_BASE + 0x100B)
 #define HFI_ERR_SESSION_ENC_OVERFLOW		(HFI_COMMON_BASE + 0x100C)
+#define  HFI_ERR_SESSION_UNSUPPORTED_STREAM	(HFI_COMMON_BASE + 0x100D)
 
 #define HFI_EVENT_SYS_ERROR				(HFI_COMMON_BASE + 0x1)
 #define HFI_EVENT_SESSION_ERROR			(HFI_COMMON_BASE + 0x2)
@@ -130,8 +85,8 @@
 #define HFI_H264_PROFILE_HIGH				0x00000004
 #define HFI_H264_PROFILE_STEREO_HIGH		0x00000008
 #define HFI_H264_PROFILE_MULTIVIEW_HIGH		0x00000010
-#define HFI_H264_PROFILE_CONSTRAINED_HIGH	0x00000020
-#define  HFI_H264_PROFILE_CONSTRAINED_BASE  0x00000040
+#define HFI_H264_PROFILE_CONSTRAINED_BASE	0x00000020
+#define HFI_H264_PROFILE_CONSTRAINED_HIGH	0x00000040
 
 #define HFI_H264_LEVEL_1					0x00000001
 #define HFI_H264_LEVEL_1b					0x00000002
@@ -235,8 +190,11 @@
 #define HFI_PROPERTY_SYS_DEBUG_CONFIG		\
 	(HFI_PROPERTY_SYS_COMMON_START + 0x001)
 #define HFI_PROPERTY_SYS_RESOURCE_OCMEM_REQUIREMENT_INFO	\
-(HFI_PROPERTY_SYS_COMMON_START + 0x002)
-#define HFI_PROPERTY_PARAM_COMMON_START		\
+	(HFI_PROPERTY_SYS_COMMON_START + 0x002)
+#define HFI_PROPERTY_SYS_CONFIG_VCODEC_CLKFREQ				\
+	(HFI_PROPERTY_SYS_COMMON_START + 0x003)
+
+#define HFI_PROPERTY_PARAM_COMMON_START	\
 	(HFI_DOMAIN_BASE_COMMON + HFI_ARCH_COMMON_OFFSET + 0x1000)
 #define HFI_PROPERTY_PARAM_FRAME_SIZE		\
 	(HFI_PROPERTY_PARAM_COMMON_START + 0x001)
@@ -316,16 +274,25 @@
 	(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x00F)
 #define  HFI_PROPERTY_PARAM_VENC_QUALITY_VS_SPEED           \
 	(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x010)
-#define HFI_PROPERTY_PARAM_VENC_MPEG4_QPEL				\
-	(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x011)
 #define HFI_PROPERTY_PARAM_VENC_ADVANCED				\
 	(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x012)
-#define HFI_PROPERTY_PARAM_VENC_SYNC_FRAME_SEQUENCE_HEADER	\
-	(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x013)
 #define  HFI_PROPERTY_PARAM_VENC_H264_SPS_ID                \
 	(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x014)
 #define  HFI_PROPERTY_PARAM_VENC_H264_PPS_ID               \
 	(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x015)
+#define HFI_PROPERTY_PARAM_VENC_H264_GENERATE_AUDNAL	\
+	(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x016)
+#define HFI_PROPERTY_PARAM_VENC_ASPECT_RATIO			\
+	(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x017)
+#define HFI_PROPERTY_PARAM_VENC_NUMREF					\
+	(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x018)
+#define HFI_PROPERTY_PARAM_VENC_MULTIREF_P				\
+	(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x019)
+#define HFI_PROPERTY_PARAM_VENC_HIER_P_NUM_ENH_LAYER	\
+	(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x01A)
+
+#define HFI_PROPERTY_PARAM_VENC_H264_NAL_SVC_EXT		\
+	(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x01B)
 
 #define HFI_PROPERTY_CONFIG_VENC_COMMON_START				\
 	(HFI_DOMAIN_BASE_VENC + HFI_ARCH_COMMON_OFFSET + 0x6000)
@@ -339,13 +306,13 @@
 	(HFI_PROPERTY_CONFIG_VENC_COMMON_START + 0x004)
 #define  HFI_PROPERTY_CONFIG_VENC_SLICE_SIZE                \
 	(HFI_PROPERTY_CONFIG_VENC_COMMON_START + 0x005)
-#define HFI_PROPERTY_CONFIG_VENC_FRAME_QP				\
-	(HFI_PROPERTY_CONFIG_VENC_COMMON_START + 0x006)
 #define HFI_PROPERTY_CONFIG_VENC_MAX_BITRATE				\
 	(HFI_PROPERTY_CONFIG_VENC_COMMON_START + 0x007)
 
 #define HFI_PROPERTY_PARAM_VPE_COMMON_START				\
 	(HFI_DOMAIN_BASE_VPE + HFI_ARCH_COMMON_OFFSET + 0x7000)
+#define  HFI_PROPERTY_CONFIG_VENC_SYNC_FRAME_SEQUENCE_HEADER	\
+	(HFI_PROPERTY_CONFIG_VENC_COMMON_START + 0x008)
 
 #define HFI_PROPERTY_CONFIG_VPE_COMMON_START				\
 	(HFI_DOMAIN_BASE_VPE + HFI_ARCH_COMMON_OFFSET + 0x8000)
@@ -356,6 +323,7 @@
 
 struct hfi_bitrate {
 	u32 bit_rate;
+	u32 layer_id;
 };
 
 #define HFI_CAPABILITY_FRAME_WIDTH			(HFI_COMMON_BASE + 0x1)
@@ -367,7 +335,7 @@
 #define HFI_CAPABILITY_SCALE_Y				(HFI_COMMON_BASE + 0x7)
 #define HFI_CAPABILITY_BITRATE				(HFI_COMMON_BASE + 0x8)
 #define  HFI_CAPABILITY_BFRAME				(HFI_COMMON_BASE + 0x9)
-#define  HFI_CAPABILITY_HIERARCHICAL_P_LAYERS	(HFI_COMMON_BASE + 0x10)
+#define  HFI_CAPABILITY_HIER_P_NUM_ENH_LAYERS   (HFI_COMMON_BASE + 0x10)
 
 struct hfi_capability_supported {
 	u32 capability_type;
@@ -386,9 +354,14 @@
 #define HFI_DEBUG_MSG_HIGH					0x00000004
 #define HFI_DEBUG_MSG_ERROR					0x00000008
 #define HFI_DEBUG_MSG_FATAL					0x00000010
+#define HFI_DEBUG_MSG_PERF					0x00000020
+
+#define HFI_DEBUG_MODE_QUEUE				0x00000001
+#define HFI_DEBUG_MODE_QDSS					0x00000002
 
 struct hfi_debug_config {
 	u32 debug_config;
+	u32 debug_mode;
 };
 
 struct hfi_enable {
@@ -396,8 +369,9 @@
 };
 
 #define HFI_H264_DB_MODE_DISABLE			(HFI_COMMON_BASE + 0x1)
-#define HFI_H264_DB_MODE_SKIP_SLICE_BOUNDARY	(HFI_COMMON_BASE + 0x2)
-#define HFI_H264_DB_MODE_ALL_BOUNDARY			(HFI_COMMON_BASE + 0x3)
+#define HFI_H264_DB_MODE_SKIP_SLICE_BOUNDARY	\
+	(HFI_COMMON_BASE + 0x2)
+#define HFI_H264_DB_MODE_ALL_BOUNDARY		(HFI_COMMON_BASE + 0x3)
 
 struct hfi_h264_db_control {
 	u32 mode;
@@ -465,8 +439,8 @@
 };
 
 #define HFI_MULTI_SLICE_OFF				(HFI_COMMON_BASE + 0x1)
-#define HFI_MULTI_SLICE_BY_MB_COUNT			(HFI_COMMON_BASE + 0x2)
-#define HFI_MULTI_SLICE_BY_BYTE_COUNT		(HFI_COMMON_BASE + 0x3)
+#define HFI_MULTI_SLICE_BY_MB_COUNT		(HFI_COMMON_BASE + 0x2)
+#define HFI_MULTI_SLICE_BY_BYTE_COUNT	(HFI_COMMON_BASE + 0x3)
 #define HFI_MULTI_SLICE_GOB				(HFI_COMMON_BASE + 0x4)
 
 struct hfi_multi_slice_control {
@@ -654,9 +628,14 @@
 struct hfi_seq_header_info {
 	u32 max_hader_len;
 };
+struct hfi_aspect_ratio {
+	u32 aspect_width;
+	u32 aspect_height;
+};
 
 #define HFI_CMD_SYS_COMMON_START			\
-	(HFI_DOMAIN_BASE_COMMON + HFI_ARCH_COMMON_OFFSET + 0x0000)
+(HFI_DOMAIN_BASE_COMMON + HFI_ARCH_COMMON_OFFSET + HFI_CMD_START_OFFSET \
+	+ 0x0000)
 #define HFI_CMD_SYS_INIT		(HFI_CMD_SYS_COMMON_START + 0x001)
 #define HFI_CMD_SYS_PC_PREP		(HFI_CMD_SYS_COMMON_START + 0x002)
 #define HFI_CMD_SYS_SET_RESOURCE	(HFI_CMD_SYS_COMMON_START + 0x003)
@@ -666,9 +645,11 @@
 #define HFI_CMD_SYS_SESSION_INIT	(HFI_CMD_SYS_COMMON_START + 0x007)
 #define HFI_CMD_SYS_SESSION_END		(HFI_CMD_SYS_COMMON_START + 0x008)
 #define HFI_CMD_SYS_SET_BUFFERS		(HFI_CMD_SYS_COMMON_START + 0x009)
+#define HFI_CMD_SYS_TEST_START		(HFI_CMD_SYS_COMMON_START + 0x100)
 
 #define HFI_CMD_SESSION_COMMON_START		\
-	(HFI_DOMAIN_BASE_COMMON + HFI_ARCH_COMMON_OFFSET + 0x1000)
+	(HFI_DOMAIN_BASE_COMMON + HFI_ARCH_COMMON_OFFSET +	\
+	HFI_CMD_START_OFFSET + 0x1000)
 #define HFI_CMD_SESSION_SET_PROPERTY		\
 	(HFI_CMD_SESSION_COMMON_START + 0x001)
 #define HFI_CMD_SESSION_SET_BUFFERS			\
@@ -677,7 +658,8 @@
 	(HFI_CMD_SESSION_COMMON_START + 0x003)
 
 #define HFI_MSG_SYS_COMMON_START			\
-	(HFI_DOMAIN_BASE_COMMON + HFI_ARCH_COMMON_OFFSET + 0x0000)
+	(HFI_DOMAIN_BASE_COMMON + HFI_ARCH_COMMON_OFFSET +	\
+	HFI_MSG_START_OFFSET + 0x0000)
 #define HFI_MSG_SYS_INIT_DONE			(HFI_MSG_SYS_COMMON_START + 0x1)
 #define HFI_MSG_SYS_PC_PREP_DONE		(HFI_MSG_SYS_COMMON_START + 0x2)
 #define HFI_MSG_SYS_RELEASE_RESOURCE	(HFI_MSG_SYS_COMMON_START + 0x3)
@@ -686,7 +668,8 @@
 #define HFI_MSG_SYS_SESSION_END_DONE	(HFI_MSG_SYS_COMMON_START + 0x7)
 
 #define HFI_MSG_SESSION_COMMON_START		\
-	(HFI_DOMAIN_BASE_COMMON + HFI_ARCH_COMMON_OFFSET + 0x1000)
+	(HFI_DOMAIN_BASE_COMMON + HFI_ARCH_COMMON_OFFSET +	\
+	HFI_MSG_START_OFFSET + 0x1000)
 #define HFI_MSG_EVENT_NOTIFY	(HFI_MSG_SESSION_COMMON_START + 0x1)
 #define HFI_MSG_SESSION_GET_SEQUENCE_HEADER_DONE	\
 	(HFI_MSG_SESSION_COMMON_START + 0x2)
@@ -778,7 +761,6 @@
 	u32 packet_type;
 	u32 session_id;
 	u32 buffer_type;
-	u32 buffer_mode;
 	u32 buffer_size;
 	u32 extra_data_size;
 	u32 min_buffer_size;
@@ -860,4 +842,22 @@
 	u8 rg_msg_data[1];
 };
 
+enum HFI_VENUS_QTBL_STATUS {
+	HFI_VENUS_QTBL_DISABLED = 0x00,
+	HFI_VENUS_QTBL_ENABLED = 0x01,
+	HFI_VENUS_QTBL_INITIALIZING = 0x02,
+	HFI_VENUS_QTBL_DEINITIALIZING = 0x03
+};
+
+enum HFI_VENUS_CTRL_INIT_STATUS {
+	HFI_VENUS_CTRL_NOT_INIT = 0x0,
+	HFI_VENUS_CTRL_READY = 0x1,
+	HFI_VENUS_CTRL_ERROR_FATAL = 0x2
+};
+
+struct hfi_sfr_struct {
+	u32 bufSize;
+	u8 rg_data[1];
+};
+
 #endif
diff --git a/drivers/media/video/msm_vidc/vidc_hal_interrupt_handler.c b/drivers/media/video/msm_vidc/vidc_hal_interrupt_handler.c
index ba599ec..200f5d3 100644
--- a/drivers/media/video/msm_vidc/vidc_hal_interrupt_handler.c
+++ b/drivers/media/video/msm_vidc/vidc_hal_interrupt_handler.c
@@ -315,16 +315,22 @@
 {
 	struct hfi_buffer_requirements *hfi_buf_req;
 	u32 req_bytes;
-	enum vidc_status rc = VIDC_ERR_NONE;
 
 	dprintk(VIDC_DBG, "Entered ");
+	if (!prop) {
+		dprintk(VIDC_ERR,
+			"hal_process_sess_get_prop_buf_req:bad_prop: %p",
+			prop);
+		return;
+	}
 	req_bytes = prop->size - sizeof(
 	struct hfi_msg_session_property_info_packet);
 
-	if (req_bytes == 0 || (req_bytes % sizeof(
-		struct hfi_buffer_requirements))) {
+	if (!req_bytes || (req_bytes % sizeof(
+		struct hfi_buffer_requirements)) ||
+		(!prop->rg_property_data[1])) {
 		dprintk(VIDC_ERR,
-			"hal_process_sess_get_prop_buf_req:bad_pkt_size: %d",
+			"hal_process_sess_get_prop_buf_req:bad_pkt: %d",
 			req_bytes);
 		return;
 	}
@@ -332,15 +338,14 @@
 	hfi_buf_req = (struct hfi_buffer_requirements *)
 		&prop->rg_property_data[1];
 
-	while (req_bytes != 0) {
-		if ((hfi_buf_req->buffer_count_min > hfi_buf_req->
-			buffer_count_actual)
-			|| (hfi_buf_req->buffer_alignment == 0)
-			|| (hfi_buf_req->buffer_size == 0)) {
-			dprintk(VIDC_ERR, "hal_process_sess_get_prop_buf_req:"
-						"bad_buf_req");
-			rc = VIDC_ERR_FAIL;
-		}
+	while (req_bytes) {
+		if ((hfi_buf_req->buffer_size) &&
+			((hfi_buf_req->buffer_count_min > hfi_buf_req->
+			buffer_count_actual)))
+				dprintk(VIDC_WARN,
+					"hal_process_sess_get_prop_buf_req:"
+					"bad_buf_req");
+
 		dprintk(VIDC_DBG, "got buffer requirements for: %d",
 					hfi_buf_req->buffer_type);
 		switch (hfi_buf_req->buffer_type) {
diff --git a/drivers/media/video/msm_vidc/vidc_hal_io.h b/drivers/media/video/msm_vidc/vidc_hal_io.h
index 6845ac5..9888adb 100644
--- a/drivers/media/video/msm_vidc/vidc_hal_io.h
+++ b/drivers/media/video/msm_vidc/vidc_hal_io.h
@@ -135,4 +135,41 @@
 #define VIDC_VENUS0_WRAPPER_VBIF_PRIORITY_LEVEL \
 	(VIDC_WRAPPER_BASE_OFFS + 0x24)
 
+#define VIDC_CTRL_INIT 0x000D2048
+#define VIDC_CTRL_INIT_RESERVED_BITS31_1__M 0xFFFFFFFE
+#define VIDC_CTRL_INIT_RESERVED_BITS31_1__S 1
+#define VIDC_CTRL_INIT_CTRL__M 0x00000001
+#define VIDC_CTRL_INIT_CTRL__S 0
+
+#define VIDC_CTRL_STATUS 0x000D204C
+#define VIDC_CTRL_STATUS_RESERVED_BITS31_8__M 0xFFFFFF00
+#define VIDC_CTRL_STATUS_RESERVED_BITS31_8__S 8
+#define VIDC_CTRL_ERROR_STATUS__M             0x000000FE
+#define VIDC_CTRL_ERROR_STATUS__S             1
+#define VIDC_CTRL_INIT_STATUS__M              0x00000001
+#define VIDC_CTRL_INIT_STATUS__S              0
+
+#define VIDC_QTBL_INFO 0x000D2050
+#define VIDC_QTBL_HOSTID__M 0xFF000000
+#define VIDC_QTBL_HOSTID__S 24
+#define VIDC_QTBL_INFO_RESERVED_BITS23_8__M 0x00FFFF00
+#define VIDC_QTBL_INFO_RESERVED_BITS23_8__S 8
+#define VIDC_QTBL_STATUS__M 0x000000FF
+#define VIDC_QTBL_STATUS__S 0
+
+#define VIDC_QTBL_ADDR 0x000D2054
+
+#define VIDC_VERSION_INFO 0x000D2058
+#define VIDC_VERSION_INFO_MAJOR__M  0xF0000000
+#define VIDC_VERSION_INFO_MAJOR__S  28
+#define VIDC_VERSION_INFO_MINOR__M  0x0FFFFFE0
+#define VIDC_VERSION_INFO_MINOR__S  5
+#define VIDC_VERSION_INFO_BRANCH__M 0x0000001F
+#define VIDC_VERSION_INFO_BRANCH__S 0
+
+#define VIDC_SFR_ADDR 0x000D205C
+#define VIDC_MMAP_ADDR 0x000D2060
+#define VIDC_UC_REGION_ADDR 0x000D2064
+#define VIDC_UC_REGION_SIZE 0x000D2068
+
 #endif
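
The register map added above pairs each field with a __M mask and a __S shift.
As a hedged illustration only (the helper below, its name, and the use of
readl_relaxed() are assumptions, not part of this change), decoding the
controller status with these macros and the HFI_VENUS_CTRL_INIT_STATUS values
from vidc_hal_helper.h might look like:

	/* Sketch: decode VIDC_CTRL_STATUS using the new mask/shift pairs.
	 * 'base' is assumed to be the ioremapped register base used by the
	 * HAL; this helper is illustrative and not added by the patch.
	 */
	static int venus_check_ctrl_status(u8 __iomem *base)
	{
		u32 status = readl_relaxed(base + VIDC_CTRL_STATUS);
		u32 init = (status & VIDC_CTRL_INIT_STATUS__M) >>
				VIDC_CTRL_INIT_STATUS__S;
		u32 error = (status & VIDC_CTRL_ERROR_STATUS__M) >>
				VIDC_CTRL_ERROR_STATUS__S;

		if (error)
			return -EIO;	/* firmware flagged a fatal error */

		/* HFI_VENUS_CTRL_READY (0x1) means the controller is up */
		return init == HFI_VENUS_CTRL_READY;
	}
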
diff --git a/drivers/media/video/msm_wfd/enc-mfc-subdev.c b/drivers/media/video/msm_wfd/enc-mfc-subdev.c
index d839be3..45532a9 100644
--- a/drivers/media/video/msm_wfd/enc-mfc-subdev.c
+++ b/drivers/media/video/msm_wfd/enc-mfc-subdev.c
@@ -23,6 +23,8 @@
 
 #define VID_ENC_MAX_ENCODER_CLIENTS 1
 #define MAX_NUM_CTRLS 20
+#define V4L2_FRAME_FLAGS (V4L2_BUF_FLAG_KEYFRAME | V4L2_BUF_FLAG_PFRAME | \
+		V4L2_BUF_FLAG_BFRAME | V4L2_QCOM_BUF_FLAG_CODECCONFIG)
 
 static long venc_fill_outbuf(struct v4l2_subdev *sd, void *arg);
 
@@ -179,6 +181,7 @@
 		vbuf->v4l2_planes[0].bytesused =
 			frame_data->data_len;
 
+		vbuf->v4l2_buf.flags &= ~(V4L2_FRAME_FLAGS);
 		switch (frame_data->frame) {
 		case VCD_FRAME_I:
 		case VCD_FRAME_IDR:
diff --git a/drivers/media/video/vcap_vp.c b/drivers/media/video/vcap_vp.c
index 82f9e58..5161b7b 100644
--- a/drivers/media/video/vcap_vp.c
+++ b/drivers/media/video/vcap_vp.c
@@ -396,6 +396,7 @@
 		if (rc == 0 && atomic_read(&dev->vp_enabled) == 1) {
 			/* This should not happen, if it does hw is stuck */
 			disable_irq_nosync(dev->vpirq->start);
+			atomic_set(&dev->vp_enabled, 0);
 			pr_err("%s: VP Timeout and VP still running\n",
 				__func__);
 		}
diff --git a/drivers/mfd/pm8821-core.c b/drivers/mfd/pm8821-core.c
index 86bd5ec..fe3e67e 100644
--- a/drivers/mfd/pm8821-core.c
+++ b/drivers/mfd/pm8821-core.c
@@ -312,11 +312,11 @@
 	drvdata = platform_get_drvdata(pdev);
 	if (drvdata)
 		pmic = drvdata->pm_chip_data;
-	if (pmic)
-		mfd_remove_devices(pmic->dev);
-	if (pmic->irq_chip) {
-		pm8821_irq_exit(pmic->irq_chip);
-		pmic->irq_chip = NULL;
+	if (pmic) {
+		if (pmic->dev)
+			mfd_remove_devices(pmic->dev);
+		if (pmic->irq_chip)
+			pm8821_irq_exit(pmic->irq_chip);
 	}
 	platform_set_drvdata(pdev, NULL);
 	kfree(pmic);
diff --git a/drivers/mfd/pm8xxx-misc.c b/drivers/mfd/pm8xxx-misc.c
index 6bb1441..fb57bd0 100644
--- a/drivers/mfd/pm8xxx-misc.c
+++ b/drivers/mfd/pm8xxx-misc.c
@@ -671,8 +671,7 @@
 	voltage = chg_config->voltage;
 	resistor = chg_config->resistor;
 
-	if (resistor < PM8XXX_COINCELL_RESISTOR_2100_OHMS ||
-			resistor > PM8XXX_COINCELL_RESISTOR_800_OHMS) {
+	if (resistor > PM8XXX_COINCELL_RESISTOR_800_OHMS) {
 		pr_err("Invalid resistor value provided\n");
 		return -EINVAL;
 	}
diff --git a/drivers/mfd/wcd9xxx-core.c b/drivers/mfd/wcd9xxx-core.c
index 1f7b67a..fa7c116 100644
--- a/drivers/mfd/wcd9xxx-core.c
+++ b/drivers/mfd/wcd9xxx-core.c
@@ -35,6 +35,8 @@
 #define MAX_WCD9XXX_DEVICE	4
 #define TABLA_I2C_MODE	0x03
 #define SITAR_I2C_MODE	0x01
+#define CODEC_DT_MAX_PROP_SIZE   40
+#define WCD9XXX_I2C_GSBI_SLAVE_ID "3-000d"
 
 struct wcd9xxx_i2c {
 	struct i2c_client *client;
@@ -43,6 +45,17 @@
 	int mod_id;
 };
 
+static char *taiko_supplies[] = {
+	"cdc-vdd-buck", "cdc-vdd-tx-h", "cdc-vdd-rx-h", "cdc-vddpx-1",
+	"cdc-vdd-a-1p2v", "cdc-vddcx-1", "cdc-vddcx-2",
+};
+
+static int wcd9xxx_dt_parse_vreg_info(struct device *dev,
+	struct wcd9xxx_regulator *vreg, const char *vreg_name);
+static int wcd9xxx_dt_parse_micbias_info(struct device *dev,
+	struct wcd9xxx_micbias_setting *micbias);
+static struct wcd9xxx_pdata *wcd9xxx_populate_dt_pdata(struct device *dev);
+
 struct wcd9xxx_i2c wcd9xxx_modules[MAX_WCD9XXX_DEVICE];
 static int wcd9xxx_intf = -1;
 
@@ -764,19 +777,31 @@
 	int ret = 0;
 	int i2c_mode = 0;
 	static int device_id;
+	struct device *dev;
 
 	pr_info("%s\n", __func__);
 	if (wcd9xxx_intf == WCD9XXX_INTERFACE_TYPE_SLIMBUS) {
-		pr_info("tabla card is already detected in slimbus mode\n");
+		dev_dbg(&client->dev, "%s:Codec is detected in slimbus mode\n",
+			 __func__);
 		return -ENODEV;
 	}
-	pdata = client->dev.platform_data;
 	if (device_id > 0) {
 		wcd9xxx_modules[device_id++].client = client;
-		pr_info("probe for other slaves devices of tabla\n");
+		dev_dbg(&client->dev, "%s:probe for other slaves\n"
+			"devices of codec\n", __func__);
 		return ret;
 	}
-
+	dev = &client->dev;
+	if (client->dev.of_node) {
+		dev_dbg(&client->dev, "%s:Platform data from device tree\n",
+			__func__);
+		pdata = wcd9xxx_populate_dt_pdata(&client->dev);
+		client->dev.platform_data = pdata;
+	} else {
+		dev_dbg(&client->dev, "%s:Platform data from board file\n",
+			__func__);
+		pdata = client->dev.platform_data;
+	}
 	wcd9xxx = kzalloc(sizeof(struct wcd9xxx), GFP_KERNEL);
 	if (wcd9xxx == NULL) {
 		pr_err("%s: error, allocation failed\n", __func__);
@@ -858,7 +883,6 @@
 	return 0;
 }
 
-#define CODEC_DT_MAX_PROP_SIZE   40
 static int wcd9xxx_dt_parse_vreg_info(struct device *dev,
 	struct wcd9xxx_regulator *vreg, const char *vreg_name)
 {
@@ -1057,11 +1081,6 @@
 	return 0;
 }
 
-static char *taiko_supplies[] = {
-	"cdc-vdd-buck", "cdc-vdd-tx-h", "cdc-vdd-rx-h", "cdc-vddpx-1",
-	"cdc-vdd-a-1p2v", "cdc-vddcx-1", "cdc-vddcx-2",
-};
-
 static struct wcd9xxx_pdata *wcd9xxx_populate_dt_pdata(struct device *dev)
 {
 	struct wcd9xxx_pdata *pdata;
@@ -1071,12 +1090,11 @@
 
 	pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
 	if (!pdata) {
-		dev_err(dev,
-			"could not allocate memory for platform data\n");
+		dev_err(dev, "could not allocate memory for platform data\n");
 		return NULL;
 	}
-
-	if (!strcmp(dev_name(dev), "taiko-slim-pgd")) {
+	if (!strcmp(dev_name(dev), "taiko-slim-pgd") ||
+		(!strcmp(dev_name(dev), WCD9XXX_I2C_GSBI_SLAVE_ID))) {
 		codec_supplies = taiko_supplies;
 		num_of_supplies = ARRAY_SIZE(taiko_supplies);
 	} else {
@@ -1111,11 +1129,7 @@
 			pdata->reset_gpio);
 		goto err;
 	}
-
-	ret = wcd9xxx_dt_parse_slim_interface_dev_info(dev,
-			&pdata->slimbus_slave_device);
-	if (ret)
-		goto err;
+	dev_dbg(dev, "%s: reset gpio %d", __func__, pdata->reset_gpio);
 	return pdata;
 err:
 	devm_kfree(dev, pdata);
@@ -1151,6 +1165,14 @@
 	if (slim->dev.of_node) {
 		dev_info(&slim->dev, "Platform data from device tree\n");
 		pdata = wcd9xxx_populate_dt_pdata(&slim->dev);
+		ret = wcd9xxx_dt_parse_slim_interface_dev_info(&slim->dev,
+				&pdata->slimbus_slave_device);
+		if (ret) {
+			dev_err(&slim->dev, "Error, parsing slim interface\n");
+			devm_kfree(&slim->dev, pdata);
+			ret = -EINVAL;
+			goto err;
+		}
 		slim->dev.platform_data = pdata;
 
 	} else {
@@ -1460,6 +1482,14 @@
 #define WCD9XXX_I2C_DIGITAL_1	2
 #define WCD9XXX_I2C_DIGITAL_2	3
 
+static struct i2c_device_id wcd9xxx_id_table[] = {
+	{"wcd9xxx-i2c", WCD9XXX_I2C_TOP_LEVEL},
+	{"wcd9xxx-i2c", WCD9XXX_I2C_ANALOG},
+	{"wcd9xxx-i2c", WCD9XXX_I2C_DIGITAL_1},
+	{"wcd9xxx-i2c", WCD9XXX_I2C_DIGITAL_2},
+	{}
+};
+
 static struct i2c_device_id tabla_id_table[] = {
 	{"tabla top level", WCD9XXX_I2C_TOP_LEVEL},
 	{"tabla analog", WCD9XXX_I2C_ANALOG},
@@ -1481,9 +1511,22 @@
 	.suspend = wcd9xxx_i2c_suspend,
 };
 
+static struct i2c_driver wcd9xxx_i2c_driver = {
+	.driver                 = {
+		.owner          =       THIS_MODULE,
+		.name           =       "wcd9xxx-i2c-core",
+	},
+	.id_table               =       wcd9xxx_id_table,
+	.probe                  =       wcd9xxx_i2c_probe,
+	.remove                 =       __devexit_p(wcd9xxx_i2c_remove),
+	.resume	= wcd9xxx_i2c_resume,
+	.suspend = wcd9xxx_i2c_suspend,
+};
+
+
 static int __init wcd9xxx_init(void)
 {
-	int ret1, ret2, ret3, ret4, ret5, ret6;
+	int ret1, ret2, ret3, ret4, ret5, ret6, ret7;
 
 	ret1 = slim_driver_register(&tabla_slim_driver);
 	if (ret1 != 0)
@@ -1495,7 +1538,7 @@
 
 	ret3 = i2c_add_driver(&tabla_i2c_driver);
 	if (ret3 != 0)
-		pr_err("failed to add the I2C driver\n");
+		pr_err("failed to add the tabla2x I2C driver\n");
 
 	ret4 = slim_driver_register(&sitar_slim_driver);
 	if (ret4 != 0)
@@ -1509,7 +1552,11 @@
 	if (ret6 != 0)
 		pr_err("Failed to register taiko SB driver: %d\n", ret6);
 
-	return (ret1 && ret2 && ret3 && ret4 && ret5 && ret6) ? -1 : 0;
+	ret7 = i2c_add_driver(&wcd9xxx_i2c_driver);
+	if (ret7 != 0)
+		pr_err("failed to add the wcd9xxx I2C driver\n");
+
+	return (ret1 && ret2 && ret3 && ret4 && ret5 && ret6 && ret7) ? -1 : 0;
 }
 module_init(wcd9xxx_init);
 
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index 6ab3a66..93a3237 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -664,6 +664,17 @@
 	  This adds support for connecting devices like mouse in HSIC
 	  Host mode.
 
+config TI_DRV2667
+	tristate "TI's DRV2667 haptic controller support"
+	depends on I2C
+	help
+	  The DRV2667 is a piezo haptic controller chip. It can drive
+	  piezo haptics either in digital mode or analog mode. This chip
+	  can be used in a variety of devices to provide haptic support.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called ti_drv2667.
+
 source "drivers/misc/c2port/Kconfig"
 source "drivers/misc/eeprom/Kconfig"
 source "drivers/misc/cb710/Kconfig"
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index e92e119..8395ef4 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -72,3 +72,4 @@
 obj-$(CONFIG_PMIC8058_XOADC) += pmic8058-xoadc.o
 obj-$(CONFIG_QSEECOM) += qseecom.o
 obj-$(CONFIG_QFP_FUSE) += qfp_fuse.o
+obj-$(CONFIG_TI_DRV2667) += ti_drv2667.o
diff --git a/drivers/misc/qseecom.c b/drivers/misc/qseecom.c
index 1972845..c9fb721 100644
--- a/drivers/misc/qseecom.c
+++ b/drivers/misc/qseecom.c
@@ -35,6 +35,7 @@
 #include <linux/elf.h>
 #include <linux/firmware.h>
 #include <linux/freezer.h>
+#include <linux/scatterlist.h>
 #include <mach/board.h>
 #include <mach/msm_bus.h>
 #include <mach/msm_bus_board.h>
@@ -48,12 +49,17 @@
 #define QSEOS_VERSION_13		0x13
 #define QSEOS_VERSION_14		0x14
 #define QSEEE_VERSION_00		0x400000
+#define QSEE_VERSION_01			0x401000
+#define QSEE_VERSION_02			0x402000
+
 
 #define QSEOS_CHECK_VERSION_CMD		0x00001803
 
 #define QSEE_CE_CLK_100MHZ		100000000
 #define QSEE_CE_CLK_50MHZ		50000000
 
+#define QSEECOM_MAX_SG_ENTRY	10
+
 enum qseecom_command_scm_resp_type {
 	QSEOS_APP_ID = 0xEE01,
 	QSEOS_LISTENER_ID
@@ -72,6 +78,7 @@
 	QSEOS_GET_APP_STATE_COMMAND,
 	QSEOS_LOAD_SERV_IMAGE_COMMAND,
 	QSEOS_UNLOAD_SERV_IMAGE_COMMAND,
+	QSEOS_APP_REGION_NOTIFICATION,
 	QSEOS_CMD_MAX     = 0xEFFFFFFF
 };
 
@@ -86,6 +93,12 @@
 	CLK_SFPB,
 };
 
+__packed  struct qsee_apps_region_info_ireq {
+	uint32_t qsee_cmd_id;
+	uint32_t addr;
+	uint32_t size;
+};
+
 __packed struct qseecom_check_app_ireq {
 	uint32_t qsee_cmd_id;
 	char     app_name[MAX_APP_NAME_SIZE];
@@ -247,6 +260,11 @@
 struct clk *ce_core_src_clk;
 struct clk *ce_bus_clk;
 
+struct qseecom_sg_entry {
+	uint32_t phys_addr;
+	uint32_t len;
+};
+
 /* Function proto types */
 static int qsee_vote_for_clock(int32_t);
 static void qsee_disable_clock_vote(int32_t);
@@ -672,6 +690,7 @@
 	ion_phys_addr_t pa = 0;
 	uint32_t len;
 	struct qseecom_command_scm_resp resp;
+	struct qseecom_check_app_ireq req;
 	struct qseecom_load_app_ireq load_req;
 
 	/* Copy the relevant information needed for loading the image */
@@ -685,88 +704,112 @@
 	ret = qsee_vote_for_clock(CLK_SFPB);
 	if (ret)
 		pr_warning("Unable to vote for SFPB clock");
+	req.qsee_cmd_id = QSEOS_APP_LOOKUP_COMMAND;
+	memcpy(req.app_name, load_img_req.img_name, MAX_APP_NAME_SIZE);
 
-	pr_warn("App (%s) does not exist, loading apps for first time\n",
+	ret = __qseecom_check_app_exists(req);
+	if (ret < 0)
+		return ret;
+	else
+		app_id = ret;
+
+	if (app_id) {
+		pr_warn("App id %d (%s) already exists\n", app_id,
+			(char *)(req.app_name));
+		spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
+		list_for_each_entry(entry,
+		&qseecom.registered_app_list_head, list){
+			if (entry->app_id == app_id) {
+				entry->ref_cnt++;
+				break;
+			}
+		}
+		spin_unlock_irqrestore(
+		&qseecom.registered_app_list_lock, flags);
+	} else {
+		pr_warn("App (%s) does'nt exist, loading apps for first time\n",
 			(char *)(load_img_req.img_name));
-	/* Get the handle of the shared fd */
-	ihandle = ion_import_dma_buf(qseecom.ion_clnt,
+		/* Get the handle of the shared fd */
+		ihandle = ion_import_dma_buf(qseecom.ion_clnt,
 					load_img_req.ifd_data_fd);
-	if (IS_ERR_OR_NULL(ihandle)) {
-		pr_err("Ion client could not retrieve the handle\n");
-		qsee_disable_clock_vote(CLK_SFPB);
-		return -ENOMEM;
-	}
+		if (IS_ERR_OR_NULL(ihandle)) {
+			pr_err("Ion client could not retrieve the handle\n");
+			qsee_disable_clock_vote(CLK_SFPB);
+			return -ENOMEM;
+		}
 
-	/* Get the physical address of the ION BUF */
-	ret = ion_phys(qseecom.ion_clnt, ihandle, &pa, &len);
+		/* Get the physical address of the ION BUF */
+		ret = ion_phys(qseecom.ion_clnt, ihandle, &pa, &len);
 
-	/* Populate the structure for sending scm call to load image */
-	memcpy(load_req.app_name, load_img_req.img_name, MAX_APP_NAME_SIZE);
-	load_req.qsee_cmd_id = QSEOS_APP_START_COMMAND;
-	load_req.mdt_len = load_img_req.mdt_len;
-	load_req.img_len = load_img_req.img_len;
-	load_req.phy_addr = pa;
+		/* Populate the structure for sending scm call to load image */
+		memcpy(load_req.app_name, load_img_req.img_name,
+						MAX_APP_NAME_SIZE);
+		load_req.qsee_cmd_id = QSEOS_APP_START_COMMAND;
+		load_req.mdt_len = load_img_req.mdt_len;
+		load_req.img_len = load_img_req.img_len;
+		load_req.phy_addr = pa;
 
-	/*  SCM_CALL  to load the app and get the app_id back */
-	ret = scm_call(SCM_SVC_TZSCHEDULER, 1,  &load_req,
+		/*  SCM_CALL  to load the app and get the app_id back */
+		ret = scm_call(SCM_SVC_TZSCHEDULER, 1,  &load_req,
 			sizeof(struct qseecom_load_app_ireq),
 			&resp, sizeof(resp));
-	if (ret) {
-		pr_err("scm_call to load app failed\n");
-		return -EINVAL;
-	}
-
-	if (resp.result == QSEOS_RESULT_FAILURE) {
-		pr_err("scm_call rsp.result is QSEOS_RESULT_FAILURE\n");
-		if (!IS_ERR_OR_NULL(ihandle))
-			ion_free(qseecom.ion_clnt, ihandle);
-		qsee_disable_clock_vote(CLK_SFPB);
-		return -EFAULT;
-	}
-
-	if (resp.result == QSEOS_RESULT_INCOMPLETE) {
-		ret = __qseecom_process_incomplete_cmd(data, &resp);
 		if (ret) {
-			pr_err("process_incomplete_cmd failed err: %d\n",
-					ret);
+			pr_err("scm_call to load app failed\n");
+			return -EINVAL;
+		}
+
+		if (resp.result == QSEOS_RESULT_FAILURE) {
+			pr_err("scm_call rsp.result is QSEOS_RESULT_FAILURE\n");
 			if (!IS_ERR_OR_NULL(ihandle))
 				ion_free(qseecom.ion_clnt, ihandle);
 			qsee_disable_clock_vote(CLK_SFPB);
-			return ret;
+			return -EFAULT;
 		}
-	}
 
-	if (resp.result != QSEOS_RESULT_SUCCESS) {
-		pr_err("scm_call failed resp.result unknown, %d\n",
+		if (resp.result == QSEOS_RESULT_INCOMPLETE) {
+			ret = __qseecom_process_incomplete_cmd(data, &resp);
+			if (ret) {
+				pr_err("process_incomplete_cmd failed err: %d\n",
+					ret);
+				if (!IS_ERR_OR_NULL(ihandle))
+					ion_free(qseecom.ion_clnt, ihandle);
+				qsee_disable_clock_vote(CLK_SFPB);
+				return ret;
+			}
+		}
+
+		if (resp.result != QSEOS_RESULT_SUCCESS) {
+			pr_err("scm_call failed resp.result unknown, %d\n",
 				resp.result);
+			if (!IS_ERR_OR_NULL(ihandle))
+				ion_free(qseecom.ion_clnt, ihandle);
+			qsee_disable_clock_vote(CLK_SFPB);
+			return -EFAULT;
+		}
+
+		app_id = resp.data;
+
+		entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+		if (!entry) {
+			pr_err("kmalloc failed\n");
+			qsee_disable_clock_vote(CLK_SFPB);
+			return -ENOMEM;
+		}
+		entry->app_id = app_id;
+		entry->ref_cnt = 1;
+
+		/* Deallocate the handle */
 		if (!IS_ERR_OR_NULL(ihandle))
 			ion_free(qseecom.ion_clnt, ihandle);
-		qsee_disable_clock_vote(CLK_SFPB);
-		return -EFAULT;
-	}
 
-	app_id = resp.data;
+		spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
+		list_add_tail(&entry->list, &qseecom.registered_app_list_head);
+		spin_unlock_irqrestore(&qseecom.registered_app_list_lock,
+									flags);
 
-	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
-	if (!entry) {
-		pr_err("kmalloc failed\n");
-		qsee_disable_clock_vote(CLK_SFPB);
-		return -ENOMEM;
-	}
-	entry->app_id = app_id;
-	entry->ref_cnt = 1;
-
-	/* Deallocate the handle */
-	if (!IS_ERR_OR_NULL(ihandle))
-		ion_free(qseecom.ion_clnt, ihandle);
-
-	spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
-	list_add_tail(&entry->list, &qseecom.registered_app_list_head);
-	spin_unlock_irqrestore(&qseecom.registered_app_list_lock, flags);
-
-	pr_warn("App with id %d (%s) now loaded\n", app_id,
+		pr_warn("App with id %d (%s) now loaded\n", app_id,
 		(char *)(load_img_req.img_name));
-
+	}
 	data->client.app_id = app_id;
 	load_img_req.app_id = app_id;
 	if (copy_to_user(argp, &load_img_req, sizeof(load_img_req))) {
@@ -1093,13 +1136,11 @@
 {
 	struct ion_handle *ihandle;
 	char *field;
-	uint32_t *update;
-	ion_phys_addr_t pa;
 	int ret = 0;
 	int i = 0;
-	uint32_t length;
 
 	for (i = 0; i < MAX_ION_FD; i++) {
+		struct sg_table *sg_ptr = NULL;
 		if (req->ifd_data[i].fd > 0) {
 			/* Get the handle of the shared fd */
 			ihandle = ion_import_dma_buf(qseecom.ion_clnt,
@@ -1110,20 +1151,51 @@
 			}
 			field = (char *) req->cmd_req_buf +
 						req->ifd_data[i].cmd_buf_offset;
-			update = (uint32_t *) field;
 
 			/* Populate the cmd data structure with the phys_addr */
-			ret = ion_phys(qseecom.ion_clnt, ihandle, &pa, &length);
-			if (ret)
-				return -ENOMEM;
-
-			*update = (uint32_t)pa;
+			sg_ptr = ion_sg_table(qseecom.ion_clnt, ihandle);
+			if (sg_ptr == NULL) {
+				pr_err("IOn client could not retrieve sg table\n");
+				goto err;
+			}
+			if (sg_ptr->nents == 0) {
+				pr_err("Num of scattered entries is 0\n");
+				goto err;
+			}
+			if (sg_ptr->nents > QSEECOM_MAX_SG_ENTRY) {
+				pr_err("Num of scattered entries");
+				pr_err(" (%d) is greater than max supported %d\n",
+					sg_ptr->nents, QSEECOM_MAX_SG_ENTRY);
+				goto err;
+			}
+			if (sg_ptr->nents == 1) {
+				uint32_t *update;
+				update = (uint32_t *) field;
+				*update = (uint32_t)sg_dma_address(sg_ptr->sgl);
+			} else {
+				struct qseecom_sg_entry *update;
+				struct scatterlist *sg;
+				int j = 0;
+				update = (struct qseecom_sg_entry *) field;
+				sg = sg_ptr->sgl;
+				for (j = 0; j < sg_ptr->nents; j++) {
+					update->phys_addr = (uint32_t)
+						sg_dma_address(sg);
+					update->len = (uint32_t)sg->length;
+					update++;
+					sg = sg_next(sg);
+				}
+			}
 			/* Deallocate the handle */
 			if (!IS_ERR_OR_NULL(ihandle))
 				ion_free(qseecom.ion_clnt, ihandle);
 		}
 	}
 	return ret;
+err:
+	if (!IS_ERR_OR_NULL(ihandle))
+		ion_free(qseecom.ion_clnt, ihandle);
+	return -ENOMEM;
 }
 
 static int qseecom_send_modfd_cmd(struct qseecom_dev_handle *data,
@@ -1339,12 +1411,10 @@
 	/* Populate the remaining parameters */
 	load_req.qsee_cmd_id = QSEOS_APP_START_COMMAND;
 	memcpy(load_req.app_name, appname, MAX_APP_NAME_SIZE);
-	mutex_lock(&app_access_lock);
 	ret = qsee_vote_for_clock(CLK_SFPB);
 	if (ret) {
 		kzfree(img_data);
 		pr_warning("Unable to vote for SFPB clock");
-		mutex_unlock(&app_access_lock);
 		return -EIO;
 	}
 
@@ -1356,7 +1426,6 @@
 	if (ret) {
 		pr_err("scm_call to load failed : ret %d\n", ret);
 		qsee_disable_clock_vote(CLK_SFPB);
-		mutex_unlock(&app_access_lock);
 		return -EIO;
 	}
 
@@ -1380,7 +1449,6 @@
 		break;
 	}
 	qsee_disable_clock_vote(CLK_SFPB);
-	mutex_unlock(&app_access_lock);
 
 	return ret;
 }
@@ -1695,6 +1763,21 @@
 }
 EXPORT_SYMBOL(qseecom_send_command);
 
+int qseecom_set_bandwidth(struct qseecom_handle *handle, bool high)
+{
+	if ((handle == NULL) || (handle->dev == NULL)) {
+		pr_err("No valid kernel client\n");
+		return -EINVAL;
+	}
+	if (high)
+		return qsee_vote_for_clock(CLK_DFAB);
+	else {
+		qsee_disable_clock_vote(CLK_DFAB);
+		return 0;
+	}
+}
+EXPORT_SYMBOL(qseecom_set_bandwidth);
+
 static int qseecom_send_resp(void)
 {
 	qseecom.send_resp_flag = 1;
@@ -2510,6 +2593,32 @@
 		}
 		qseecom_platform_support = (struct msm_bus_scale_pdata *)
 						msm_bus_cl_get_pdata(pdev);
+		if (qseecom.qsee_version >= (QSEE_VERSION_02)) {
+			struct resource *resource = NULL;
+			struct qsee_apps_region_info_ireq req;
+			struct qseecom_command_scm_resp resp;
+
+			resource = platform_get_resource_byname(pdev,
+					IORESOURCE_MEM, "secapp-region");
+			if (resource) {
+				req.qsee_cmd_id = QSEOS_APP_REGION_NOTIFICATION;
+				req.addr = resource->start;
+				req.size = resource_size(resource);
+				pr_warn("secure app region addr=0x%x size=0x%x",
+							req.addr, req.size);
+			} else {
+				pr_err("Fail to get secure app region info\n");
+				rc = -EINVAL;
+				goto err;
+			}
+			rc = scm_call(SCM_SVC_TZSCHEDULER, 1, &req, sizeof(req),
+							&resp, sizeof(resp));
+			if (rc) {
+				pr_err("Failed to send secapp region info %d\n",
+									rc);
+				goto err;
+			}
+		}
 	} else {
 		qseecom_platform_support = (struct msm_bus_scale_pdata *)
 						pdev->dev.platform_data;
diff --git a/drivers/misc/qseecom_kernel.h b/drivers/misc/qseecom_kernel.h
index bfa5709..0c93ef2 100644
--- a/drivers/misc/qseecom_kernel.h
+++ b/drivers/misc/qseecom_kernel.h
@@ -31,6 +31,6 @@
 int qseecom_shutdown_app(struct qseecom_handle **handle);
 int qseecom_send_command(struct qseecom_handle *handle, void *send_buf,
 			uint32_t sbuf_len, void *resp_buf, uint32_t rbuf_len);
-
+int qseecom_set_bandwidth(struct qseecom_handle *handle, bool high);
 
 #endif /* __QSEECOM_KERNEL_H_ */
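
Usage note for the qseecom_set_bandwidth() export declared above: a kernel
client that already holds a qseecom_handle can vote the DFAB clock high
around a command and drop the vote afterwards. This is a minimal sketch,
assuming the shared buffers were set up as qseecom_send_command() requires;
the wrapper name and error handling are illustrative, not part of the patch.

	static int qseecom_send_cmd_high_bw(struct qseecom_handle *handle,
				void *send_buf, uint32_t sbuf_len,
				void *resp_buf, uint32_t rbuf_len)
	{
		int ret;

		/* vote for high bandwidth (CLK_DFAB) before the command */
		ret = qseecom_set_bandwidth(handle, true);
		if (ret)
			return ret;

		ret = qseecom_send_command(handle, send_buf, sbuf_len,
				resp_buf, rbuf_len);

		/* always release the vote, even if the command failed */
		qseecom_set_bandwidth(handle, false);

		return ret;
	}
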
diff --git a/drivers/misc/ti_drv2667.c b/drivers/misc/ti_drv2667.c
new file mode 100644
index 0000000..554799c
--- /dev/null
+++ b/drivers/misc/ti_drv2667.c
@@ -0,0 +1,679 @@
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/gpio.h>
+#include <linux/i2c.h>
+#include <linux/pm.h>
+#include <linux/err.h>
+#include <linux/of.h>
+#include <linux/regulator/consumer.h>
+#include <linux/i2c/ti_drv2667.h>
+#include "../staging/android/timed_output.h"
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+#include <linux/earlysuspend.h>
+#define DRV2667_SUS_LEVEL	1
+#endif
+
+#define DRV2667_STATUS_REG	0x00
+#define DRV2667_CNTL1_REG	0x01
+#define DRV2667_CNTL2_REG	0x02
+#define DRV2667_WAV_SEQ3_REG	0x03
+#define DRV2667_FIFO_REG	0x0B
+#define DRV2667_PAGE_REG	0xFF
+
+#define DRV2667_STANDBY_MASK	0xBF
+#define DRV2667_INPUT_MUX_MASK	0x04
+#define DRV2667_GAIN_MASK	0xFC
+#define DRV2667_GAIN_SHIFT	0
+#define DRV2667_TIMEOUT_MASK	0xF3
+#define DRV2667_TIMEOUT_SHIFT	2
+#define DRV2667_GO_MASK		0x01
+#define DRV2667_FIFO_SIZE	100
+#define DRV2667_VIB_START_VAL	0x7F
+#define DRV2667_REG_PAGE_ID	0x00
+#define DRV2667_FIFO_CHUNK_MS	10
+#define DRV2667_BYTES_PER_MS	8
+
+#define DRV2667_WAV_SEQ_ID_IDX		1
+#define DRV2667_WAV_SEQ_REP_IDX		6
+#define DRV2667_WAV_SEQ_FREQ_IDX	8
+#define DRV2667_WAV_SEQ_FREQ_MIN	8
+#define DRV2667_WAV_SEQ_DUR_IDX		9
+
+#define DRV2667_MIN_IDLE_TIMEOUT_MS	5
+#define DRV2667_MAX_IDLE_TIMEOUT_MS	20
+
+#define DRV2667_VTG_MIN_UV	3000000
+#define DRV2667_VTG_MAX_UV	5500000
+#define DRV2667_VTG_CURR_UA	24000
+#define DRV2667_I2C_VTG_MIN_UV	1800000
+#define DRV2667_I2C_VTG_MAX_UV	1800000
+#define DRV2667_I2C_CURR_UA	9630
+
+/* three digital modes (fifo, ram and wave sequence) plus one analog mode */
+enum drv2667_modes {
+	FIFO_MODE = 0,
+	RAM_SEQ_MODE,
+	WAV_SEQ_MODE,
+	ANALOG_MODE,
+};
+
+struct drv2667_data {
+	struct i2c_client *client;
+	struct timed_output_dev dev;
+	struct hrtimer timer;
+	struct work_struct work;
+	struct mutex lock;
+	struct regulator *vdd;
+	struct regulator *vdd_i2c;
+	u32 max_runtime_ms;
+	u32 runtime_left;
+	u8 buf[DRV2667_FIFO_SIZE + 1];
+	u8 cntl2_val;
+	enum drv2667_modes mode;
+	u32 time_chunk_ms;
+#ifdef CONFIG_HAS_EARLYSUSPEND
+	struct early_suspend es;
+#endif
+};
+
+static int drv2667_read_reg(struct i2c_client *client, u32 reg)
+{
+	int rc;
+
+	rc = i2c_smbus_read_byte_data(client, reg);
+	if (rc < 0)
+		dev_err(&client->dev, "i2c reg read for 0x%x failed\n", reg);
+	return rc;
+}
+
+static int drv2667_write_reg(struct i2c_client *client, u32 reg, u8 val)
+{
+	int rc;
+
+	rc = i2c_smbus_write_byte_data(client, reg, val);
+	if (rc < 0)
+		dev_err(&client->dev, "i2c reg write for 0x%xfailed\n", reg);
+
+	return rc;
+}
+
+static void drv2667_dump_regs(struct drv2667_data *data, char *label)
+{
+	dev_dbg(&data->client->dev,
+		"%s: reg0x00 = 0x%x, reg0x01 = 0x%x reg0x02 = 0x%x", label,
+		drv2667_read_reg(data->client, DRV2667_STATUS_REG),
+		drv2667_read_reg(data->client, DRV2667_CNTL1_REG),
+		drv2667_read_reg(data->client, DRV2667_CNTL2_REG));
+}
+
+static void drv2667_worker(struct work_struct *work)
+{
+	struct drv2667_data *data;
+	int rc = 0;
+	u8 val;
+
+	data = container_of(work, struct drv2667_data, work);
+
+	if (data->mode == WAV_SEQ_MODE) {
+		if (data->runtime_left)
+			val = data->cntl2_val | DRV2667_GO_MASK;
+		else
+			val = data->cntl2_val & ~DRV2667_GO_MASK;
+		rc = drv2667_write_reg(data->client, DRV2667_CNTL2_REG, val);
+	} else if (data->mode == FIFO_MODE) {
+		/* data is played at 8khz */
+		if (data->runtime_left < data->time_chunk_ms)
+			val = data->runtime_left * DRV2667_BYTES_PER_MS;
+		else
+			val = data->time_chunk_ms * DRV2667_BYTES_PER_MS;
+
+		rc = i2c_master_send(data->client, data->buf, val + 1);
+	}
+
+	if (rc < 0)
+		dev_err(&data->client->dev, "i2c send message failed\n");
+}
+
+static void drv2667_enable(struct timed_output_dev *dev, int runtime)
+{
+	struct drv2667_data *data = container_of(dev, struct drv2667_data, dev);
+	unsigned long time_ms;
+
+	if (runtime > data->max_runtime_ms) {
+		dev_dbg(&data->client->dev, "Invalid runtime\n");
+		runtime = data->max_runtime_ms;
+	}
+
+	mutex_lock(&data->lock);
+	hrtimer_cancel(&data->timer);
+	data->runtime_left = runtime;
+	if (data->runtime_left < data->time_chunk_ms)
+		time_ms = runtime * NSEC_PER_MSEC;
+	else
+		time_ms = data->time_chunk_ms * NSEC_PER_MSEC;
+	hrtimer_start(&data->timer, ktime_set(0, time_ms), HRTIMER_MODE_REL);
+	schedule_work(&data->work);
+	mutex_unlock(&data->lock);
+}
+
+static int drv2667_get_time(struct timed_output_dev *dev)
+{
+	struct drv2667_data *data = container_of(dev, struct drv2667_data, dev);
+
+	if (hrtimer_active(&data->timer))
+		return	data->runtime_left +
+			ktime_to_ms(hrtimer_get_remaining(&data->timer));
+	return 0;
+}
+
+static enum hrtimer_restart drv2667_timer(struct hrtimer *timer)
+{
+	struct drv2667_data *data;
+	int time_ms;
+
+	data = container_of(timer, struct drv2667_data, timer);
+	if (data->runtime_left <= data->time_chunk_ms) {
+		data->runtime_left = 0;
+		schedule_work(&data->work);
+		return HRTIMER_NORESTART;
+	}
+
+	data->runtime_left -= data->time_chunk_ms;
+	if (data->runtime_left < data->time_chunk_ms)
+		time_ms = data->runtime_left * NSEC_PER_MSEC;
+	else
+		time_ms = data->time_chunk_ms * NSEC_PER_MSEC;
+
+	hrtimer_forward_now(&data->timer, ktime_set(0, time_ms));
+	schedule_work(&data->work);
+	return HRTIMER_RESTART;
+}
+
+static int drv2667_vreg_config(struct drv2667_data *data, bool on)
+{
+	int rc = 0;
+
+	if (!on)
+		goto deconfig_vreg;
+
+	data->vdd = regulator_get(&data->client->dev, "vdd");
+	if (IS_ERR(data->vdd)) {
+		rc = PTR_ERR(data->vdd);
+		dev_err(&data->client->dev, "unable to request vdd\n");
+		return rc;
+	}
+
+	if (regulator_count_voltages(data->vdd) > 0) {
+		rc = regulator_set_voltage(data->vdd,
+				DRV2667_VTG_MIN_UV, DRV2667_VTG_MAX_UV);
+		if (rc < 0) {
+			dev_err(&data->client->dev,
+				"vdd set voltage failed(%d)\n", rc);
+			goto put_vdd;
+		}
+	}
+
+	data->vdd_i2c = regulator_get(&data->client->dev, "vdd-i2c");
+	if (IS_ERR(data->vdd_i2c)) {
+		rc = PTR_ERR(data->vdd_i2c);
+		dev_err(&data->client->dev, "unable to request vdd for i2c\n");
+		goto reset_vdd_volt;
+	}
+
+	if (regulator_count_voltages(data->vdd_i2c) > 0) {
+		rc = regulator_set_voltage(data->vdd_i2c,
+			DRV2667_I2C_VTG_MIN_UV, DRV2667_I2C_VTG_MAX_UV);
+		if (rc < 0) {
+			dev_err(&data->client->dev,
+				"vdd_i2c set voltage failed(%d)\n", rc);
+			goto put_vdd_i2c;
+		}
+	}
+
+	return rc;
+
+deconfig_vreg:
+	if (regulator_count_voltages(data->vdd_i2c) > 0)
+		regulator_set_voltage(data->vdd_i2c, 0, DRV2667_I2C_VTG_MAX_UV);
+put_vdd_i2c:
+	regulator_put(data->vdd_i2c);
+reset_vdd_volt:
+	if (regulator_count_voltages(data->vdd) > 0)
+		regulator_set_voltage(data->vdd, 0, DRV2667_VTG_MAX_UV);
+put_vdd:
+	regulator_put(data->vdd);
+	return rc;
+}
+
+static int reg_set_optimum_mode_check(struct regulator *reg, int load_uA)
+{
+	return (regulator_count_voltages(reg) > 0) ?
+		regulator_set_optimum_mode(reg, load_uA) : 0;
+}
+
+
+static int drv2667_vreg_on(struct drv2667_data *data, bool on)
+{
+	int rc = 0;
+
+	if (!on)
+		goto vreg_off;
+
+	rc = reg_set_optimum_mode_check(data->vdd, DRV2667_VTG_CURR_UA);
+	if (rc < 0) {
+		dev_err(&data->client->dev,
+			"Regulator vdd set_opt failed rc=%d\n", rc);
+		return rc;
+	}
+
+	rc = regulator_enable(data->vdd);
+	if (rc < 0) {
+		dev_err(&data->client->dev, "enable vdd failed\n");
+		return rc;
+	}
+
+	rc = reg_set_optimum_mode_check(data->vdd_i2c, DRV2667_I2C_CURR_UA);
+	if (rc < 0) {
+		dev_err(&data->client->dev,
+			"Regulator vdd_i2c set_opt failed rc=%d\n", rc);
+		return rc;
+	}
+
+	rc = regulator_enable(data->vdd_i2c);
+	if (rc < 0) {
+		dev_err(&data->client->dev, "enable vdd_i2c failed\n");
+		goto disable_vdd;
+	}
+
+	return rc;
+vreg_off:
+	regulator_disable(data->vdd_i2c);
+disable_vdd:
+	regulator_disable(data->vdd);
+	return rc;
+}
+
+#ifdef CONFIG_PM
+static int drv2667_suspend(struct device *dev)
+{
+	struct drv2667_data *data = dev_get_drvdata(dev);
+	u8 val;
+	int rc;
+
+	hrtimer_cancel(&data->timer);
+	cancel_work_sync(&data->work);
+
+	/* set standby */
+	val = data->cntl2_val | ~DRV2667_STANDBY_MASK;
+	rc = drv2667_write_reg(data->client, DRV2667_CNTL2_REG, val);
+	if (rc < 0)
+		dev_err(dev, "unable to set standby\n");
+
+	/* turn regulators off */
+	drv2667_vreg_on(data, false);
+	return 0;
+}
+
+static int drv2667_resume(struct device *dev)
+{
+	struct drv2667_data *data = dev_get_drvdata(dev);
+	int rc;
+
+	/* turn regulators on */
+	rc = drv2667_vreg_on(data, true);
+	if (rc < 0) {
+		dev_err(dev, "unable to turn regulators on\n");
+		return rc;
+	}
+
+	/* clear standby */
+	rc = drv2667_write_reg(data->client,
+			DRV2667_CNTL2_REG, data->cntl2_val);
+	if (rc < 0) {
+		dev_err(dev, "unable to clear standby\n");
+		goto vreg_off;
+	}
+
+	return 0;
+vreg_off:
+	drv2667_vreg_on(data, false);
+	return rc;
+}
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+static void drv2667_early_suspend(struct early_suspend *es)
+{
+	struct drv2667_data *data = container_of(es, struct drv2667_data, es);
+
+	drv2667_suspend(&data->client->dev);
+}
+
+static void drv2667_late_resume(struct early_suspend *es)
+{
+	struct drv2667_data *data = container_of(es, struct drv2667_data, es);
+
+	drv2667_resume(&data->client->dev);
+}
+#endif
+
+static const struct dev_pm_ops drv2667_pm_ops = {
+#ifndef CONFIG_HAS_EARLYSUSPEND
+	.suspend = drv2667_suspend,
+	.resume = drv2667_resume,
+#endif
+};
+#endif
+
+#ifdef CONFIG_OF
+static int drv2667_parse_dt(struct device *dev, struct drv2667_pdata *pdata)
+{
+	struct property *prop;
+	int rc;
+	u32 temp;
+
+	rc = of_property_read_string(dev->of_node, "ti,label", &pdata->name);
+	/* set vibrator as default name */
+	if (rc < 0)
+		pdata->name = "vibrator";
+
+	rc = of_property_read_u32(dev->of_node, "ti,gain", &temp);
+	/* set gain as 0 */
+	if (rc < 0)
+		pdata->gain = 0;
+	else
+		pdata->gain = (u8) temp;
+
+	rc = of_property_read_u32(dev->of_node, "ti,mode", &temp);
+	/* set FIFO mode as default */
+	if (rc < 0)
+		pdata->mode = FIFO_MODE;
+	else
+		pdata->mode = (u8) temp;
+
+	/* read wave sequence */
+	if (pdata->mode == WAV_SEQ_MODE) {
+		prop = of_find_property(dev->of_node, "ti,wav-seq", &temp);
+		if (!prop) {
+			dev_err(dev, "wav seq data not found");
+			return -ENODEV;
+		} else if (temp != DRV2667_WAV_SEQ_LEN) {
+			dev_err(dev, "Invalid length of wav seq data\n");
+			return -EINVAL;
+		}
+		memcpy(pdata->wav_seq, prop->value, DRV2667_WAV_SEQ_LEN);
+	}
+
+	rc = of_property_read_u32(dev->of_node, "ti,idle-timeout-ms", &temp);
+	/* configure minimum idle timeout */
+	if (rc < 0)
+		pdata->idle_timeout_ms = DRV2667_MIN_IDLE_TIMEOUT_MS;
+	else
+		pdata->idle_timeout_ms = (u8) temp;
+
+	rc = of_property_read_u32(dev->of_node, "ti,max-runtime-ms",
+						&pdata->max_runtime_ms);
+	/* configure one sec as default time */
+	if (rc < 0)
+		pdata->max_runtime_ms = MSEC_PER_SEC;
+
+	return 0;
+}
+#else
+static int drv2667_parse_dt(struct device *dev, struct drv2667_pdata *pdata)
+{
+	return -ENODEV;
+}
+#endif
+
+static int __devinit drv2667_probe(struct i2c_client *client,
+				const struct i2c_device_id *id)
+{
+	struct drv2667_data *data;
+	struct drv2667_pdata *pdata;
+	int rc, i;
+	u8 val, fifo_seq_val, reg;
+
+	if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+		dev_err(&client->dev, "i2c is not supported\n");
+		return -EIO;
+	}
+
+	if (client->dev.of_node) {
+		pdata = devm_kzalloc(&client->dev,
+			sizeof(struct drv2667_pdata), GFP_KERNEL);
+		if (!pdata) {
+			dev_err(&client->dev, "unable to allocate pdata\n");
+			return -ENOMEM;
+		}
+		/* parse DT */
+		rc = drv2667_parse_dt(&client->dev, pdata);
+		if (rc) {
+			dev_err(&client->dev, "DT parsing failed\n");
+			return rc;
+		}
+	} else {
+		pdata = client->dev.platform_data;
+		if (!pdata) {
+			dev_err(&client->dev, "invalid pdata\n");
+			return -EINVAL;
+		}
+	}
+
+	data = devm_kzalloc(&client->dev, sizeof(struct drv2667_data),
+					GFP_KERNEL);
+	if (!data) {
+		dev_err(&client->dev, "unable to allocate memory\n");
+		return -ENOMEM;
+	}
+
+	i2c_set_clientdata(client, data);
+
+	data->client = client;
+	data->max_runtime_ms = pdata->max_runtime_ms;
+	mutex_init(&data->lock);
+	INIT_WORK(&data->work, drv2667_worker);
+	hrtimer_init(&data->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+	data->timer.function = drv2667_timer;
+	data->mode = pdata->mode;
+
+	/* configure voltage regulators */
+	rc = drv2667_vreg_config(data, true);
+	if (rc) {
+		dev_err(&client->dev, "unable to configure regulators\n");
+		goto destroy_mutex;
+	}
+
+	/* turn on voltage regulators */
+	rc = drv2667_vreg_on(data, true);
+	if (rc) {
+		dev_err(&client->dev, "unable to turn on regulators\n");
+		goto deconfig_vreg;
+	}
+
+	rc = drv2667_read_reg(client, DRV2667_CNTL2_REG);
+	if (rc < 0)
+		goto vreg_off;
+
+	/* set timeout, clear standby */
+	val = (u8) rc;
+
+	if (pdata->idle_timeout_ms < DRV2667_MIN_IDLE_TIMEOUT_MS ||
+		pdata->idle_timeout_ms > DRV2667_MAX_IDLE_TIMEOUT_MS ||
+		(pdata->idle_timeout_ms % DRV2667_MIN_IDLE_TIMEOUT_MS)) {
+		dev_err(&client->dev, "Invalid idle timeout\n");
+		goto vreg_off;
+	}
+
+	val = (val & DRV2667_TIMEOUT_MASK) |
+		((pdata->idle_timeout_ms / DRV2667_MIN_IDLE_TIMEOUT_MS - 1) <<
+		DRV2667_TIMEOUT_SHIFT);
+
+	val &= DRV2667_STANDBY_MASK;
+
+	rc = drv2667_write_reg(client, DRV2667_CNTL2_REG, val);
+	if (rc < 0)
+		goto vreg_off;
+
+	/* cache control2 val */
+	data->cntl2_val = val;
+
+	/* program drv2667 registers */
+	rc = drv2667_read_reg(client, DRV2667_CNTL1_REG);
+	if (rc < 0)
+		goto vreg_off;
+
+	/* gain and input mode */
+	val = (u8) rc;
+
+	/* remove this check after adding support for these modes */
+	if (data->mode == ANALOG_MODE || data->mode == RAM_SEQ_MODE) {
+		dev_err(&data->client->dev, "Mode not supported\n");
+		goto vreg_off;
+	} else
+		val &= ~DRV2667_INPUT_MUX_MASK; /* set digital mode */
+
+	val = (val & DRV2667_GAIN_MASK) | (pdata->gain << DRV2667_GAIN_SHIFT);
+
+	rc = drv2667_write_reg(client, DRV2667_CNTL1_REG, val);
+	if (rc < 0)
+		goto vreg_off;
+
+	if (data->mode == FIFO_MODE) {
+		/* Load a predefined pattern for FIFO mode */
+		data->buf[0] = DRV2667_FIFO_REG;
+		fifo_seq_val = DRV2667_VIB_START_VAL;
+
+		for (i = 1; i < DRV2667_FIFO_SIZE - 1; i++, fifo_seq_val++)
+			data->buf[i] = fifo_seq_val;
+
+		data->time_chunk_ms = DRV2667_FIFO_CHUNK_MS;
+	} else if (data->mode == WAV_SEQ_MODE) {
+		u8 freq, rep, dur;
+
+		/* program wave sequence from pdata */
+		/* id to wave sequence 3, set page */
+		rc = drv2667_write_reg(client, DRV2667_WAV_SEQ3_REG,
+				pdata->wav_seq[DRV2667_WAV_SEQ_ID_IDX]);
+		if (rc < 0)
+			goto vreg_off;
+
+		/* set page to wave form sequence */
+		rc = drv2667_write_reg(client, DRV2667_PAGE_REG,
+				pdata->wav_seq[DRV2667_WAV_SEQ_ID_IDX]);
+		if (rc < 0)
+			goto vreg_off;
+
+		/* program waveform sequence */
+		for (reg = 0, i = 1; i < DRV2667_WAV_SEQ_LEN - 1; i++, reg++) {
+			rc = drv2667_write_reg(client, reg, pdata->wav_seq[i]);
+			if (rc < 0)
+				goto vreg_off;
+		}
+
+		/* set page back to normal register space */
+		rc = drv2667_write_reg(client, DRV2667_PAGE_REG,
+					DRV2667_REG_PAGE_ID);
+		if (rc < 0)
+			goto vreg_off;
+
+		freq = pdata->wav_seq[DRV2667_WAV_SEQ_FREQ_IDX];
+		rep = pdata->wav_seq[DRV2667_WAV_SEQ_REP_IDX];
+		dur = pdata->wav_seq[DRV2667_WAV_SEQ_DUR_IDX];
+
+		data->time_chunk_ms = (rep * dur * MSEC_PER_SEC) /
+				(freq *	DRV2667_WAV_SEQ_FREQ_MIN);
+	}
+
+	drv2667_dump_regs(data, "new");
+
+	/* register with timed output class */
+	data->dev.name = pdata->name;
+	data->dev.get_time = drv2667_get_time;
+	data->dev.enable = drv2667_enable;
+
+	rc = timed_output_dev_register(&data->dev);
+	if (rc) {
+		dev_err(&client->dev, "unable to register with timed_output\n");
+		goto vreg_off;
+	}
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+	data->es.level = EARLY_SUSPEND_LEVEL_BLANK_SCREEN + DRV2667_SUS_LEVEL;
+	data->es.suspend = drv2667_early_suspend;
+	data->es.resume = drv2667_late_resume;
+	register_early_suspend(&data->es);
+#endif
+	return 0;
+
+vreg_off:
+	drv2667_vreg_on(data, false);
+deconfig_vreg:
+	drv2667_vreg_config(data, false);
+destroy_mutex:
+	mutex_destroy(&data->lock);
+	return rc;
+}
+
+static int __devexit drv2667_remove(struct i2c_client *client)
+{
+	struct drv2667_data *data = i2c_get_clientdata(client);
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+	unregister_early_suspend(&data->es);
+#endif
+	timed_output_dev_unregister(&data->dev);
+	hrtimer_cancel(&data->timer);
+	cancel_work_sync(&data->work);
+	mutex_destroy(&data->lock);
+	drv2667_vreg_on(data, false);
+	drv2667_vreg_config(data, false);
+
+	return 0;
+}
+
+static const struct i2c_device_id drv2667_id_table[] = {
+	{"drv2667", 0},
+	{ },
+};
+MODULE_DEVICE_TABLE(i2c, drv2667_id_table);
+
+#ifdef CONFIG_OF
+static const struct of_device_id drv2667_of_id_table[] = {
+	{.compatible = "ti,drv2667"},
+	{ },
+};
+#else
+#define drv2667_of_id_table NULL
+#endif
+
+static struct i2c_driver drv2667_i2c_driver = {
+	.driver = {
+		.name = "drv2667",
+		.owner = THIS_MODULE,
+		.of_match_table = drv2667_of_id_table,
+#ifdef CONFIG_PM
+		.pm = &drv2667_pm_ops,
+#endif
+	},
+	.probe = drv2667_probe,
+	.remove = __devexit_p(drv2667_remove),
+	.id_table = drv2667_id_table,
+};
+
+module_i2c_driver(drv2667_i2c_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("TI DRV2667 chip driver");
diff --git a/drivers/misc/tspp.c b/drivers/misc/tspp.c
index 3b678c5..f310524 100644
--- a/drivers/misc/tspp.c
+++ b/drivers/misc/tspp.c
@@ -327,10 +327,15 @@
 	int data_inverse;
 	int sync_inverse;
 	int enable_inverse;
+	u32 tsif_irq;
 
 	/* debugfs */
 	struct dentry *dent_tsif;
 	struct dentry *debugfs_tsif_regs[ARRAY_SIZE(debugfs_tsif_regs)];
+	u32 stat_rx;
+	u32 stat_overflow;
+	u32 stat_lost_sync;
+	u32 stat_timeout;
 };
 
 enum tspp_buf_state {
@@ -480,6 +485,49 @@
 		dev_info(&device->pdev->dev, "broken pipe %i", status & 0xffff);
 
 	writel_relaxed(status, device->base + TSPP_IRQ_CLEAR);
+
+	/*
+	 * Before returning IRQ_HANDLED to the generic interrupt handling
+	 * framework, we need to make sure that all operations, including
+	 * clearing of the interrupt status registers in the hardware, have
+	 * completed. A barrier after clearing the interrupt status register
+	 * guarantees that the register has really been cleared by the time
+	 * we return from this handler.
+	 */
+	wmb();
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t tsif_isr(int irq, void *dev)
+{
+	struct tspp_tsif_device *tsif_device = dev;
+	u32 sts_ctl = ioread32(tsif_device->base + TSIF_STS_CTL_OFF);
+
+	if (!(sts_ctl & (TSIF_STS_CTL_PACK_AVAIL |
+			 TSIF_STS_CTL_OVERFLOW |
+			 TSIF_STS_CTL_LOST_SYNC |
+			 TSIF_STS_CTL_TIMEOUT)))
+		return IRQ_NONE;
+
+	if (sts_ctl & TSIF_STS_CTL_OVERFLOW)
+		tsif_device->stat_overflow++;
+
+	if (sts_ctl & TSIF_STS_CTL_LOST_SYNC)
+		tsif_device->stat_lost_sync++;
+
+	if (sts_ctl & TSIF_STS_CTL_TIMEOUT)
+		tsif_device->stat_timeout++;
+
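+	/* write the status back to clear the handled interrupt bits */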
+	iowrite32(sts_ctl, tsif_device->base + TSIF_STS_CTL_OFF);
+
+	/*
+	 * Before returning IRQ_HANDLED to the generic interrupt handling
+	 * framework, we need to make sure that all operations, including
+	 * clearing of the interrupt status registers in the hardware, have
+	 * completed. A barrier after clearing the interrupt status register
+	 * guarantees that the register has really been cleared by the time
+	 * we return from this handler.
+	 */
 	wmb();
 	return IRQ_HANDLED;
 }
@@ -527,6 +575,11 @@
 			channel->waiting->filled = iovec.size;
 			channel->waiting->read_index = 0;
 
+			if (channel->src == TSPP_SOURCE_TSIF0)
+				device->tsif[0].stat_rx++;
+			else if (channel->src == TSPP_SOURCE_TSIF1)
+				device->tsif[1].stat_rx++;
+
 			/* update the pointers */
 			channel->waiting = channel->waiting->next;
 		}
@@ -2326,6 +2379,31 @@
 				base + debugfs_tsif_regs[i].offset,
 				&fops_iomem_x32);
 		}
+
+		debugfs_create_u32(
+			"stat_rx_chunks",
+			S_IRUGO|S_IWUGO,
+			tsif_device->dent_tsif,
+			&tsif_device->stat_rx);
+
+		debugfs_create_u32(
+			"stat_overflow",
+			S_IRUGO|S_IWUGO,
+			tsif_device->dent_tsif,
+			&tsif_device->stat_overflow);
+
+		debugfs_create_u32(
+			"stat_lost_sync",
+			S_IRUGO|S_IWUGO,
+			tsif_device->dent_tsif,
+			&tsif_device->stat_lost_sync);
+
+		debugfs_create_u32(
+			"stat_timeout",
+			S_IRUGO|S_IWUGO,
+			tsif_device->dent_tsif,
+			&tsif_device->stat_timeout);
+
 	}
 }
 
@@ -2504,6 +2582,21 @@
 		goto err_irq;
 	}
 
+	/* map TSIF IRQs */
+	device->tsif[0].tsif_irq = TSIF1_IRQ;
+	device->tsif[1].tsif_irq = TSIF2_IRQ;
+
+	for (i = 0; i < TSPP_TSIF_INSTANCES; i++) {
+		rc = request_irq(device->tsif[i].tsif_irq,
+				tsif_isr, IRQF_SHARED,
+				dev_name(&pdev->dev), &device->tsif[i]);
+		if (rc) {
+			dev_warn(&pdev->dev, "failed to request TSIF%d IRQ: %d",
+				i, rc);
+			device->tsif[i].tsif_irq = 0;
+		}
+	}
+
 	/* BAM IRQ */
 	device->bam_irq = TSIF_BAM_IRQ;
 
@@ -2635,8 +2728,11 @@
 
 	sps_deregister_bam_device(device->bam_handle);
 
-	for (i = 0; i < TSPP_TSIF_INSTANCES; i++)
+	for (i = 0; i < TSPP_TSIF_INSTANCES; i++) {
 		tsif_debugfs_exit(&device->tsif[i]);
+		if (device->tsif[i].tsif_irq)
+			free_irq(device->tsif[i].tsif_irq, &device->tsif[i]);
+	}
 
 	wake_lock_destroy(&device->wake_lock);
 	free_irq(device->tspp_irq, device);
diff --git a/drivers/mmc/card/Kconfig b/drivers/mmc/card/Kconfig
index 33f0600..a1bea00 100644
--- a/drivers/mmc/card/Kconfig
+++ b/drivers/mmc/card/Kconfig
@@ -80,6 +80,7 @@
 config MMC_BLOCK_TEST
 	tristate "MMC block test"
 	depends on MMC_BLOCK && IOSCHED_TEST
+	default y
 	help
 	  MMC block test can be used with test iosched to test the MMC block
 	  device.
diff --git a/drivers/mmc/card/mmc_block_test.c b/drivers/mmc/card/mmc_block_test.c
index c5551b8..610a822 100644
--- a/drivers/mmc/card/mmc_block_test.c
+++ b/drivers/mmc/card/mmc_block_test.c
@@ -554,105 +554,105 @@
 		return NULL;
 	}
 
-	switch (td->test_info.testcase) {
+	switch (td->test_info.testcase) {
 	case TEST_STOP_DUE_TO_FLUSH:
-		return " stop due to flush";
+		return "\"stop due to flush\"";
 	case TEST_STOP_DUE_TO_FLUSH_AFTER_MAX_REQS:
-		return " stop due to flush after max-1 reqs";
+		return "\"stop due to flush after max-1 reqs\"";
 	case TEST_STOP_DUE_TO_READ:
-		return " stop due to read";
+		return "\"stop due to read\"";
 	case TEST_STOP_DUE_TO_READ_AFTER_MAX_REQS:
-		return "Test stop due to read after max-1 reqs";
+		return "\"stop due to read after max-1 reqs\"";
 	case TEST_STOP_DUE_TO_EMPTY_QUEUE:
-		return "Test stop due to empty queue";
+		return "\"stop due to empty queue\"";
 	case TEST_STOP_DUE_TO_MAX_REQ_NUM:
-		return "Test stop due to max req num";
+		return "\"stop due to max req num\"";
 	case TEST_STOP_DUE_TO_THRESHOLD:
-		return "Test stop due to exceeding threshold";
+		return "\"stop due to exceeding threshold\"";
 	case TEST_RET_ABORT:
-		return "Test err_check return abort";
+		return "\"err_check return abort\"";
 	case TEST_RET_PARTIAL_FOLLOWED_BY_SUCCESS:
-		return "Test err_check return partial followed by success";
+		return "\"err_check return partial followed by success\"";
 	case TEST_RET_PARTIAL_FOLLOWED_BY_ABORT:
-		return "Test err_check return partial followed by abort";
+		return "\"err_check return partial followed by abort\"";
 	case TEST_RET_PARTIAL_MULTIPLE_UNTIL_SUCCESS:
-		return "Test err_check return partial multiple until success";
+		return "\"err_check return partial multiple until success\"";
 	case TEST_RET_PARTIAL_MAX_FAIL_IDX:
-		return "Test err_check return partial max fail index";
+		return "\"err_check return partial max fail index\"";
 	case TEST_RET_RETRY:
-		return "Test err_check return retry";
+		return "\"err_check return retry\"";
 	case TEST_RET_CMD_ERR:
-		return "Test err_check return cmd error";
+		return "\"err_check return cmd error\"";
 	case TEST_RET_DATA_ERR:
-		return "Test err_check return data error";
+		return "\"err_check return data error\"";
 	case TEST_HDR_INVALID_VERSION:
-		return "Test invalid - wrong header version";
+		return "\"invalid - wrong header version\"";
 	case TEST_HDR_WRONG_WRITE_CODE:
-		return "Test invalid - wrong write code";
+		return "\"invalid - wrong write code\"";
 	case TEST_HDR_INVALID_RW_CODE:
-		return "Test invalid - wrong R/W code";
+		return "\"invalid - wrong R/W code\"";
 	case TEST_HDR_DIFFERENT_ADDRESSES:
-		return "Test invalid - header different addresses";
+		return "\"invalid - header different addresses\"";
 	case TEST_HDR_REQ_NUM_SMALLER_THAN_ACTUAL:
-		return "Test invalid - header req num smaller than actual";
+		return "\"invalid - header req num smaller than actual\"";
 	case TEST_HDR_REQ_NUM_LARGER_THAN_ACTUAL:
-		return "Test invalid - header req num larger than actual";
+		return "\"invalid - header req num larger than actual\"";
 	case TEST_HDR_CMD23_PACKED_BIT_SET:
-		return "Test invalid - header cmd23 packed bit set";
+		return "\"invalid - header cmd23 packed bit set\"";
 	case TEST_CMD23_MAX_PACKED_WRITES:
-		return "Test invalid - cmd23 max packed writes";
+		return "\"invalid - cmd23 max packed writes\"";
 	case TEST_CMD23_ZERO_PACKED_WRITES:
-		return "Test invalid - cmd23 zero packed writes";
+		return "\"invalid - cmd23 zero packed writes\"";
 	case TEST_CMD23_PACKED_BIT_UNSET:
-		return "Test invalid - cmd23 packed bit unset";
+		return "\"invalid - cmd23 packed bit unset\"";
 	case TEST_CMD23_REL_WR_BIT_SET:
-		return "Test invalid - cmd23 rel wr bit set";
+		return "\"invalid - cmd23 rel wr bit set\"";
 	case TEST_CMD23_BITS_16TO29_SET:
-		return "Test invalid - cmd23 bits [16-29] set";
+		return "\"invalid - cmd23 bits [16-29] set\"";
 	case TEST_CMD23_HDR_BLK_NOT_IN_COUNT:
-		return "Test invalid - cmd23 header block not in count";
+		return "\"invalid - cmd23 header block not in count\"";
 	case TEST_PACKING_EXP_N_OVER_TRIGGER:
-		return "\nTest packing control - pack n";
+		return "\"packing control - pack n\"";
 	case TEST_PACKING_EXP_N_OVER_TRIGGER_FB_READ:
-		return "\nTest packing control - pack n followed by read";
+		return "\"packing control - pack n followed by read\"";
 	case TEST_PACKING_EXP_N_OVER_TRIGGER_FLUSH_N:
-		return "\nTest packing control - pack n followed by flush";
+		return "\"packing control - pack n followed by flush\"";
 	case TEST_PACKING_EXP_ONE_OVER_TRIGGER_FB_READ:
-		return "\nTest packing control - pack one followed by read";
+		return "\"packing control - pack one followed by read\"";
 	case TEST_PACKING_EXP_THRESHOLD_OVER_TRIGGER:
-		return "\nTest packing control - pack threshold";
+		return "\"packing control - pack threshold\"";
 	case TEST_PACKING_NOT_EXP_LESS_THAN_TRIGGER_REQUESTS:
-		return "\nTest packing control - no packing";
+		return "\"packing control - no packing\"";
 	case TEST_PACKING_NOT_EXP_TRIGGER_REQUESTS:
-		return "\nTest packing control - no packing, trigger requests";
+		return "\"packing control - no packing, trigger requests\"";
 	case TEST_PACKING_NOT_EXP_TRIGGER_READ_TRIGGER:
-		return "\nTest packing control - no pack, trigger-read-trigger";
+		return "\"packing control - no pack, trigger-read-trigger\"";
 	case TEST_PACKING_NOT_EXP_TRIGGER_FLUSH_TRIGGER:
-		return "\nTest packing control- no pack, trigger-flush-trigger";
+		return "\"packing control- no pack, trigger-flush-trigger\"";
 	case TEST_PACK_MIX_PACKED_NO_PACKED_PACKED:
-		return "\nTest packing control - mix: pack -> no pack -> pack";
+		return "\"packing control - mix: pack -> no pack -> pack\"";
 	case TEST_PACK_MIX_NO_PACKED_PACKED_NO_PACKED:
-		return "\nTest packing control - mix: no pack->pack->no pack";
+		return "\"packing control - mix: no pack->pack->no pack\"";
 	case TEST_WRITE_DISCARD_SANITIZE_READ:
-		return "\nTest write, discard, sanitize";
+		return "\"write, discard, sanitize\"";
 	case BKOPS_DELAYED_WORK_LEVEL_1:
-		return "\nTest delayed work BKOPS level 1";
+		return "\"delayed work BKOPS level 1\"";
 	case BKOPS_DELAYED_WORK_LEVEL_1_HPI:
-		return "\nTest delayed work BKOPS level 1 with HPI";
+		return "\"delayed work BKOPS level 1 with HPI\"";
 	case BKOPS_CANCEL_DELAYED_WORK:
-		return "\nTest cancel delayed BKOPS work";
+		return "\"cancel delayed BKOPS work\"";
 	case BKOPS_URGENT_LEVEL_2:
-		return "\nTest urgent BKOPS level 2";
+		return "\"urgent BKOPS level 2\"";
 	case BKOPS_URGENT_LEVEL_2_TWO_REQS:
-		return "\nTest urgent BKOPS level 2, followed by a request";
+		return "\"urgent BKOPS level 2, followed by a request\"";
 	case BKOPS_URGENT_LEVEL_3:
-		return "\nTest urgent BKOPS level 3";
+		return "\"urgent BKOPS level 3\"";
 	case TEST_LONG_SEQUENTIAL_READ:
-		return "Test long sequential read";
+		return "\"long sequential read\"";
 	case TEST_LONG_SEQUENTIAL_WRITE:
-		return "Test long sequential write";
+		return "\"long sequential write\"";
 	default:
-		 return "Unknown testcase";
+		return "Unknown testcase";
 	}
 
 	return NULL;
@@ -1702,6 +1702,12 @@
 		    (bkops_stat->suspend == 0) &&
 		    (bkops_stat->hpi == 1))
 			goto exit;
+		/* this might happen due to timing issues */
+		else if
+		   ((bkops_stat->bkops_level[BKOPS_SEVERITY_1_INDEX] == 0) &&
+		    (bkops_stat->suspend == 0) &&
+		    (bkops_stat->hpi == 0))
+			goto ignore;
 		else
 			goto fail;
 		break;
@@ -1735,6 +1741,9 @@
 
 exit:
 	return 0;
+ignore:
+	test_iosched_set_ignore_round(true);
+	return 0;
 fail:
 	if (td->fs_wr_reqs_during_test) {
 		test_pr_info("%s: wr reqs during test, cancel the round",
diff --git a/drivers/mmc/core/debugfs.c b/drivers/mmc/core/debugfs.c
index ddb562e..84a26a1 100644
--- a/drivers/mmc/core/debugfs.c
+++ b/drivers/mmc/core/debugfs.c
@@ -186,6 +186,46 @@
 DEFINE_SIMPLE_ATTRIBUTE(mmc_clock_fops, mmc_clock_opt_get, mmc_clock_opt_set,
 	"%llu\n");
 
+static int mmc_max_clock_get(void *data, u64 *val)
+{
+	struct mmc_host *host = data;
+
+	if (!host)
+		return -EINVAL;
+
+	*val = host->f_max;
+
+	return 0;
+}
+
+static int mmc_max_clock_set(void *data, u64 val)
+{
+	struct mmc_host *host = data;
+	int err = -EINVAL;
+	unsigned long freq = val;
+	unsigned int old_freq;
+
+	if (!host || (val < host->f_min))
+		goto out;
+
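+	/*
+	 * Try the new maximum under the host claim and restore the previous
+	 * f_max if the bus speed change fails.
+	 */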
+	mmc_claim_host(host);
+	if (host->bus_ops && host->bus_ops->change_bus_speed) {
+		old_freq = host->f_max;
+		host->f_max = freq;
+
+		err = host->bus_ops->change_bus_speed(host, &freq);
+
+		if (err)
+			host->f_max = old_freq;
+	}
+	mmc_release_host(host);
+out:
+	return err;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(mmc_max_clock_fops, mmc_max_clock_get,
+		mmc_max_clock_set, "%llu\n");
+
 void mmc_add_host_debugfs(struct mmc_host *host)
 {
 	struct dentry *root;
@@ -208,6 +248,10 @@
 			&mmc_clock_fops))
 		goto err_node;
 
+	if (!debugfs_create_file("max_clock", S_IRUSR | S_IWUSR, root, host,
+		&mmc_max_clock_fops))
+		goto err_node;
+
 #ifdef CONFIG_MMC_CLKGATE
 	if (!debugfs_create_u32("clk_delay", (S_IRUSR | S_IWUSR),
 				root, &host->clk_delay))
diff --git a/drivers/mmc/host/msm_sdcc.c b/drivers/mmc/host/msm_sdcc.c
index de7e5bc..4f7d4c3 100644
--- a/drivers/mmc/host/msm_sdcc.c
+++ b/drivers/mmc/host/msm_sdcc.c
@@ -46,6 +46,7 @@
 #include <linux/regulator/consumer.h>
 #include <linux/slab.h>
 #include <linux/pm_qos.h>
+#include <linux/iopoll.h>
 
 #include <asm/cacheflush.h>
 #include <asm/div64.h>
@@ -348,23 +349,23 @@
 	 * SDCC controller itself can support hard reset.
 	 */
 	if (is_sw_hard_reset(host)) {
-		ktime_t start;
+		u32 pwr;
 
 		writel_relaxed(readl_relaxed(host->base + MMCIPOWER)
 				| MCI_SW_RST, host->base + MMCIPOWER);
 		msmsdcc_sync_reg_wr(host);
 
-		start = ktime_get();
-		while (readl_relaxed(host->base + MMCIPOWER) & MCI_SW_RST) {
-			/*
-			 * See comment in msmsdcc_soft_reset() on choosing 1ms
-			 * poll timeout.
-			 */
-			if (ktime_to_us(ktime_sub(ktime_get(), start)) > 1000) {
-				pr_err("%s: %s failed\n",
-					mmc_hostname(host->mmc), __func__);
-				BUG();
-			}
+		/*
+		 * See comment in msmsdcc_soft_reset() on choosing 1ms
+		 * poll timeout.
+		 */
+		ret = readl_poll_timeout_noirq(host->base + MMCIPOWER,
+				pwr, !(pwr & MCI_SW_RST), 100, 10);
+
+		if (ret) {
+			pr_err("%s: %s failed (%d)\n",
+				mmc_hostname(host->mmc), __func__, ret);
+			BUG();
 		}
 	} else {
 		ret = clk_reset(host->clk, CLK_RESET_ASSERT);
@@ -470,7 +471,9 @@
 					readl_relaxed(host->base + MMCISTATUS)
 					& MCI_TXACTIVE ? "TX" : "RX");
 				msmsdcc_dump_sdcc_state(host);
-				BUG();
+				msmsdcc_reset_and_restore(host);
+				host->pending_dpsm_reset = false;
+				goto out;
 			}
 		}
 	}
diff --git a/drivers/mmc/host/sdhci-pci.c b/drivers/mmc/host/sdhci-pci.c
index b34b069..e7a3741 100644
--- a/drivers/mmc/host/sdhci-pci.c
+++ b/drivers/mmc/host/sdhci-pci.c
@@ -23,8 +23,9 @@
 #include <linux/scatterlist.h>
 #include <linux/io.h>
 #include <linux/gpio.h>
-#include <linux/pm_runtime.h>
 #include <linux/mmc/sdhci-pci-data.h>
+#include <linux/sfi.h>
+#include <linux/pm_runtime.h>
 
 #include "sdhci.h"
 
@@ -1451,6 +1452,8 @@
 	int i;
 	struct sdhci_pci_chip *chip;
 
+	sdhci_pci_runtime_pm_forbid(&pdev->dev);
+
 	chip = pci_get_drvdata(pdev);
 
 	if (chip) {
diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c
index 55a164f..425d092 100644
--- a/drivers/mmc/host/sdhci-s3c.c
+++ b/drivers/mmc/host/sdhci-s3c.c
@@ -672,6 +672,7 @@
 }
 
 #ifdef CONFIG_PM_SLEEP
+
 static int sdhci_s3c_suspend(struct device *dev)
 {
 	struct sdhci_host *host = dev_get_drvdata(dev);
@@ -712,6 +713,13 @@
 
 #define SDHCI_S3C_PMOPS (&sdhci_s3c_pmops)
 
+static const struct dev_pm_ops sdhci_s3c_pmops = {
+	.suspend	= sdhci_s3c_suspend,
+	.resume		= sdhci_s3c_resume,
+};
+
+#define SDHCI_S3C_PMOPS (&sdhci_s3c_pmops)
+
 #else
 #define SDHCI_S3C_PMOPS NULL
 #endif
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index 43f7e77..6451d62 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -21,6 +21,7 @@
 #include <linux/slab.h>
 #include <linux/scatterlist.h>
 #include <linux/regulator/consumer.h>
+#include <linux/pm_runtime.h>
 
 #include <linux/leds.h>
 
@@ -42,14 +43,29 @@
 #define MAX_TUNING_LOOP 40
 
 static unsigned int debug_quirks = 0;
+static unsigned int debug_quirks2;
 
 static void sdhci_finish_data(struct sdhci_host *);
 
 static void sdhci_send_command(struct sdhci_host *, struct mmc_command *);
 static void sdhci_finish_command(struct sdhci_host *);
-static int sdhci_execute_tuning(struct mmc_host *mmc);
+static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode);
 static void sdhci_tuning_timer(unsigned long data);
 
+#ifdef CONFIG_PM_RUNTIME
+static int sdhci_runtime_pm_get(struct sdhci_host *host);
+static int sdhci_runtime_pm_put(struct sdhci_host *host);
+#else
+static inline int sdhci_runtime_pm_get(struct sdhci_host *host)
+{
+	return 0;
+}
+static inline int sdhci_runtime_pm_put(struct sdhci_host *host)
+{
+	return 0;
+}
+#endif
+
 static void sdhci_dumpregs(struct sdhci_host *host)
 {
 	printk(KERN_DEBUG DRIVER_NAME ": =========== REGISTER DUMP (%s)===========\n",
@@ -134,6 +150,9 @@
 	    (host->mmc->caps & MMC_CAP_NONREMOVABLE))
 		return;
 
+	if (host->quirks2 & SDHCI_QUIRK2_OWN_CARD_DETECTION)
+		return;
+
 	if (enable)
 		sdhci_unmask_irqs(host, irqs);
 	else
@@ -249,11 +268,14 @@
 
 	spin_lock_irqsave(&host->lock, flags);
 
+	if (host->runtime_suspended)
+		goto out;
+
 	if (brightness == LED_OFF)
 		sdhci_deactivate_led(host);
 	else
 		sdhci_activate_led(host);
-
+out:
 	spin_unlock_irqrestore(&host->lock, flags);
 }
 #endif
@@ -398,12 +420,12 @@
 static char *sdhci_kmap_atomic(struct scatterlist *sg, unsigned long *flags)
 {
 	local_irq_save(*flags);
-	return kmap_atomic(sg_page(sg), KM_BIO_SRC_IRQ) + sg->offset;
+	return kmap_atomic(sg_page(sg)) + sg->offset;
 }
 
 static void sdhci_kunmap_atomic(void *buffer, unsigned long *flags)
 {
-	kunmap_atomic(buffer, KM_BIO_SRC_IRQ);
+	kunmap_atomic(buffer);
 	local_irq_restore(*flags);
 }
 
@@ -653,9 +675,7 @@
 			break;
 	}
 
-	if (count >= 0xF) {
-		printk(KERN_WARNING "%s: Too large timeout requested for CMD%d!\n",
-		       mmc_hostname(host->mmc), cmd->opcode);
+	if (count >= 0xF)
 		count = 0xE;
 
 	return count;
@@ -992,7 +1012,8 @@
 		flags |= SDHCI_CMD_INDEX;
 
 	/* CMD19 is special in that the Data Present Select should be set */
-	if (cmd->data || (cmd->opcode == MMC_SEND_TUNING_BLOCK))
+	if (cmd->data || cmd->opcode == MMC_SEND_TUNING_BLOCK ||
+	    cmd->opcode == MMC_SEND_TUNING_BLOCK_HS200)
 		flags |= SDHCI_CMD_DATA;
 
 	sdhci_writew(host, SDHCI_MAKE_CMD(cmd->opcode, flags), SDHCI_COMMAND);
@@ -1208,6 +1229,8 @@
 
 	host = mmc_priv(mmc);
 
+	sdhci_runtime_pm_get(host);
+
 	spin_lock_irqsave(&host->lock, flags);
 
 	WARN_ON(host->mrq != NULL);
@@ -1251,7 +1274,7 @@
 		if ((host->flags & SDHCI_NEEDS_RETUNING) &&
 		    !(present_state & (SDHCI_DOING_WRITE | SDHCI_DOING_READ))) {
 			spin_unlock_irqrestore(&host->lock, flags);
-			sdhci_execute_tuning(mmc);
+			sdhci_execute_tuning(mmc, mrq->cmd->opcode);
 			spin_lock_irqsave(&host->lock, flags);
 
 			/* Restore original mmc_request structure */
@@ -1268,14 +1291,11 @@
 	spin_unlock_irqrestore(&host->lock, flags);
 }
 
-static void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
+static void sdhci_do_set_ios(struct sdhci_host *host, struct mmc_ios *ios)
 {
-	struct sdhci_host *host;
 	unsigned long flags;
 	u8 ctrl;
 
-	host = mmc_priv(mmc);
-
 	spin_lock_irqsave(&host->lock, flags);
 
 	if (host->flags & SDHCI_DEVICE_DEAD)
@@ -1338,7 +1358,8 @@
 		unsigned int clock;
 
 		/* In case of UHS-I modes, set High Speed Enable */
-		if ((ios->timing == MMC_TIMING_UHS_SDR50) ||
+		if ((ios->timing == MMC_TIMING_MMC_HS200) ||
+		    (ios->timing == MMC_TIMING_UHS_SDR50) ||
 		    (ios->timing == MMC_TIMING_UHS_SDR104) ||
 		    (ios->timing == MMC_TIMING_UHS_DDR50) ||
 		    (ios->timing == MMC_TIMING_UHS_SDR25))
@@ -1391,7 +1412,9 @@
 			ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
 			/* Select Bus Speed Mode for host */
 			ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
-			if (ios->timing == MMC_TIMING_UHS_SDR12)
+			if (ios->timing == MMC_TIMING_MMC_HS200)
+				ctrl_2 |= SDHCI_CTRL_HS_SDR200;
+			else if (ios->timing == MMC_TIMING_UHS_SDR12)
 				ctrl_2 |= SDHCI_CTRL_UHS_SDR12;
 			else if (ios->timing == MMC_TIMING_UHS_SDR25)
 				ctrl_2 |= SDHCI_CTRL_UHS_SDR25;
@@ -1424,7 +1447,16 @@
 	spin_unlock_irqrestore(&host->lock, flags);
 }
 
-static int check_ro(struct sdhci_host *host)
+static void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
+{
+	struct sdhci_host *host = mmc_priv(mmc);
+
+	sdhci_runtime_pm_get(host);
+	sdhci_do_set_ios(host, ios);
+	sdhci_runtime_pm_put(host);
+}
+
+static int sdhci_check_ro(struct sdhci_host *host)
 {
 	unsigned long flags;
 	int is_readonly;
@@ -1448,19 +1480,16 @@
 
 #define SAMPLE_COUNT	5
 
-static int sdhci_get_ro(struct mmc_host *mmc)
+static int sdhci_do_get_ro(struct sdhci_host *host)
 {
-	struct sdhci_host *host;
 	int i, ro_count;
 
-	host = mmc_priv(mmc);
-
 	if (!(host->quirks & SDHCI_QUIRK_UNSTABLE_RO_DETECT))
-		return check_ro(host);
+		return sdhci_check_ro(host);
 
 	ro_count = 0;
 	for (i = 0; i < SAMPLE_COUNT; i++) {
-		if (check_ro(host)) {
+		if (sdhci_check_ro(host)) {
 			if (++ro_count > SAMPLE_COUNT / 2)
 				return 1;
 		}
@@ -1469,38 +1498,64 @@
 	return 0;
 }
 
-static void sdhci_enable_sdio_irq(struct mmc_host *mmc, int enable)
+static void sdhci_hw_reset(struct mmc_host *mmc)
 {
-	struct sdhci_host *host;
-	unsigned long flags;
+	struct sdhci_host *host = mmc_priv(mmc);
 
-	host = mmc_priv(mmc);
+	if (host->ops && host->ops->hw_reset)
+		host->ops->hw_reset(host);
+}
 
-	spin_lock_irqsave(&host->lock, flags);
+static int sdhci_get_ro(struct mmc_host *mmc)
+{
+	struct sdhci_host *host = mmc_priv(mmc);
+	int ret;
 
+	sdhci_runtime_pm_get(host);
+	ret = sdhci_do_get_ro(host);
+	sdhci_runtime_pm_put(host);
+	return ret;
+}
+
+static void sdhci_enable_sdio_irq_nolock(struct sdhci_host *host, int enable)
+{
 	if (host->flags & SDHCI_DEVICE_DEAD)
 		goto out;
 
 	if (enable)
+		host->flags |= SDHCI_SDIO_IRQ_ENABLED;
+	else
+		host->flags &= ~SDHCI_SDIO_IRQ_ENABLED;
+
+	/* SDIO IRQ will be enabled as appropriate in runtime resume */
+	if (host->runtime_suspended)
+		goto out;
+
+	if (enable)
 		sdhci_unmask_irqs(host, SDHCI_INT_CARD_INT);
 	else
 		sdhci_mask_irqs(host, SDHCI_INT_CARD_INT);
 out:
 	mmiowb();
+}
 
+static void sdhci_enable_sdio_irq(struct mmc_host *mmc, int enable)
+{
+	struct sdhci_host *host = mmc_priv(mmc);
+	unsigned long flags;
+
+	spin_lock_irqsave(&host->lock, flags);
+	sdhci_enable_sdio_irq_nolock(host, enable);
 	spin_unlock_irqrestore(&host->lock, flags);
 }
 
-static int sdhci_start_signal_voltage_switch(struct mmc_host *mmc,
-	struct mmc_ios *ios)
+static int sdhci_do_start_signal_voltage_switch(struct sdhci_host *host,
+						struct mmc_ios *ios)
 {
-	struct sdhci_host *host;
 	u8 pwr;
 	u16 clk, ctrl;
 	u32 present_state;
 
-	host = mmc_priv(mmc);
-
 	/*
 	 * Signal Voltage Switching is only applicable for Host Controllers
 	 * v3.00 and above.
@@ -1593,7 +1648,21 @@
 		return 0;
 }
 
-static int sdhci_execute_tuning(struct mmc_host *mmc)
+static int sdhci_start_signal_voltage_switch(struct mmc_host *mmc,
+	struct mmc_ios *ios)
+{
+	struct sdhci_host *host = mmc_priv(mmc);
+	int err;
+
+	if (host->version < SDHCI_SPEC_300)
+		return 0;
+	sdhci_runtime_pm_get(host);
+	err = sdhci_do_start_signal_voltage_switch(host, ios);
+	sdhci_runtime_pm_put(host);
+	return err;
+}
+
+static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
 {
 	struct sdhci_host *host;
 	u16 ctrl;
@@ -1601,26 +1670,35 @@
 	int tuning_loop_counter = MAX_TUNING_LOOP;
 	unsigned long timeout;
 	int err = 0;
+	bool requires_tuning_nonuhs = false;
 
 	host = mmc_priv(mmc);
 
+	sdhci_runtime_pm_get(host);
 	disable_irq(host->irq);
 	spin_lock(&host->lock);
 
 	ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
 
 	/*
-	 * Host Controller needs tuning only in case of SDR104 mode
-	 * and for SDR50 mode when Use Tuning for SDR50 is set in
+	 * The Host Controller needs tuning only in case of SDR104 mode
+	 * and for SDR50 mode when Use Tuning for SDR50 is set in the
 	 * Capabilities register.
+	 * If the Host Controller supports the HS200 mode then the
+	 * tuning function has to be executed.
 	 */
+	if (((ctrl & SDHCI_CTRL_UHS_MASK) == SDHCI_CTRL_UHS_SDR50) &&
+	    (host->flags & SDHCI_SDR50_NEEDS_TUNING ||
+	     host->flags & SDHCI_HS200_NEEDS_TUNING))
+		requires_tuning_nonuhs = true;
+
 	if (((ctrl & SDHCI_CTRL_UHS_MASK) == SDHCI_CTRL_UHS_SDR104) ||
-	    (((ctrl & SDHCI_CTRL_UHS_MASK) == SDHCI_CTRL_UHS_SDR50) &&
-	    (host->flags & SDHCI_SDR50_NEEDS_TUNING)))
+	    requires_tuning_nonuhs)
 		ctrl |= SDHCI_CTRL_EXEC_TUNING;
 	else {
 		spin_unlock(&host->lock);
 		enable_irq(host->irq);
+		sdhci_runtime_pm_put(host);
 		return 0;
 	}
 
@@ -1646,12 +1724,12 @@
 	timeout = 150;
 	do {
 		struct mmc_command cmd = {0};
-		struct mmc_request mrq = {0};
+		struct mmc_request mrq = {NULL};
 
 		if (!tuning_loop_counter && !timeout)
 			break;
 
-		cmd.opcode = MMC_SEND_TUNING_BLOCK;
+		cmd.opcode = opcode;
 		cmd.arg = 0;
 		cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
 		cmd.retries = 0;
@@ -1666,7 +1744,17 @@
 		 * block to the Host Controller. So we set the block size
 		 * to 64 here.
 		 */
-		sdhci_writew(host, SDHCI_MAKE_BLKSZ(7, 64), SDHCI_BLOCK_SIZE);
+		if (cmd.opcode == MMC_SEND_TUNING_BLOCK_HS200) {
+			if (mmc->ios.bus_width == MMC_BUS_WIDTH_8)
+				sdhci_writew(host, SDHCI_MAKE_BLKSZ(7, 128),
+					     SDHCI_BLOCK_SIZE);
+			else if (mmc->ios.bus_width == MMC_BUS_WIDTH_4)
+				sdhci_writew(host, SDHCI_MAKE_BLKSZ(7, 64),
+					     SDHCI_BLOCK_SIZE);
+		} else {
+			sdhci_writew(host, SDHCI_MAKE_BLKSZ(7, 64),
+				     SDHCI_BLOCK_SIZE);
+		}
 
 		/*
 		 * The tuning block is sent by the card to the host controller.
@@ -1764,18 +1852,16 @@
 	sdhci_clear_set_irqs(host, SDHCI_INT_DATA_AVAIL, ier);
 	spin_unlock(&host->lock);
 	enable_irq(host->irq);
+	sdhci_runtime_pm_put(host);
 
 	return err;
 }
 
-static void sdhci_enable_preset_value(struct mmc_host *mmc, bool enable)
+static void sdhci_do_enable_preset_value(struct sdhci_host *host, bool enable)
 {
-	struct sdhci_host *host;
 	u16 ctrl;
 	unsigned long flags;
 
-	host = mmc_priv(mmc);
-
 	/* Host Controller v3.00 defines preset value registers */
 	if (host->version < SDHCI_SPEC_300)
 		return;
@@ -1791,18 +1877,30 @@
 	if (enable && !(ctrl & SDHCI_CTRL_PRESET_VAL_ENABLE)) {
 		ctrl |= SDHCI_CTRL_PRESET_VAL_ENABLE;
 		sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
+		host->flags |= SDHCI_PV_ENABLED;
 	} else if (!enable && (ctrl & SDHCI_CTRL_PRESET_VAL_ENABLE)) {
 		ctrl &= ~SDHCI_CTRL_PRESET_VAL_ENABLE;
 		sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
+		host->flags &= ~SDHCI_PV_ENABLED;
 	}
 
 	spin_unlock_irqrestore(&host->lock, flags);
 }
 
+static void sdhci_enable_preset_value(struct mmc_host *mmc, bool enable)
+{
+	struct sdhci_host *host = mmc_priv(mmc);
+
+	sdhci_runtime_pm_get(host);
+	sdhci_do_enable_preset_value(host, enable);
+	sdhci_runtime_pm_put(host);
+}
+
 static const struct mmc_host_ops sdhci_ops = {
 	.request	= sdhci_request,
 	.set_ios	= sdhci_set_ios,
 	.get_ro		= sdhci_get_ro,
+	.hw_reset	= sdhci_hw_reset,
 	.enable_sdio_irq = sdhci_enable_sdio_irq,
 	.start_signal_voltage_switch	= sdhci_start_signal_voltage_switch,
 	.execute_tuning			= sdhci_execute_tuning,
@@ -1824,19 +1922,19 @@
 
 	spin_lock_irqsave(&host->lock, flags);
 
-	if (!(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT)) {
-		if (host->mrq) {
-			printk(KERN_ERR "%s: Card removed during transfer!\n",
-				mmc_hostname(host->mmc));
-			printk(KERN_ERR "%s: Resetting controller.\n",
-				mmc_hostname(host->mmc));
+	/* Check host->mrq first in case we are runtime suspended */
+	if (host->mrq &&
+	    !(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT)) {
+		printk(KERN_ERR "%s: Card removed during transfer!\n",
+			mmc_hostname(host->mmc));
+		printk(KERN_ERR "%s: Resetting controller.\n",
+			mmc_hostname(host->mmc));
 
-			sdhci_reset(host, SDHCI_RESET_CMD);
-			sdhci_reset(host, SDHCI_RESET_DATA);
+		sdhci_reset(host, SDHCI_RESET_CMD);
+		sdhci_reset(host, SDHCI_RESET_DATA);
 
-			host->mrq->cmd->error = -ENOMEDIUM;
-			tasklet_schedule(&host->finish_tasklet);
-		}
+		host->mrq->cmd->error = -ENOMEDIUM;
+		tasklet_schedule(&host->finish_tasklet);
 	}
 
 	spin_unlock_irqrestore(&host->lock, flags);
@@ -1852,14 +1950,16 @@
 
 	host = (struct sdhci_host*)param;
 
+	spin_lock_irqsave(&host->lock, flags);
+
         /*
          * If this tasklet gets rescheduled while running, it will
          * be run again afterwards but without any active request.
          */
-	if (!host->mrq)
+	if (!host->mrq) {
+		spin_unlock_irqrestore(&host->lock, flags);
 		return;
-
-	spin_lock_irqsave(&host->lock, flags);
+	}
 
 	del_timer(&host->timer);
 
@@ -1903,6 +2003,7 @@
 	spin_unlock_irqrestore(&host->lock, flags);
 
 	mmc_request_done(host->mmc, mrq);
+	sdhci_runtime_pm_put(host);
 }
 
 static void sdhci_timeout_timer(unsigned long data)
@@ -2036,12 +2137,14 @@
 
 static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)
 {
+	u32 command;
 	BUG_ON(intmask == 0);
 
 	/* CMD19 generates _only_ Buffer Read Ready interrupt */
 	if (intmask & SDHCI_INT_DATA_AVAIL) {
-		if (SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND)) ==
-		    MMC_SEND_TUNING_BLOCK) {
+		command = SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND));
+		if (command == MMC_SEND_TUNING_BLOCK ||
+		    command == MMC_SEND_TUNING_BLOCK_HS200) {
 			host->tuning_done = 1;
 			wake_up(&host->buf_ready_int);
 			return;
@@ -2140,6 +2243,13 @@
 
 	spin_lock(&host->lock);
 
+	if (host->runtime_suspended) {
+		spin_unlock(&host->lock);
+		printk(KERN_WARNING "%s: got irq while runtime suspended\n",
+		       mmc_hostname(host->mmc));
+		return IRQ_HANDLED;
+	}
+
 	intmask = sdhci_readl(host, SDHCI_INT_STATUS);
 
 	if (!intmask || intmask == 0xffffffff) {
@@ -2226,7 +2336,7 @@
 
 #ifdef CONFIG_PM
 
-int sdhci_suspend_host(struct sdhci_host *host, pm_message_t state)
+int sdhci_suspend_host(struct sdhci_host *host)
 {
 	int ret;
 
@@ -2266,7 +2376,6 @@
 			return ret;
 	}
 
-
 	if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
 		if (host->ops->enable_dma)
 			host->ops->enable_dma(host);
@@ -2317,6 +2426,90 @@
 
 #endif /* CONFIG_PM */
 
+#ifdef CONFIG_PM_RUNTIME
+
+static int sdhci_runtime_pm_get(struct sdhci_host *host)
+{
+	return pm_runtime_get_sync(host->mmc->parent);
+}
+
+static int sdhci_runtime_pm_put(struct sdhci_host *host)
+{
+	pm_runtime_mark_last_busy(host->mmc->parent);
+	return pm_runtime_put_autosuspend(host->mmc->parent);
+}
+
+int sdhci_runtime_suspend_host(struct sdhci_host *host)
+{
+	unsigned long flags;
+	int ret = 0;
+
+	/* Disable tuning since we are suspending */
+	if (host->version >= SDHCI_SPEC_300 &&
+	    host->tuning_mode == SDHCI_TUNING_MODE_1) {
+		del_timer_sync(&host->tuning_timer);
+		host->flags &= ~SDHCI_NEEDS_RETUNING;
+	}
+
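+	/* mask all controller interrupts while runtime suspended */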
+	spin_lock_irqsave(&host->lock, flags);
+	sdhci_mask_irqs(host, SDHCI_INT_ALL_MASK);
+	spin_unlock_irqrestore(&host->lock, flags);
+
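+	/* wait for any in-flight interrupt handler to complete */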
+	synchronize_irq(host->irq);
+
+	spin_lock_irqsave(&host->lock, flags);
+	host->runtime_suspended = true;
+	spin_unlock_irqrestore(&host->lock, flags);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(sdhci_runtime_suspend_host);
+
+int sdhci_runtime_resume_host(struct sdhci_host *host)
+{
+	unsigned long flags;
+	int ret = 0, host_flags = host->flags;
+
+	if (host_flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
+		if (host->ops->enable_dma)
+			host->ops->enable_dma(host);
+	}
+
+	sdhci_init(host, 0);
+
+	/* Force clock and power re-program */
+	host->pwr = 0;
+	host->clock = 0;
+	sdhci_do_set_ios(host, &host->mmc->ios);
+
+	sdhci_do_start_signal_voltage_switch(host, &host->mmc->ios);
+	if (host_flags & SDHCI_PV_ENABLED)
+		sdhci_do_enable_preset_value(host, true);
+
+	/* Set the re-tuning expiration flag */
+	if ((host->version >= SDHCI_SPEC_300) && host->tuning_count &&
+	    (host->tuning_mode == SDHCI_TUNING_MODE_1))
+		host->flags |= SDHCI_NEEDS_RETUNING;
+
+	spin_lock_irqsave(&host->lock, flags);
+
+	host->runtime_suspended = false;
+
+	/* Enable SDIO IRQ */
+	if ((host->flags & SDHCI_SDIO_IRQ_ENABLED))
+		sdhci_enable_sdio_irq_nolock(host, true);
+
+	/* Enable Card Detection */
+	sdhci_enable_card_detection(host);
+
+	spin_unlock_irqrestore(&host->lock, flags);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(sdhci_runtime_resume_host);
+
+#endif
+
 /*****************************************************************************\
  *                                                                           *
  * Device allocation/registration                                            *
@@ -2359,6 +2552,8 @@
 
 	if (debug_quirks)
 		host->quirks = debug_quirks;
+	if (debug_quirks2)
+		host->quirks2 = debug_quirks2;
 
 	sdhci_reset(host, SDHCI_RESET_ALL);
 
@@ -2569,10 +2764,14 @@
 	if (caps[1] & SDHCI_SUPPORT_DDR50)
 		mmc->caps |= MMC_CAP_UHS_DDR50;
 
-	/* Does the host needs tuning for SDR50? */
+	/* Does the host need tuning for SDR50? */
 	if (caps[1] & SDHCI_USE_SDR50_TUNING)
 		host->flags |= SDHCI_SDR50_NEEDS_TUNING;
 
+	/* Does the host need tuning for HS200? */
+	if (mmc->caps2 & MMC_CAP2_HS200)
+		host->flags |= SDHCI_HS200_NEEDS_TUNING;
+
 	/* Driver Type(s) (A, C, D) supported by the host */
 	if (caps[1] & SDHCI_DRIVER_TYPE_A)
 		mmc->caps |= MMC_CAP_DRIVER_TYPE_A;
@@ -2893,9 +3092,11 @@
 module_exit(sdhci_drv_exit);
 
 module_param(debug_quirks, uint, 0444);
+module_param(debug_quirks2, uint, 0444);
 
 MODULE_AUTHOR("Pierre Ossman <pierre@ossman.eu>");
 MODULE_DESCRIPTION("Secure Digital Host Controller Interface core driver");
 MODULE_LICENSE("GPL");
 
 MODULE_PARM_DESC(debug_quirks, "Force certain quirks.");
+MODULE_PARM_DESC(debug_quirks2, "Force certain other quirks.");
diff --git a/drivers/mtd/devices/msm_nand.c b/drivers/mtd/devices/msm_nand.c
index c43abca..5cace6b 100644
--- a/drivers/mtd/devices/msm_nand.c
+++ b/drivers/mtd/devices/msm_nand.c
@@ -6757,6 +6757,23 @@
 	return 0;
 }
 
+static const unsigned int bch_sup_cntrl[] = {
+	0x307, /* MSM7x2xA */
+	0x4030, /* MDM 9x15 */
+};
+
+static inline bool msm_nand_has_bch_ecc_engine(unsigned int hw_id)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(bch_sup_cntrl); i++) {
+		if (hw_id == bch_sup_cntrl[i])
+			return true;
+	}
+
+	return false;
+}
+
 /**
  * msm_nand_scan - [msm_nand Interface] Scan for the msm_nand device
  * @param mtd		MTD device structure
@@ -6778,6 +6795,7 @@
 	uint32_t devcfg;
 	struct nand_flash_dev *flashdev = NULL;
 	struct nand_manufacturers  *flashman = NULL;
+	unsigned int hw_id;
 
 	/* Probe the Flash device for ONFI compliance */
 	if (!flash_onfi_probe(chip)) {
@@ -6845,7 +6863,8 @@
 			mtd_writesize = mtd->writesize >> 1;
 
 		/* Check whether controller and NAND device support 8bit ECC*/
-		if ((flash_rd_reg(chip, MSM_NAND_HW_INFO) == 0x307)
+		hw_id = flash_rd_reg(chip, MSM_NAND_HW_INFO);
+		if (msm_nand_has_bch_ecc_engine(hw_id)
 				&& (supported_flash.ecc_correctability >= 8)) {
 			pr_info("Found supported NAND device for %dbit ECC\n",
 					supported_flash.ecc_correctability);
@@ -6853,7 +6872,8 @@
 		} else {
 			pr_info("Found a supported NAND device\n");
 		}
-		pr_info("NAND Id  : 0x%x\n", supported_flash.flash_id);
+		pr_info("NAND Controller ID : 0x%x\n", hw_id);
+		pr_info("NAND Device ID  : 0x%x\n", supported_flash.flash_id);
 		pr_info("Buswidth : %d Bits\n", (wide_bus) ? 16 : 8);
 		pr_info("Density  : %lld MByte\n", (mtd->size>>20));
 		pr_info("Pagesize : %d Bytes\n", mtd->writesize);
diff --git a/drivers/mtd/devices/msm_qpic_nand.c b/drivers/mtd/devices/msm_qpic_nand.c
index d709e17..9a6cc80 100644
--- a/drivers/mtd/devices/msm_qpic_nand.c
+++ b/drivers/mtd/devices/msm_qpic_nand.c
@@ -2351,7 +2351,7 @@
 {
 	struct msm_nand_info *info;
 	struct resource *res;
-	int err, n_parts;
+	int err;
 	struct device_node *pnode;
 	struct mtd_part_parser_data parser_data;
 
@@ -2443,10 +2443,10 @@
 		goto free_bam;
 	}
 	parser_data.of_node = pnode;
-	n_parts = mtd_device_parse_register(&info->mtd, NULL, &parser_data,
+	err = mtd_device_parse_register(&info->mtd, NULL, &parser_data,
 					NULL, 0);
-	if (n_parts < 0) {
-		pr_err("Unable to register MTD partitions %d\n", n_parts);
+	if (err < 0) {
+		pr_err("Unable to register MTD partitions %d\n", err);
 		goto free_bam;
 	}
 	dev_set_drvdata(&pdev->dev, info);
@@ -2455,7 +2455,6 @@
 			info->nand_phys, info->bam_phys, info->bam_irq);
 	pr_info("Allocated DMA buffer at virt_addr 0x%p, phys_addr 0x%x\n",
 		info->nand_chip.dma_virt_addr, info->nand_chip.dma_phys_addr);
-	pr_info("Found %d MTD partitions\n", n_parts);
 	goto out;
 free_bam:
 	msm_nand_bam_free(info);
diff --git a/drivers/mtd/mtdpart.c b/drivers/mtd/mtdpart.c
index 0c634ca..4ee18c75 100644
--- a/drivers/mtd/mtdpart.c
+++ b/drivers/mtd/mtdpart.c
@@ -536,15 +536,6 @@
 #ifndef CONFIG_MTD_LAZYECCSTATS
 	part_fill_badblockstats(&(slave->mtd));
 #endif
-	if (master->_block_isbad) {
-		uint64_t offs = 0;
-
-		while (offs < slave->mtd.size) {
-			if (mtd_block_isbad(master, offs + slave->offset))
-				slave->mtd.ecc_stats.badblocks++;
-			offs += slave->mtd.erasesize;
-		}
-	}
 
 out_register:
 	return slave;
diff --git a/drivers/net/wireless/wcnss/wcnss_wlan.c b/drivers/net/wireless/wcnss/wcnss_wlan.c
index 81815ad..71a9860 100644
--- a/drivers/net/wireless/wcnss/wcnss_wlan.c
+++ b/drivers/net/wireless/wcnss/wcnss_wlan.c
@@ -842,7 +842,6 @@
 	else
 		wcnss_gpios_config(penv->gpios_5wire, false);
 fail_gpio_res:
-	kfree(penv);
 	penv = NULL;
 	return ret;
 }
@@ -883,7 +882,7 @@
 	}
 
 	/* create an environment to track the device */
-	penv = kzalloc(sizeof(*penv), GFP_KERNEL);
+	penv = devm_kzalloc(&pdev->dev, sizeof(*penv), GFP_KERNEL);
 	if (!penv) {
 		dev_err(&pdev->dev, "cannot allocate device memory.\n");
 		return -ENOMEM;
@@ -892,8 +891,10 @@
 
 	/* register sysfs entries */
 	ret = wcnss_create_sysfs(&pdev->dev);
-	if (ret)
+	if (ret) {
+		penv = NULL;
 		return -ENOENT;
+	}
 
 
 #ifdef MODULE
@@ -928,6 +929,7 @@
 wcnss_wlan_remove(struct platform_device *pdev)
 {
 	wcnss_remove_sysfs(&pdev->dev);
+	penv = NULL;
 	return 0;
 }
 
@@ -981,7 +983,6 @@
 			subsystem_put(penv->pil);
 
 
-		kfree(penv);
 		penv = NULL;
 	}
 
diff --git a/drivers/platform/msm/Kconfig b/drivers/platform/msm/Kconfig
index 34e1d40..75cc086 100644
--- a/drivers/platform/msm/Kconfig
+++ b/drivers/platform/msm/Kconfig
@@ -76,4 +76,18 @@
 	  PNP PMIC. It configures the frequency of clkdiv outputs on the
 	  PMIC. These clocks are typically wired through alternate functions
 	  on gpio pins.
+
+config IPA
+	tristate "IPA support"
+	depends on SPS
+	help
+	  This driver supports the Internet Packet Accelerator (IPA) core.
+	  IPA is a programmable protocol processor HW block.
+	  It is designed to support generic HW processing of UL/DL IP packets
+	  for various use cases independent of radio technology.
+	  The driver supports client connection and configuration
+	  for the IPA core.
+	  Kernel and user-space processes can call the IPA driver
+	  to configure the IPA core.
+
 endmenu
diff --git a/drivers/platform/msm/Makefile b/drivers/platform/msm/Makefile
index 35efd91..0a755d3 100644
--- a/drivers/platform/msm/Makefile
+++ b/drivers/platform/msm/Makefile
@@ -3,6 +3,7 @@
 #
 obj-$(CONFIG_MSM_SSBI) += ssbi.o
 obj-$(CONFIG_USB_BAM) += usb_bam.o
+obj-$(CONFIG_IPA) += ipa/
 obj-$(CONFIG_SPS) += sps/
 obj-$(CONFIG_QPNP_PWM) += qpnp-pwm.o
 obj-$(CONFIG_QPNP_POWER_ON) += qpnp-power-on.o
diff --git a/drivers/platform/msm/ipa/Makefile b/drivers/platform/msm/ipa/Makefile
new file mode 100644
index 0000000..ded5b50
--- /dev/null
+++ b/drivers/platform/msm/ipa/Makefile
@@ -0,0 +1,3 @@
+obj-$(CONFIG_IPA) += ipat.o
+ipat-y := ipa.o ipa_debugfs.o ipa_hdr.o ipa_flt.o ipa_rt.o ipa_dp.o ipa_client.o \
+	ipa_utils.o ipa_nat.o rmnet_bridge.o a2_service.o ipa_bridge.o
diff --git a/drivers/platform/msm/ipa/a2_service.c b/drivers/platform/msm/ipa/a2_service.c
new file mode 100644
index 0000000..0ae2552
--- /dev/null
+++ b/drivers/platform/msm/ipa/a2_service.c
@@ -0,0 +1,276 @@
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <mach/bam_dmux.h>
+#include <mach/ipa.h>
+#include <mach/sps.h>
+#include "ipa_i.h"
+
+static struct a2_service_cb_type {
+	void *tx_complete_cb;
+	void *rx_cb;
+	u32 producer_handle;
+	u32 consumer_handle;
+} a2_service_cb;
+
+static struct sps_mem_buffer data_mem_buf[2];
+static struct sps_mem_buffer desc_mem_buf[2];
+
+static int connect_pipe_ipa(enum a2_mux_pipe_direction pipe_dir,
+			u8 *usb_pipe_idx,
+			u32 *clnt_hdl,
+			struct sps_pipe *pipe);
+
+static int a2_ipa_connect_pipe(struct ipa_connect_params *in_params,
+		struct ipa_sps_params *out_params, u32 *clnt_hdl);
+
+/**
+ * a2_mux_initialize() - initialize A2 MUX module
+ *
+ * Return codes:
+ * 0: success
+ */
+int a2_mux_initialize(void)
+{
+	(void) msm_bam_dmux_ul_power_vote();
+
+	return 0;
+}
+
+/**
+ * a2_mux_close() - close A2 MUX module
+ *
+ * Return codes:
+ * 0: success
+ * -EINVAL: invalid parameters
+ */
+int a2_mux_close(void)
+{
+	int ret = 0;
+
+	(void) msm_bam_dmux_ul_power_unvote();
+
+	ret = ipa_disconnect(a2_service_cb.consumer_handle);
+	if (0 != ret) {
+		pr_err("%s: ipa_disconnect failure\n", __func__);
+		goto bail;
+	}
+
+	ret = ipa_disconnect(a2_service_cb.producer_handle);
+	if (0 != ret) {
+		pr_err("%s: ipa_disconnect failure\n", __func__);
+		goto bail;
+	}
+
+	ret = 0;
+
+bail:
+
+	return ret;
+}
+
+/**
+ * a2_mux_open_port() - open connection to A2
+ * @wwan_logical_channel_id:	 WWAN logical channel ID
+ * @rx_cb:	Rx callback
+ * @tx_complete_cb:	Tx completed callback
+ *
+ * Return codes:
+ * 0: success
+ * -EINVAL: invalid parameters
+ */
+int a2_mux_open_port(int wwan_logical_channel_id, void *rx_cb,
+		void *tx_complete_cb)
+{
+	int ret = 0;
+	u8 src_pipe = 0;
+	u8 dst_pipe = 0;
+	struct sps_pipe *a2_to_ipa_pipe = NULL;
+	struct sps_pipe *ipa_to_a2_pipe = NULL;
+
+	(void) wwan_logical_channel_id;
+
+	a2_service_cb.rx_cb = rx_cb;
+	a2_service_cb.tx_complete_cb = tx_complete_cb;
+
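+	/* connect the A2-to-IPA pipe first, then the IPA-to-A2 pipe */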
+	ret = connect_pipe_ipa(A2_TO_IPA,
+			&src_pipe,
+			&(a2_service_cb.consumer_handle),
+			a2_to_ipa_pipe);
+	if (ret) {
+		pr_err("%s: A2 to IPA pipe connection failure\n", __func__);
+		goto bail;
+	}
+
+	ret = connect_pipe_ipa(IPA_TO_A2,
+			&dst_pipe,
+			&(a2_service_cb.producer_handle),
+			ipa_to_a2_pipe);
+	if (ret) {
+		pr_err("%s: IPA to A2 pipe connection failure\n", __func__);
+		sps_disconnect(a2_to_ipa_pipe);
+		sps_free_endpoint(a2_to_ipa_pipe);
+		(void) ipa_disconnect(a2_service_cb.consumer_handle);
+		goto bail;
+	}
+
+	ret = 0;
+
+bail:
+
+	return ret;
+}
+
+static int connect_pipe_ipa(enum a2_mux_pipe_direction pipe_dir,
+			u8 *usb_pipe_idx,
+			u32 *clnt_hdl,
+			struct sps_pipe *pipe)
+{
+	int ret;
+	struct sps_connect connection = {0, };
+	u32 a2_handle = 0;
+	u32 a2_phy_addr = 0;
+	struct a2_mux_pipe_connection pipe_connection = { 0, };
+	struct ipa_connect_params ipa_in_params;
+	struct ipa_sps_params sps_out_params;
+
+	memset(&ipa_in_params, 0, sizeof(ipa_in_params));
+	memset(&sps_out_params, 0, sizeof(sps_out_params));
+
+	if (!usb_pipe_idx || !clnt_hdl) {
+		pr_err("connect_pipe_ipa :: null arguments\n");
+		ret = -EINVAL;
+		goto bail;
+	}
+
+	ret = ipa_get_a2_mux_pipe_info(pipe_dir, &pipe_connection);
+	if (ret) {
+		pr_err("ipa_get_a2_mux_pipe_info failed\n");
+		goto bail;
+	}
+
+	if (pipe_dir == A2_TO_IPA) {
+		a2_phy_addr = pipe_connection.src_phy_addr;
+		ipa_in_params.client = IPA_CLIENT_A2_TETHERED_PROD;
+		ipa_in_params.ipa_ep_cfg.mode.mode = IPA_DMA;
+		ipa_in_params.ipa_ep_cfg.mode.dst = IPA_CLIENT_USB_CONS;
+		pr_debug("%s: pipe_connection->src_pipe_index = %d\n",
+				__func__, pipe_connection.src_pipe_index);
+		ipa_in_params.client_ep_idx = pipe_connection.src_pipe_index;
+	} else {
+		a2_phy_addr = pipe_connection.dst_phy_addr;
+		ipa_in_params.client = IPA_CLIENT_A2_TETHERED_CONS;
+		ipa_in_params.client_ep_idx = pipe_connection.dst_pipe_index;
+	}
+
+	ret = sps_phy2h(a2_phy_addr, &a2_handle);
+	if (ret) {
+		pr_err("%s: sps_phy2h failed (A2 BAM) %d\n", __func__, ret);
+		goto bail;
+	}
+
+	ipa_in_params.client_bam_hdl = a2_handle;
+	ipa_in_params.desc_fifo_sz = pipe_connection.desc_fifo_size;
+	ipa_in_params.data_fifo_sz = pipe_connection.data_fifo_size;
+
+	if (pipe_connection.mem_type == IPA_SPS_PIPE_MEM) {
+		pr_debug("%s: A2 BAM using SPS pipe memory\n", __func__);
+		ret = sps_setup_bam2bam_fifo(&data_mem_buf[pipe_dir],
+				pipe_connection.data_fifo_base_offset,
+				pipe_connection.data_fifo_size, 1);
+		if (ret) {
+			pr_err("%s: data fifo setup failure %d\n",
+					__func__, ret);
+			goto bail;
+		}
+
+		ret = sps_setup_bam2bam_fifo(&desc_mem_buf[pipe_dir],
+				pipe_connection.desc_fifo_base_offset,
+				pipe_connection.desc_fifo_size, 1);
+		if (ret) {
+			pr_err("%s: desc. fifo setup failure %d\n",
+					__func__, ret);
+			goto bail;
+		}
+
+		ipa_in_params.data = data_mem_buf[pipe_dir];
+		ipa_in_params.desc = desc_mem_buf[pipe_dir];
+	}
+
+	ret = a2_ipa_connect_pipe(&ipa_in_params,
+			&sps_out_params,
+			clnt_hdl);
+	if (ret) {
+		pr_err("%s: a2_ipa_connect_pipe failed\n", __func__);
+		goto bail;
+	}
+
+	pipe = sps_alloc_endpoint();
+	if (pipe == NULL) {
+		pr_err("%s: sps_alloc_endpoint failed\n", __func__);
+		ret = -ENOMEM;
+		goto a2_ipa_connect_pipe_failed;
+	}
+
+	ret = sps_get_config(pipe, &connection);
+	if (ret) {
+		pr_err("%s: tx get config failed %d\n", __func__, ret);
+		goto get_config_failed;
+	}
+
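+	/* set up the SPS connection endpoints based on the pipe direction */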
+	if (pipe_dir == A2_TO_IPA) {
+		connection.mode = SPS_MODE_SRC;
+		*usb_pipe_idx = connection.src_pipe_index;
+		connection.source = a2_handle;
+		connection.destination = sps_out_params.ipa_bam_hdl;
+		connection.src_pipe_index = pipe_connection.src_pipe_index;
+		connection.dest_pipe_index = sps_out_params.ipa_ep_idx;
+	} else {
+		connection.mode = SPS_MODE_DEST;
+		*usb_pipe_idx = connection.dest_pipe_index;
+		connection.source = sps_out_params.ipa_bam_hdl;
+		connection.destination = a2_handle;
+		connection.src_pipe_index = sps_out_params.ipa_ep_idx;
+		connection.dest_pipe_index = pipe_connection.dst_pipe_index;
+	}
+
+	connection.event_thresh = 16;
+	connection.data = sps_out_params.data;
+	connection.desc = sps_out_params.desc;
+
+	ret = sps_connect(pipe, &connection);
+	if (ret < 0) {
+		pr_err("%s: tx connect error %d\n", __func__, ret);
+		goto error;
+	}
+
+	ret = 0;
+	goto bail;
+error:
+	sps_disconnect(pipe);
+get_config_failed:
+	sps_free_endpoint(pipe);
+a2_ipa_connect_pipe_failed:
+	(void) ipa_disconnect(*clnt_hdl);
+bail:
+	return ret;
+}
+
+static int a2_ipa_connect_pipe(struct ipa_connect_params *in_params,
+		struct ipa_sps_params *out_params, u32 *clnt_hdl)
+{
+	return ipa_connect(in_params, out_params, clnt_hdl);
+}
+
diff --git a/drivers/platform/msm/ipa/a2_service.h b/drivers/platform/msm/ipa/a2_service.h
new file mode 100644
index 0000000..80885da
--- /dev/null
+++ b/drivers/platform/msm/ipa/a2_service.h
@@ -0,0 +1,24 @@
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _A2_SERVICE_H_
+#define _A2_SERVICE_H_
+
+int a2_mux_initialize(void);
+
+int a2_mux_close(void);
+
+int a2_mux_open_port(int wwan_logical_channel_id, void *rx_cb,
+		void *tx_complete_cb);
+
+#endif /* _A2_SERVICE_H_ */
+
diff --git a/drivers/platform/msm/ipa/ipa.c b/drivers/platform/msm/ipa/ipa.c
new file mode 100644
index 0000000..8f68ef5
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa.c
@@ -0,0 +1,1790 @@
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/dmapool.h>
+#include <linux/fs.h>
+#include <linux/genalloc.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/rbtree.h>
+#include <linux/uaccess.h>
+#include "ipa_i.h"
+
+#define IPA_SUMMING_THRESHOLD (0x10)
+#define IPA_PIPE_MEM_START_OFST (0x0)
+#define IPA_PIPE_MEM_SIZE (0x0)
+#define IPA_READ_MAX (16)
+#define IPA_MOBILE_AP_MODE(x) (x == IPA_MODE_MOBILE_AP_ETH || \
+			       x == IPA_MODE_MOBILE_AP_WAN || \
+			       x == IPA_MODE_MOBILE_AP_WLAN)
+#define IPA_CNOC_CLK_RATE (75 * 1000 * 1000UL)
+#define IPA_V1_CLK_RATE (92.31 * 1000 * 1000UL)
+#define IPA_DMA_POOL_SIZE (512)
+#define IPA_DMA_POOL_ALIGNMENT (4)
+#define IPA_DMA_POOL_BOUNDARY (1024)
+#define WLAN_AMPDU_TX_EP (15)
+#define IPA_ROUTING_RULE_BYTE_SIZE (4)
+#define IPA_BAM_CNFG_BITS_VAL (0x7FFFE004)
+
+#define IPA_AGGR_MAX_STR_LENGTH (10)
+
+#define IPA_AGGR_STR_IN_BYTES(str) \
+	(strnlen((str), IPA_AGGR_MAX_STR_LENGTH - 1) + 1)
+
+struct ipa_plat_drv_res {
+	u32 ipa_mem_base;
+	u32 ipa_mem_size;
+	u32 bam_mem_base;
+	u32 bam_mem_size;
+	u32 ipa_irq;
+	u32 bam_irq;
+	u32 ipa_pipe_mem_start_ofst;
+	u32 ipa_pipe_mem_size;
+	struct a2_mux_pipe_connection a2_to_ipa_pipe;
+	struct a2_mux_pipe_connection ipa_to_a2_pipe;
+};
+
+static struct ipa_plat_drv_res ipa_res = {0, };
+static struct of_device_id ipa_plat_drv_match[] = {
+	{
+		.compatible = "qcom,ipa",
+	},
+
+	{
+	}
+};
+
+static struct clk *ipa_clk_src;
+static struct clk *ipa_clk;
+static struct clk *sys_noc_ipa_axi_clk;
+static struct clk *ipa_cnoc_clk;
+static struct device *ipa_dev;
+
+struct ipa_context *ipa_ctx;
+
+static bool polling_mode;
+module_param(polling_mode, bool, 0644);
+MODULE_PARM_DESC(polling_mode,
+		"1 - pure polling mode; 0 - interrupt+polling mode");
+static uint polling_delay_ms = 50;
+module_param(polling_delay_ms, uint, 0644);
+MODULE_PARM_DESC(polling_delay_ms, "set to desired delay between polls");
+static bool hdr_tbl_lcl = 1;
+module_param(hdr_tbl_lcl, bool, 0644);
+MODULE_PARM_DESC(hdr_tbl_lcl, "where hdr tbl resides 1-local; 0-system");
+static bool ip4_rt_tbl_lcl = 1;
+module_param(ip4_rt_tbl_lcl, bool, 0644);
+MODULE_PARM_DESC(ip4_rt_tbl_lcl,
+		"where ip4 rt tables reside 1-local; 0-system");
+static bool ip6_rt_tbl_lcl = 1;
+module_param(ip6_rt_tbl_lcl, bool, 0644);
+MODULE_PARM_DESC(ip6_rt_tbl_lcl,
+		"where ip6 rt tables reside 1-local; 0-system");
+static bool ip4_flt_tbl_lcl = 1;
+module_param(ip4_flt_tbl_lcl, bool, 0644);
+MODULE_PARM_DESC(ip4_flt_tbl_lcl,
+		"where ip4 flt tables reside 1-local; 0-system");
+static bool ip6_flt_tbl_lcl = 1;
+module_param(ip6_flt_tbl_lcl, bool, 0644);
+MODULE_PARM_DESC(ip6_flt_tbl_lcl,
+		"where ip6 flt tables reside 1-local; 0-system");
+
+static int ipa_load_pipe_connection(struct platform_device *pdev,
+				    enum a2_mux_pipe_direction pipe_dir,
+				    struct a2_mux_pipe_connection     *pdata);
+
+static int ipa_update_connections_info(struct device_node *node,
+			struct a2_mux_pipe_connection *pipe_connection);
+
+static void ipa_set_aggregation_params(void);
+
+static ssize_t ipa_read(struct file *filp, char __user *buf, size_t count,
+		loff_t *f_pos)
+{
+	u32 reg_val = 0xfeedface;
+	char str[IPA_READ_MAX];
+	int result;
+	static int read_cnt;
+
+	if (read_cnt) {
+		IPAERR("only supports one call to read\n");
+		return 0;
+	}
+
+	reg_val = ipa_read_reg(ipa_ctx->mmio, IPA_COMP_HW_VERSION_OFST);
+	result = scnprintf(str, IPA_READ_MAX, "%x\n", reg_val);
+	if (copy_to_user(buf, str, result))
+		return -EFAULT;
+	read_cnt = 1;
+
+	return result;
+}
+
+static int ipa_open(struct inode *inode, struct file *filp)
+{
+	struct ipa_context *ctx = NULL;
+
+	IPADBG("ENTER\n");
+	ctx = container_of(inode->i_cdev, struct ipa_context, cdev);
+	filp->private_data = ctx;
+
+	return 0;
+}
+
+static long ipa_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+	int retval = 0;
+	u32 pyld_sz;
+	u8 header[128] = { 0 };
+	u8 *param = NULL;
+	struct ipa_ioc_nat_alloc_mem nat_mem;
+	struct ipa_ioc_v4_nat_init nat_init;
+	struct ipa_ioc_v4_nat_del nat_del;
+
+	IPADBG("cmd=%x nr=%d\n", cmd, _IOC_NR(cmd));
+
+	if (_IOC_TYPE(cmd) != IPA_IOC_MAGIC)
+		return -ENOTTY;
+	if (_IOC_NR(cmd) >= IPA_IOCTL_MAX)
+		return -ENOTTY;
+
+	switch (cmd) {
+	case IPA_IOC_ALLOC_NAT_MEM:
+		if (copy_from_user((u8 *)&nat_mem, (u8 *)arg,
+					sizeof(struct ipa_ioc_nat_alloc_mem))) {
+			retval = -EFAULT;
+			break;
+		}
+
+		if (allocate_nat_device(&nat_mem)) {
+			retval = -EFAULT;
+			break;
+		}
+		if (copy_to_user((u8 *)arg, (u8 *)&nat_mem,
+					sizeof(struct ipa_ioc_nat_alloc_mem))) {
+			retval = -EFAULT;
+			break;
+		}
+		break;
+	case IPA_IOC_V4_INIT_NAT:
+		if (copy_from_user((u8 *)&nat_init, (u8 *)arg,
+					sizeof(struct ipa_ioc_v4_nat_init))) {
+			retval = -EFAULT;
+			break;
+		}
+		if (ipa_nat_init_cmd(&nat_init)) {
+			retval = -EFAULT;
+			break;
+		}
+		break;
+
+	case IPA_IOC_NAT_DMA:
+		if (copy_from_user(header, (u8 *)arg,
+					sizeof(struct ipa_ioc_nat_dma_cmd))) {
+			retval = -EFAULT;
+			break;
+		}
+
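+		/* payload size: command header plus the per-entry records */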
+		pyld_sz =
+		   sizeof(struct ipa_ioc_nat_dma_cmd) +
+		   ((struct ipa_ioc_nat_dma_cmd *)header)->entries *
+		   sizeof(struct ipa_ioc_nat_dma_one);
+		param = kzalloc(pyld_sz, GFP_KERNEL);
+		if (!param) {
+			retval = -ENOMEM;
+			break;
+		}
+
+		if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
+			retval = -EFAULT;
+			break;
+		}
+
+		if (ipa_nat_dma_cmd((struct ipa_ioc_nat_dma_cmd *)param)) {
+			retval = -EFAULT;
+			break;
+		}
+		break;
+
+	case IPA_IOC_V4_DEL_NAT:
+		if (copy_from_user((u8 *)&nat_del, (u8 *)arg,
+					sizeof(struct ipa_ioc_v4_nat_del))) {
+			retval = -EFAULT;
+			break;
+		}
+		if (ipa_nat_del_cmd(&nat_del)) {
+			retval = -EFAULT;
+			break;
+		}
+		break;
+
+	case IPA_IOC_ADD_HDR:
+		if (copy_from_user(header, (u8 *)arg,
+					sizeof(struct ipa_ioc_add_hdr))) {
+			retval = -EFAULT;
+			break;
+		}
+		pyld_sz =
+		   sizeof(struct ipa_ioc_add_hdr) +
+		   ((struct ipa_ioc_add_hdr *)header)->num_hdrs *
+		   sizeof(struct ipa_hdr_add);
+		param = kzalloc(pyld_sz, GFP_KERNEL);
+		if (!param) {
+			retval = -ENOMEM;
+			break;
+		}
+		if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
+			retval = -EFAULT;
+			break;
+		}
+		if (ipa_add_hdr((struct ipa_ioc_add_hdr *)param)) {
+			retval = -EFAULT;
+			break;
+		}
+		if (copy_to_user((u8 *)arg, param, pyld_sz)) {
+			retval = -EFAULT;
+			break;
+		}
+		break;
+
+	case IPA_IOC_DEL_HDR:
+		if (copy_from_user(header, (u8 *)arg,
+					sizeof(struct ipa_ioc_del_hdr))) {
+			retval = -EFAULT;
+			break;
+		}
+		pyld_sz =
+		   sizeof(struct ipa_ioc_del_hdr) +
+		   ((struct ipa_ioc_del_hdr *)header)->num_hdls *
+		   sizeof(struct ipa_hdr_del);
+		param = kzalloc(pyld_sz, GFP_KERNEL);
+		if (!param) {
+			retval = -ENOMEM;
+			break;
+		}
+		if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
+			retval = -EFAULT;
+			break;
+		}
+		if (ipa_del_hdr((struct ipa_ioc_del_hdr *)param)) {
+			retval = -EFAULT;
+			break;
+		}
+		if (copy_to_user((u8 *)arg, param, pyld_sz)) {
+			retval = -EFAULT;
+			break;
+		}
+		break;
+
+	case IPA_IOC_ADD_RT_RULE:
+		if (copy_from_user(header, (u8 *)arg,
+					sizeof(struct ipa_ioc_add_rt_rule))) {
+			retval = -EFAULT;
+			break;
+		}
+		pyld_sz =
+		   sizeof(struct ipa_ioc_add_rt_rule) +
+		   ((struct ipa_ioc_add_rt_rule *)header)->num_rules *
+		   sizeof(struct ipa_rt_rule_add);
+		param = kzalloc(pyld_sz, GFP_KERNEL);
+		if (!param) {
+			retval = -ENOMEM;
+			break;
+		}
+		if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
+			retval = -EFAULT;
+			break;
+		}
+		if (ipa_add_rt_rule((struct ipa_ioc_add_rt_rule *)param)) {
+			retval = -EFAULT;
+			break;
+		}
+		if (copy_to_user((u8 *)arg, param, pyld_sz)) {
+			retval = -EFAULT;
+			break;
+		}
+		break;
+
+	case IPA_IOC_DEL_RT_RULE:
+		if (copy_from_user(header, (u8 *)arg,
+					sizeof(struct ipa_ioc_del_rt_rule))) {
+			retval = -EFAULT;
+			break;
+		}
+		pyld_sz =
+		   sizeof(struct ipa_ioc_del_rt_rule) +
+		   ((struct ipa_ioc_del_rt_rule *)header)->num_hdls *
+		   sizeof(struct ipa_rt_rule_del);
+		param = kzalloc(pyld_sz, GFP_KERNEL);
+		if (!param) {
+			retval = -ENOMEM;
+			break;
+		}
+		if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
+			retval = -EFAULT;
+			break;
+		}
+		if (ipa_del_rt_rule((struct ipa_ioc_del_rt_rule *)param)) {
+			retval = -EFAULT;
+			break;
+		}
+		if (copy_to_user((u8 *)arg, param, pyld_sz)) {
+			retval = -EFAULT;
+			break;
+		}
+		break;
+
+	case IPA_IOC_ADD_FLT_RULE:
+		if (copy_from_user(header, (u8 *)arg,
+					sizeof(struct ipa_ioc_add_flt_rule))) {
+			retval = -EFAULT;
+			break;
+		}
+		pyld_sz =
+		   sizeof(struct ipa_ioc_add_flt_rule) +
+		   ((struct ipa_ioc_add_flt_rule *)header)->num_rules *
+		   sizeof(struct ipa_flt_rule_add);
+		param = kzalloc(pyld_sz, GFP_KERNEL);
+		if (!param) {
+			retval = -ENOMEM;
+			break;
+		}
+		if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
+			retval = -EFAULT;
+			break;
+		}
+		if (ipa_add_flt_rule((struct ipa_ioc_add_flt_rule *)param)) {
+			retval = -EFAULT;
+			break;
+		}
+		if (copy_to_user((u8 *)arg, param, pyld_sz)) {
+			retval = -EFAULT;
+			break;
+		}
+		break;
+
+	case IPA_IOC_DEL_FLT_RULE:
+		if (copy_from_user(header, (u8 *)arg,
+					sizeof(struct ipa_ioc_del_flt_rule))) {
+			retval = -EFAULT;
+			break;
+		}
+		pyld_sz =
+		   sizeof(struct ipa_ioc_del_flt_rule) +
+		   ((struct ipa_ioc_del_flt_rule *)header)->num_hdls *
+		   sizeof(struct ipa_flt_rule_del);
+		param = kzalloc(pyld_sz, GFP_KERNEL);
+		if (!param) {
+			retval = -ENOMEM;
+			break;
+		}
+		if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
+			retval = -EFAULT;
+			break;
+		}
+		if (ipa_del_flt_rule((struct ipa_ioc_del_flt_rule *)param)) {
+			retval = -EFAULT;
+			break;
+		}
+		if (copy_to_user((u8 *)arg, param, pyld_sz)) {
+			retval = -EFAULT;
+			break;
+		}
+		break;
+
+	case IPA_IOC_COMMIT_HDR:
+		retval = ipa_commit_hdr();
+		break;
+	case IPA_IOC_RESET_HDR:
+		retval = ipa_reset_hdr();
+		break;
+	case IPA_IOC_COMMIT_RT:
+		retval = ipa_commit_rt(arg);
+		break;
+	case IPA_IOC_RESET_RT:
+		retval = ipa_reset_rt(arg);
+		break;
+	case IPA_IOC_COMMIT_FLT:
+		retval = ipa_commit_flt(arg);
+		break;
+	case IPA_IOC_RESET_FLT:
+		retval = ipa_reset_flt(arg);
+		break;
+	case IPA_IOC_DUMP:
+		ipa_dump();
+		break;
+	case IPA_IOC_GET_RT_TBL:
+		if (copy_from_user(header, (u8 *)arg,
+					sizeof(struct ipa_ioc_get_rt_tbl))) {
+			retval = -EFAULT;
+			break;
+		}
+		if (ipa_get_rt_tbl((struct ipa_ioc_get_rt_tbl *)header)) {
+			retval = -EFAULT;
+			break;
+		}
+		if (copy_to_user((u8 *)arg, header,
+					sizeof(struct ipa_ioc_get_rt_tbl))) {
+			retval = -EFAULT;
+			break;
+		}
+		break;
+	case IPA_IOC_PUT_RT_TBL:
+		retval = ipa_put_rt_tbl(arg);
+		break;
+	case IPA_IOC_GET_HDR:
+		if (copy_from_user(header, (u8 *)arg,
+					sizeof(struct ipa_ioc_get_hdr))) {
+			retval = -EFAULT;
+			break;
+		}
+		if (ipa_get_hdr((struct ipa_ioc_get_hdr *)header)) {
+			retval = -EFAULT;
+			break;
+		}
+		if (copy_to_user((u8 *)arg, header,
+					sizeof(struct ipa_ioc_get_hdr))) {
+			retval = -EFAULT;
+			break;
+		}
+		break;
+	case IPA_IOC_PUT_HDR:
+		retval = ipa_put_hdr(arg);
+		break;
+	case IPA_IOC_SET_FLT:
+		retval = ipa_cfg_filter(arg);
+		break;
+	case IPA_IOC_COPY_HDR:
+		if (copy_from_user(header, (u8 *)arg,
+					sizeof(struct ipa_ioc_copy_hdr))) {
+			retval = -EFAULT;
+			break;
+		}
+		if (ipa_copy_hdr((struct ipa_ioc_copy_hdr *)header)) {
+			retval = -EFAULT;
+			break;
+		}
+		if (copy_to_user((u8 *)arg, header,
+					sizeof(struct ipa_ioc_copy_hdr))) {
+			retval = -EFAULT;
+			break;
+		}
+		break;
+	default:        /* redundant, as cmd was checked against MAXNR */
+		return -ENOTTY;
+	}
+	kfree(param);
+
+	return retval;
+}
+
+/**
+* ipa_setup_dflt_rt_tables() - Setup default routing tables
+*
+* Return codes:
+* 0: success
+* -ENOMEM: failed to allocate memory
+* -EPERM: failed to add the tables
+*/
+int ipa_setup_dflt_rt_tables(void)
+{
+	struct ipa_ioc_add_rt_rule *rt_rule;
+	struct ipa_rt_rule_add *rt_rule_entry;
+
+	rt_rule =
+	   kzalloc(sizeof(struct ipa_ioc_add_rt_rule) + 1 *
+			   sizeof(struct ipa_rt_rule_add), GFP_KERNEL);
+	if (!rt_rule) {
+		IPAERR("fail to alloc mem\n");
+		return -ENOMEM;
+	}
+	/* setup a default v4 route to point to A5 */
+	rt_rule->num_rules = 1;
+	rt_rule->commit = 1;
+	rt_rule->ip = IPA_IP_v4;
+	strlcpy(rt_rule->rt_tbl_name, IPA_DFLT_RT_TBL_NAME,
+			IPA_RESOURCE_NAME_MAX);
+
+	rt_rule_entry = &rt_rule->rules[0];
+	rt_rule_entry->at_rear = 1;
+	rt_rule_entry->rule.dst = IPA_CLIENT_A5_LAN_WAN_CONS;
+	rt_rule_entry->rule.hdr_hdl = ipa_ctx->excp_hdr_hdl;
+
+	if (ipa_add_rt_rule(rt_rule)) {
+		IPAERR("fail to add dflt v4 rule\n");
+		kfree(rt_rule);
+		return -EPERM;
+	}
+	IPADBG("dflt v4 rt rule hdl=%x\n", rt_rule_entry->rt_rule_hdl);
+	ipa_ctx->dflt_v4_rt_rule_hdl = rt_rule_entry->rt_rule_hdl;
+
+	/* setup a default v6 route to point to A5 */
+	rt_rule->ip = IPA_IP_v6;
+	if (ipa_add_rt_rule(rt_rule)) {
+		IPAERR("fail to add dflt v6 rule\n");
+		kfree(rt_rule);
+		return -EPERM;
+	}
+	IPADBG("dflt v6 rt rule hdl=%x\n", rt_rule_entry->rt_rule_hdl);
+	ipa_ctx->dflt_v6_rt_rule_hdl = rt_rule_entry->rt_rule_hdl;
+
+	/*
+	 * because these tables are the very first to be added, they will both
+	 * have the same index (0) which is essential for programming the
+	 * "route" end-point config
+	 */
+
+	kfree(rt_rule);
+
+	return 0;
+}
+
+static int ipa_setup_exception_path(void)
+{
+	struct ipa_ioc_add_hdr *hdr;
+	struct ipa_hdr_add *hdr_entry;
+	struct ipa_route route = { 0 };
+	int ret;
+
+	/* install the basic exception header */
+	hdr = kzalloc(sizeof(struct ipa_ioc_add_hdr) + 1 *
+		      sizeof(struct ipa_hdr_add), GFP_KERNEL);
+	if (!hdr) {
+		IPAERR("fail to alloc exception hdr\n");
+		return -ENOMEM;
+	}
+	hdr->num_hdrs = 1;
+	hdr->commit = 1;
+	hdr_entry = &hdr->hdr[0];
+	strlcpy(hdr_entry->name, IPA_DFLT_HDR_NAME, IPA_RESOURCE_NAME_MAX);
+
+	/*
+	 * only single stream for MBIM supported and no exception packets
+	 * expected so set default header to zero
+	 */
+	hdr_entry->hdr_len = 1;
+	hdr_entry->hdr[0] = 0;
+
+	/*
+	 * SW does not know anything about default exception header so
+	 * we don't set it. IPA HW will use it as a template
+	 */
+	if (ipa_add_hdr(hdr)) {
+		IPAERR("fail to add exception hdr\n");
+		ret = -EPERM;
+		goto bail;
+	}
+
+	if (hdr_entry->status) {
+		IPAERR("fail to add exception hdr\n");
+		ret = -EPERM;
+		goto bail;
+	}
+
+	ipa_ctx->excp_hdr_hdl = hdr_entry->hdr_hdl;
+
+	/* exception packets goto LAN-WAN pipe from IPA to A5 */
+	route.route_def_pipe = IPA_A5_LAN_WAN_IN;
+	route.route_def_hdr_table = !ipa_ctx->hdr_tbl_lcl;
+
+	if (ipa_cfg_route(&route)) {
+		IPAERR("fail to configure default route\n");
+		ret = -EPERM;
+		goto bail;
+	}
+
+	ret = 0;
+bail:
+	kfree(hdr);
+	return ret;
+}
+
+static void ipa_handle_tx_poll_for_pipe(struct ipa_sys_context *sys)
+{
+	struct ipa_tx_pkt_wrapper *tx_pkt, *t;
+	struct sps_iovec iov;
+	unsigned long irq_flags;
+	int ret;
+
+	while (1) {
+		iov.addr = 0;
+		ret = sps_get_iovec(sys->ep->ep_hdl, &iov);
+		if (ret) {
+			pr_err("%s: sps_get_iovec failed %d\n", __func__, ret);
+			break;
+		}
+		if (!iov.addr)
+			break;
+		spin_lock_irqsave(&sys->spinlock, irq_flags);
+		tx_pkt = list_first_entry(&sys->head_desc_list,
+					  struct ipa_tx_pkt_wrapper, link);
+		spin_unlock_irqrestore(&sys->spinlock, irq_flags);
+
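+		/*
+		 * tx_pkt->cnt encodes the descriptor's position in a set:
+		 * 1 is a standalone descriptor that completes immediately,
+		 * 0xFFFF marks the end of a set (complete it after pulling
+		 * the parked descriptors back from the wait list), and any
+		 * other value is an intermediate descriptor that is parked
+		 * on the wait list until the end of the set is reached.
+		 */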
+		switch (tx_pkt->cnt) {
+		case 1:
+			ipa_write_done(&tx_pkt->work);
+			break;
+		case 0xFFFF:
+			/* reached end of set */
+			spin_lock_irqsave(&sys->spinlock, irq_flags);
+			list_for_each_entry_safe(tx_pkt, t,
+						 &sys->wait_desc_list, link) {
+				list_del(&tx_pkt->link);
+				list_add(&tx_pkt->link, &sys->head_desc_list);
+			}
+			tx_pkt =
+			   list_first_entry(&sys->head_desc_list,
+					    struct ipa_tx_pkt_wrapper, link);
+			spin_unlock_irqrestore(&sys->spinlock, irq_flags);
+			ipa_write_done(&tx_pkt->work);
+			break;
+		default:
+			/* keep looping till reach the end of the set */
+			spin_lock_irqsave(&sys->spinlock,
+					  irq_flags);
+			list_del(&tx_pkt->link);
+			list_add_tail(&tx_pkt->link,
+				      &sys->wait_desc_list);
+			spin_unlock_irqrestore(&sys->spinlock,
+					       irq_flags);
+			break;
+		}
+	}
+}
+
+static void ipa_poll_function(struct work_struct *work)
+{
+	int ret;
+	int tx_pipes[] = { IPA_A5_CMD, IPA_A5_LAN_WAN_OUT,
+		IPA_A5_WLAN_AMPDU_OUT };
+	int i;
+	int num_tx_pipes;
+
+	/* check all the system pipes for tx completions and rx available */
+	if (ipa_ctx->sys[IPA_A5_LAN_WAN_IN].ep->valid)
+		ipa_handle_rx_core();
+
+	num_tx_pipes = ARRAY_SIZE(tx_pipes);
+
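+	/*
+	 * The WLAN AMPDU pipe is the last entry in tx_pipes[]; it is polled
+	 * only when the driver runs in a mobile-AP mode.
+	 */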
+	if (!IPA_MOBILE_AP_MODE(ipa_ctx->mode))
+		num_tx_pipes--;
+
+	for (i = 0; i < num_tx_pipes; i++)
+		if (ipa_ctx->sys[tx_pipes[i]].ep->valid)
+			ipa_handle_tx_poll_for_pipe(&ipa_ctx->sys[tx_pipes[i]]);
+
+	/* re-post the poll work */
+	INIT_DELAYED_WORK(&ipa_ctx->poll_work, ipa_poll_function);
+	ret = schedule_delayed_work_on(smp_processor_id(), &ipa_ctx->poll_work,
+			msecs_to_jiffies(polling_delay_ms));
+
+	return;
+}
+
+static int ipa_setup_a5_pipes(void)
+{
+	struct ipa_sys_connect_params sys_in;
+	int result = 0;
+
+	/* CMD OUT (A5->IPA) */
+	memset(&sys_in, 0, sizeof(struct ipa_sys_connect_params));
+	sys_in.client = IPA_CLIENT_A5_CMD_PROD;
+	sys_in.desc_fifo_sz = IPA_SYS_DESC_FIFO_SZ;
+	sys_in.ipa_ep_cfg.mode.mode = IPA_DMA;
+	sys_in.ipa_ep_cfg.mode.dst = IPA_CLIENT_A5_LAN_WAN_CONS;
+	if (ipa_setup_sys_pipe(&sys_in, &ipa_ctx->clnt_hdl_cmd)) {
+		IPAERR(":setup sys pipe failed.\n");
+		result = -EPERM;
+		goto fail;
+	}
+
+	if (ipa_setup_exception_path()) {
+		IPAERR(":fail to setup excp path\n");
+		result = -EPERM;
+		goto fail_cmd;
+	}
+
+	/* LAN-WAN IN (IPA->A5) */
+	memset(&sys_in, 0, sizeof(struct ipa_sys_connect_params));
+	sys_in.client = IPA_CLIENT_A5_LAN_WAN_CONS;
+	sys_in.desc_fifo_sz = IPA_SYS_DESC_FIFO_SZ;
+	sys_in.ipa_ep_cfg.hdr.hdr_a5_mux = 1;
+	sys_in.ipa_ep_cfg.hdr.hdr_len = 8;  /* size of A5 exception hdr */
+	if (ipa_setup_sys_pipe(&sys_in, &ipa_ctx->clnt_hdl_data_in)) {
+		IPAERR(":setup sys pipe failed.\n");
+		result = -EPERM;
+		goto fail_cmd;
+	}
+	/* LAN-WAN OUT (A5->IPA) */
+	memset(&sys_in, 0, sizeof(struct ipa_sys_connect_params));
+	sys_in.client = IPA_CLIENT_A5_LAN_WAN_PROD;
+	sys_in.desc_fifo_sz = IPA_SYS_DESC_FIFO_SZ;
+	sys_in.ipa_ep_cfg.mode.mode = IPA_BASIC;
+	sys_in.ipa_ep_cfg.mode.dst = IPA_CLIENT_A5_LAN_WAN_CONS;
+	if (ipa_setup_sys_pipe(&sys_in, &ipa_ctx->clnt_hdl_data_out)) {
+		IPAERR(":setup sys pipe failed.\n");
+		result = -EPERM;
+		goto fail_data_out;
+	}
+	if (ipa_ctx->polling_mode) {
+		INIT_DELAYED_WORK(&ipa_ctx->poll_work, ipa_poll_function);
+		result =
+		   schedule_delayed_work_on(smp_processor_id(),
+					&ipa_ctx->poll_work,
+					msecs_to_jiffies(polling_delay_ms));
+		if (!result) {
+			IPAERR(":schedule delayed work failed.\n");
+			goto fail_schedule_delayed_work;
+		}
+	}
+
+	return 0;
+
+fail_schedule_delayed_work:
+	ipa_teardown_sys_pipe(ipa_ctx->clnt_hdl_data_out);
+fail_data_out:
+	ipa_teardown_sys_pipe(ipa_ctx->clnt_hdl_data_in);
+fail_cmd:
+	ipa_teardown_sys_pipe(ipa_ctx->clnt_hdl_cmd);
+fail:
+	return result;
+}
+
+static void ipa_teardown_a5_pipes(void)
+{
+	cancel_delayed_work(&ipa_ctx->poll_work);
+	ipa_teardown_sys_pipe(ipa_ctx->clnt_hdl_data_out);
+	ipa_teardown_sys_pipe(ipa_ctx->clnt_hdl_data_in);
+	ipa_teardown_sys_pipe(ipa_ctx->clnt_hdl_cmd);
+}
+
+static int ipa_load_pipe_connection(struct platform_device *pdev,
+				    enum a2_mux_pipe_direction  pipe_dir,
+				    struct a2_mux_pipe_connection *pdata)
+{
+	struct device_node *node;
+	int rc = 0;
+
+	/* validate arguments before dereferencing pdev */
+	if (!pdata || !pdev)
+		return -EINVAL;
+
+	/* retrieve device tree parameters */
+	for_each_child_of_node(pdev->dev.of_node, node)
+	{
+		const char *str;
+
+		rc = of_property_read_string(node, "label", &str);
+		if (rc) {
+			IPAERR("Cannot read string\n");
+			goto err;
+		}
+
+		/* Check if the connection type is supported */
+		if (strncmp(str, "a2-to-ipa", 10)
+			&& strncmp(str, "ipa-to-a2", 10)) {
+			rc = -EINVAL;
+			goto err;
+		}
+
+		if (strnstr(str, "a2-to-ipa", strnlen("a2-to-ipa", 10))
+				&& IPA_TO_A2 == pipe_dir)
+			continue; /* skip to the next pipe */
+		else if (strnstr(str, "ipa-to-a2", strnlen("ipa-to-a2", 10))
+				&& A2_TO_IPA == pipe_dir)
+			continue; /* skip to the next pipe */
+
+		rc = ipa_update_connections_info(node, pdata);
+		if (rc)
+			goto err;
+	}
+
+	return 0;
+err:
+	IPAERR("%s: failed\n", __func__);
+
+	return rc;
+}
+
+static int ipa_update_connections_info(struct device_node *node,
+		struct a2_mux_pipe_connection     *pipe_connection)
+{
+	int rc;
+	char *key;
+	uint32_t val;
+	enum ipa_pipe_mem_type mem_type;
+
+	if (!pipe_connection || !node)
+		return -EINVAL;
+
+	key = "qcom,src-bam-physical-address";
+	rc = of_property_read_u32(node, key, &val);
+	if (rc)
+		goto err;
+	pipe_connection->src_phy_addr = val;
+
+	key = "qcom,ipa-bam-mem-type";
+	rc = of_property_read_u32(node, key, &val);
+	if (rc)
+		goto err;
+	mem_type = val;
+	pipe_connection->mem_type = mem_type;
+
+	key = "qcom,src-bam-pipe-index";
+	rc = of_property_read_u32(node, key, &val);
+	if (rc)
+		goto err;
+	pipe_connection->src_pipe_index = val;
+
+	key = "qcom,dst-bam-physical-address";
+	rc = of_property_read_u32(node, key, &val);
+	if (rc)
+		goto err;
+	pipe_connection->dst_phy_addr = val;
+
+	key = "qcom,dst-bam-pipe-index";
+	rc = of_property_read_u32(node, key, &val);
+	if (rc)
+		goto err;
+	pipe_connection->dst_pipe_index = val;
+
+	key = "qcom,data-fifo-offset";
+	rc = of_property_read_u32(node, key, &val);
+	if (rc)
+		goto err;
+	pipe_connection->data_fifo_base_offset = val;
+
+	key = "qcom,data-fifo-size";
+	rc = of_property_read_u32(node, key, &val);
+	if (rc)
+		goto err;
+	pipe_connection->data_fifo_size = val;
+
+	key = "qcom,descriptor-fifo-offset";
+	rc = of_property_read_u32(node, key, &val);
+	if (rc)
+		goto err;
+	pipe_connection->desc_fifo_base_offset = val;
+
+	key = "qcom,descriptor-fifo-size";
+	rc = of_property_read_u32(node, key, &val);
+	if (rc)
+		goto err;
+
+	pipe_connection->desc_fifo_size = val;
+
+	return 0;
+err:
+	IPAERR("%s: Error in name %s key %s\n", __func__, node->full_name, key);
+
+	return rc;
+}
+
+/**
+* ipa_get_a2_mux_pipe_info() - Exposes A2 parameters fetched from DTS
+*
+* @pipe_dir: pipe direction
+* @pipe_connect: connect structure containing the parameters fetched from DTS
+*
+* Return codes:
+* 0: success
+* -EFAULT: invalid parameters
+*/
+int ipa_get_a2_mux_pipe_info(enum a2_mux_pipe_direction  pipe_dir,
+			     struct a2_mux_pipe_connection *pipe_connect)
+{
+	if (!pipe_connect) {
+		IPAERR("ipa_get_a2_mux_pipe_info switch null args\n");
+		return -EFAULT;
+	}
+
+	switch (pipe_dir) {
+	case A2_TO_IPA:
+		*pipe_connect = ipa_res.a2_to_ipa_pipe;
+		break;
+	case IPA_TO_A2:
+		*pipe_connect = ipa_res.ipa_to_a2_pipe;
+		break;
+	default:
+		IPAERR("ipa_get_a2_mux_pipe_info switch in default\n");
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+static void ipa_set_aggregation_params(void)
+{
+	struct ipa_ep_cfg_aggr agg_params;
+	u32 producer_hdl = 0;
+	u32 consumer_hdl = 0;
+
+	rmnet_bridge_get_client_handles(&producer_hdl, &consumer_hdl);
+
+	agg_params.aggr = ipa_ctx->aggregation_type;
+	agg_params.aggr_byte_limit = ipa_ctx->aggregation_byte_limit;
+	agg_params.aggr_time_limit = ipa_ctx->aggregation_time_limit;
+
+	/* configure aggregation on producer */
+	agg_params.aggr_en = IPA_ENABLE_AGGR;
+	ipa_cfg_ep_aggr(producer_hdl, &agg_params);
+
+	/* configure deaggregation on consumer */
+	agg_params.aggr_en = IPA_ENABLE_DEAGGR;
+	ipa_cfg_ep_aggr(consumer_hdl, &agg_params);
+
+}
+
+/*
+ * The following device attributes are for configuring the aggregation
+ * attributes when the driver is already running.
+ * The attributes are for configuring the aggregation type
+ * (MBIM_16/MBIM_32/TLP), the aggregation byte limit and the aggregation
+ * time limit.
+ */
+static ssize_t ipa_show_aggregation_type(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	ssize_t ret_val;
+	char str[IPA_AGGR_MAX_STR_LENGTH];
+
+	if (!buf) {
+		IPAERR("buffer for ipa_show_aggregation_type is NULL\n");
+		return -EINVAL;
+	}
+
+	memset(str, 0, sizeof(str));
+
+	switch (ipa_ctx->aggregation_type) {
+	case IPA_MBIM_16:
+		strlcpy(str, "MBIM_16", IPA_AGGR_STR_IN_BYTES("MBIM_16"));
+		break;
+	case IPA_MBIM_32:
+		strlcpy(str, "MBIM_32", IPA_AGGR_STR_IN_BYTES("MBIM_32"));
+		break;
+	case IPA_TLP:
+		strlcpy(str, "TLP", IPA_AGGR_STR_IN_BYTES("TLP"));
+		break;
+	default:
+		strlcpy(str, "NONE", IPA_AGGR_STR_IN_BYTES("NONE"));
+		break;
+	}
+
+	ret_val = scnprintf(buf, PAGE_SIZE, "%s\n", str);
+
+	return ret_val;
+}
+
+static ssize_t ipa_store_aggregation_type(struct device *dev,
+				     struct device_attribute *attr,
+				     const char *buf, size_t count)
+{
+	char str[IPA_AGGR_MAX_STR_LENGTH], *pstr;
+
+	if (!buf) {
+		IPAERR("buffer for ipa_store_aggregation_type is NULL\n");
+		return -EINVAL;
+	}
+
+	strlcpy(str, buf, sizeof(str));
+	pstr = strim(str);
+
+	if (!strncmp(pstr, "MBIM_16", IPA_AGGR_STR_IN_BYTES("MBIM_16")))
+		ipa_ctx->aggregation_type = IPA_MBIM_16;
+	else if (!strncmp(pstr, "MBIM_32", IPA_AGGR_STR_IN_BYTES("MBIM_32")))
+		ipa_ctx->aggregation_type = IPA_MBIM_32;
+	else if (!strncmp(pstr, "TLP", IPA_AGGR_STR_IN_BYTES("TLP")))
+		ipa_ctx->aggregation_type = IPA_TLP;
+	else {
+		IPAERR("ipa_store_aggregation_type wrong input\n");
+		return -EINVAL;
+	}
+
+	ipa_set_aggregation_params();
+
+	return count;
+}
+
+static DEVICE_ATTR(aggregation_type, S_IWUSR | S_IRUSR,
+		ipa_show_aggregation_type,
+		ipa_store_aggregation_type);
+
+static ssize_t ipa_show_aggregation_byte_limit(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	ssize_t ret_val;
+
+	if (!buf) {
+		IPAERR("buffer for ipa_show_aggregation_byte_limit is NULL\n");
+		return -EINVAL;
+	}
+
+	ret_val = scnprintf(buf, PAGE_SIZE, "%u\n",
+			    ipa_ctx->aggregation_byte_limit);
+
+	return ret_val;
+}
+
+static ssize_t ipa_store_aggregation_byte_limit(struct device *dev,
+				     struct device_attribute *attr,
+				     const char *buf, size_t count)
+{
+	char str[IPA_AGGR_MAX_STR_LENGTH];
+	char *pstr;
+	u32 ret = 0;
+
+	if (!buf) {
+		IPAERR("buffer for ipa_store_aggregation_byte_limit is NULL\n");
+		return -EINVAL;
+	}
+
+	strlcpy(str, buf, sizeof(str));
+	pstr = strim(str);
+
+	/* second argument of kstrtouint() is the numeric base, not a length */
+	if (kstrtouint(pstr, 10, &ret)) {
+		IPAERR("ipa_store_aggregation_byte_limit wrong input\n");
+		return -EINVAL;
+	}
+
+	ipa_ctx->aggregation_byte_limit = ret;
+
+	ipa_set_aggregation_params();
+
+	return count;
+}
+
+static DEVICE_ATTR(aggregation_byte_limit, S_IWUSR | S_IRUSR,
+		ipa_show_aggregation_byte_limit,
+		ipa_store_aggregation_byte_limit);
+
+static ssize_t ipa_show_aggregation_time_limit(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	ssize_t ret_val;
+
+	if (!buf) {
+		IPAERR("buffer for ipa_show_aggregation_time_limit is NULL\n");
+		return -EINVAL;
+	}
+
+	ret_val = scnprintf(buf,
+			    PAGE_SIZE,
+			    "%u\n",
+			    ipa_ctx->aggregation_time_limit);
+
+	return ret_val;
+}
+
+static ssize_t ipa_store_aggregation_time_limit(struct device *dev,
+				     struct device_attribute *attr,
+				     const char *buf, size_t count)
+{
+	char str[IPA_AGGR_MAX_STR_LENGTH], *pstr;
+	u32 ret = 0;
+
+	if (!buf) {
+		IPAERR("buffer for ipa_store_aggregation_time_limit is NULL\n");
+		return -EINVAL;
+	}
+
+	strlcpy(str, buf, sizeof(str));
+	pstr = strim(str);
+
+	/* second argument of kstrtouint() is the numeric base, not a length */
+	if (kstrtouint(pstr, 10, &ret)) {
+		IPAERR("ipa_store_aggregation_time_limit wrong input\n");
+		return -EINVAL;
+	}
+
+	ipa_ctx->aggregation_time_limit = ret;
+
+	ipa_set_aggregation_params();
+
+	return count;
+}
+
+static DEVICE_ATTR(aggregation_time_limit, S_IWUSR | S_IRUSR,
+		ipa_show_aggregation_time_limit,
+		ipa_store_aggregation_time_limit);
+
+static const struct file_operations ipa_drv_fops = {
+	.owner = THIS_MODULE,
+	.open = ipa_open,
+	.read = ipa_read,
+	.unlocked_ioctl = ipa_ioctl,
+};
+
+static int ipa_get_clks(struct device *dev)
+{
+	ipa_cnoc_clk = clk_get(dev, "iface_clk");
+	if (IS_ERR(ipa_cnoc_clk)) {
+		ipa_cnoc_clk = NULL;
+		IPAERR("fail to get cnoc clk\n");
+		return -ENODEV;
+	}
+
+	ipa_clk_src = clk_get(dev, "core_src_clk");
+	if (IS_ERR(ipa_clk_src)) {
+		ipa_clk_src = NULL;
+		IPAERR("fail to get ipa clk src\n");
+		return -ENODEV;
+	}
+
+	ipa_clk = clk_get(dev, "core_clk");
+	if (IS_ERR(ipa_clk)) {
+		ipa_clk = NULL;
+		IPAERR("fail to get ipa clk\n");
+		return -ENODEV;
+	}
+
+	sys_noc_ipa_axi_clk = clk_get(dev, "bus_clk");
+	if (IS_ERR(sys_noc_ipa_axi_clk)) {
+		sys_noc_ipa_axi_clk = NULL;
+		IPAERR("fail to get sys_noc_ipa_axi clk\n");
+		return -ENODEV;
+	}
+
+	return 0;
+}
+
+/**
+* ipa_enable_clks() - Turn on IPA clocks
+*
+* Return codes:
+* None
+*/
+void ipa_enable_clks(void)
+{
+	if (ipa_cnoc_clk) {
+		clk_prepare(ipa_cnoc_clk);
+		clk_enable(ipa_cnoc_clk);
+		clk_set_rate(ipa_cnoc_clk, IPA_CNOC_CLK_RATE);
+	} else {
+		WARN_ON(1);
+	}
+
+	if (ipa_clk_src)
+		clk_set_rate(ipa_clk_src, IPA_V1_CLK_RATE);
+	else
+		WARN_ON(1);
+
+	if (ipa_clk)
+		clk_prepare(ipa_clk);
+	else
+		WARN_ON(1);
+
+	if (sys_noc_ipa_axi_clk)
+		clk_prepare(sys_noc_ipa_axi_clk);
+	else
+		WARN_ON(1);
+
+	if (ipa_clk)
+		clk_enable(ipa_clk);
+	else
+		WARN_ON(1);
+
+	if (sys_noc_ipa_axi_clk)
+		clk_enable(sys_noc_ipa_axi_clk);
+	else
+		WARN_ON(1);
+}
+
+/**
+* ipa_disable_clks() - Turn off IPA clocks
+*
+* Return codes:
+* None
+*/
+void ipa_disable_clks(void)
+{
+	if (sys_noc_ipa_axi_clk)
+		clk_disable_unprepare(sys_noc_ipa_axi_clk);
+	else
+		WARN_ON(1);
+
+	if (ipa_clk)
+		clk_disable_unprepare(ipa_clk);
+	else
+		WARN_ON(1);
+
+	if (ipa_cnoc_clk)
+		clk_disable_unprepare(ipa_cnoc_clk);
+	else
+		WARN_ON(1);
+}
+
+static int ipa_setup_bam_cfg(const struct ipa_plat_drv_res *res)
+{
+	void *bam_cnfg_bits;
+
+	bam_cnfg_bits = ioremap(res->ipa_mem_base + IPA_BAM_REG_BASE_OFST,
+				IPA_BAM_REMAP_SIZE);
+	if (!bam_cnfg_bits)
+		return -ENOMEM;
+	ipa_write_reg(bam_cnfg_bits, IPA_BAM_CNFG_BITS_OFST,
+		      IPA_BAM_CNFG_BITS_VAL);
+	iounmap(bam_cnfg_bits);
+
+	return 0;
+}
+/**
+* ipa_init() - Initialize the IPA Driver
+*@resource_p:	contains platform-specific values parsed from the DTS file
+*
+* Function initialization process:
+* - Allocate memory for the driver context data struct
+* - Initialize the ipa_ctx with:
+*    1) values parsed from the DTS file
+*    2) parameters passed at module initialization
+*    3) HW values read at runtime (such as core memory size)
+* - Map IPA core registers to CPU memory
+* - Restart the IPA core (HW reset)
+* - Register the IPA BAM with the SPS driver and get a BAM handle
+* - Set the configuration for the IPA BAM via BAM_CNFG_BITS
+* - Initialize the look-aside caches (kmem_cache/slab) for filter,
+*   routing and IPA-tree entries
+* - Create a DMA memory pool of 4-byte-aligned, 512-byte objects used
+*   for tx (A5->IPA)
+* - Initialize list heads (routing, filter, hdr, system pipes)
+* - Initialize mutexes (for the ipa_ctx and NAT memory)
+* - Initialize spinlocks (for the lists related to A5<->IPA pipes)
+* - Initialize two single-threaded workqueues named "ipa rx wq" and
+*   "ipa tx wq"
+* - Initialize red-black trees for the handles of headers, routing rules,
+*   routing tables and filtering rules
+* - Set up all A5<->IPA pipes by calling ipa_setup_a5_pipes
+* - Prepare the descriptors for the system pipes
+* - Initialize the filter block by committing the IPv4 and IPv6 default
+*   rules
+* - Create an empty routing table in system memory (not committed)
+* - Initialize the pipe memory pool with ipa_pipe_mem_init on supported
+*   platforms
+* - Create a char device for IPA
+*/
+static int ipa_init(const struct ipa_plat_drv_res *resource_p)
+{
+	int result = 0;
+	int i;
+	struct sps_bam_props bam_props = { 0 };
+	struct ipa_flt_tbl *flt_tbl;
+	struct ipa_rt_tbl_set *rset;
+
+	IPADBG("IPA init\n");
+
+	ipa_ctx = kzalloc(sizeof(*ipa_ctx), GFP_KERNEL);
+	if (!ipa_ctx) {
+		IPAERR(":kzalloc err.\n");
+		result = -ENOMEM;
+		goto fail_mem;
+	}
+
+	IPADBG("polling_mode=%u delay_ms=%u\n", polling_mode, polling_delay_ms);
+	ipa_ctx->polling_mode = polling_mode;
+	IPADBG("hdr_lcl=%u ip4_rt=%u ip6_rt=%u ip4_flt=%u ip6_flt=%u\n",
+	       hdr_tbl_lcl, ip4_rt_tbl_lcl, ip6_rt_tbl_lcl, ip4_flt_tbl_lcl,
+	       ip6_flt_tbl_lcl);
+	ipa_ctx->hdr_tbl_lcl = hdr_tbl_lcl;
+	ipa_ctx->ip4_rt_tbl_lcl = ip4_rt_tbl_lcl;
+	ipa_ctx->ip6_rt_tbl_lcl = ip6_rt_tbl_lcl;
+	ipa_ctx->ip4_flt_tbl_lcl = ip4_flt_tbl_lcl;
+	ipa_ctx->ip6_flt_tbl_lcl = ip6_flt_tbl_lcl;
+
+	ipa_ctx->ipa_wrapper_base = resource_p->ipa_mem_base;
+
+	/* setup IPA register access */
+	ipa_ctx->mmio = ioremap(resource_p->ipa_mem_base + IPA_REG_BASE_OFST,
+			resource_p->ipa_mem_size);
+	if (!ipa_ctx->mmio) {
+		IPAERR(":ipa-base ioremap err.\n");
+		result = -EFAULT;
+		goto fail_remap;
+	}
+	/* do POR programming to setup HW */
+	result = ipa_init_hw();
+	if (result) {
+		IPAERR(":error initializing driver.\n");
+		result = -ENODEV;
+		goto fail_init_hw;
+	}
+	/* read how much SRAM is available for SW use */
+	ipa_ctx->smem_sz = ipa_read_reg(ipa_ctx->mmio,
+			IPA_SHARED_MEM_SIZE_OFST);
+
+	if (IPA_RAM_END_OFST > ipa_ctx->smem_sz) {
+		IPAERR("SW expect more core memory, needed %d, avail %d\n",
+				IPA_RAM_END_OFST, ipa_ctx->smem_sz);
+		result = -ENOMEM;
+		goto fail_init_hw;
+	}
+	/* register IPA with SPS driver */
+	bam_props.phys_addr = resource_p->bam_mem_base;
+	bam_props.virt_addr = ioremap(resource_p->bam_mem_base,
+			resource_p->bam_mem_size);
+	if (!bam_props.virt_addr) {
+		IPAERR(":bam-base ioremap err.\n");
+		result = -EFAULT;
+		goto fail_bam_remap;
+	}
+	bam_props.virt_size = resource_p->bam_mem_size;
+	bam_props.irq = resource_p->bam_irq;
+	bam_props.num_pipes = IPA_NUM_PIPES;
+	bam_props.summing_threshold = IPA_SUMMING_THRESHOLD;
+	bam_props.event_threshold = IPA_EVENT_THRESHOLD;
+
+	result = sps_register_bam_device(&bam_props, &ipa_ctx->bam_handle);
+	if (result) {
+		IPAERR(":bam register err.\n");
+		result = -ENODEV;
+		goto fail_bam_register;
+	}
+
+	if (ipa_setup_bam_cfg(resource_p)) {
+		IPAERR(":bam cfg err.\n");
+		result = -ENODEV;
+		goto fail_flt_rule_cache;
+	}
+
+	/* set up the default op mode */
+	ipa_ctx->mode = IPA_MODE_USB_DONGLE;
+
+	/* init the lookaside cache */
+	ipa_ctx->flt_rule_cache = kmem_cache_create("IPA FLT",
+			sizeof(struct ipa_flt_entry), 0, 0, NULL);
+	if (!ipa_ctx->flt_rule_cache) {
+		IPAERR(":ipa flt cache create failed\n");
+		result = -ENOMEM;
+		goto fail_flt_rule_cache;
+	}
+	ipa_ctx->rt_rule_cache = kmem_cache_create("IPA RT",
+			sizeof(struct ipa_rt_entry), 0, 0, NULL);
+	if (!ipa_ctx->rt_rule_cache) {
+		IPAERR(":ipa rt cache create failed\n");
+		result = -ENOMEM;
+		goto fail_rt_rule_cache;
+	}
+	ipa_ctx->hdr_cache = kmem_cache_create("IPA HDR",
+			sizeof(struct ipa_hdr_entry), 0, 0, NULL);
+	if (!ipa_ctx->hdr_cache) {
+		IPAERR(":ipa hdr cache create failed\n");
+		result = -ENOMEM;
+		goto fail_hdr_cache;
+	}
+	ipa_ctx->hdr_offset_cache =
+	   kmem_cache_create("IPA HDR OFF", sizeof(struct ipa_hdr_offset_entry),
+			   0, 0, NULL);
+	if (!ipa_ctx->hdr_offset_cache) {
+		IPAERR(":ipa hdr off cache create failed\n");
+		result = -ENOMEM;
+		goto fail_hdr_offset_cache;
+	}
+	ipa_ctx->rt_tbl_cache = kmem_cache_create("IPA RT TBL",
+			sizeof(struct ipa_rt_tbl), 0, 0, NULL);
+	if (!ipa_ctx->rt_tbl_cache) {
+		IPAERR(":ipa rt tbl cache create failed\n");
+		result = -ENOMEM;
+		goto fail_rt_tbl_cache;
+	}
+	ipa_ctx->tx_pkt_wrapper_cache =
+	   kmem_cache_create("IPA TX PKT WRAPPER",
+			   sizeof(struct ipa_tx_pkt_wrapper), 0, 0, NULL);
+	if (!ipa_ctx->tx_pkt_wrapper_cache) {
+		IPAERR(":ipa tx pkt wrapper cache create failed\n");
+		result = -ENOMEM;
+		goto fail_tx_pkt_wrapper_cache;
+	}
+	ipa_ctx->rx_pkt_wrapper_cache =
+	   kmem_cache_create("IPA RX PKT WRAPPER",
+			   sizeof(struct ipa_rx_pkt_wrapper), 0, 0, NULL);
+	if (!ipa_ctx->rx_pkt_wrapper_cache) {
+		IPAERR(":ipa rx pkt wrapper cache create failed\n");
+		result = -ENOMEM;
+		goto fail_rx_pkt_wrapper_cache;
+	}
+	ipa_ctx->tree_node_cache =
+	   kmem_cache_create("IPA TREE", sizeof(struct ipa_tree_node), 0, 0,
+			   NULL);
+	if (!ipa_ctx->tree_node_cache) {
+		IPAERR(":ipa tree node cache create failed\n");
+		result = -ENOMEM;
+		goto fail_tree_node_cache;
+	}
+
+	/*
+	 * setup DMA pool 4 byte aligned, don't cross 1k boundaries, nominal
+	 * size 512 bytes
+	 */
+	ipa_ctx->one_kb_no_straddle_pool = dma_pool_create("ipa_1k", NULL,
+			IPA_DMA_POOL_SIZE, IPA_DMA_POOL_ALIGNMENT,
+			IPA_DMA_POOL_BOUNDARY);
+	if (!ipa_ctx->one_kb_no_straddle_pool) {
+		IPAERR("cannot setup 1kb alloc DMA pool.\n");
+		result = -ENOMEM;
+		goto fail_dma_pool;
+	}
+
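+	/*
+	 * in_sys marks tables held in system (DDR) memory rather than local
+	 * IPA SRAM; it is the inverse of the *_tbl_lcl module parameters.
+	 */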
+	ipa_ctx->glob_flt_tbl[IPA_IP_v4].in_sys = !ipa_ctx->ip4_flt_tbl_lcl;
+	ipa_ctx->glob_flt_tbl[IPA_IP_v6].in_sys = !ipa_ctx->ip6_flt_tbl_lcl;
+
+	/* init the various list heads */
+	INIT_LIST_HEAD(&ipa_ctx->glob_flt_tbl[IPA_IP_v4].head_flt_rule_list);
+	INIT_LIST_HEAD(&ipa_ctx->glob_flt_tbl[IPA_IP_v6].head_flt_rule_list);
+	INIT_LIST_HEAD(&ipa_ctx->hdr_tbl.head_hdr_entry_list);
+	for (i = 0; i < IPA_HDR_BIN_MAX; i++) {
+		INIT_LIST_HEAD(&ipa_ctx->hdr_tbl.head_offset_list[i]);
+		INIT_LIST_HEAD(&ipa_ctx->hdr_tbl.head_free_offset_list[i]);
+	}
+	INIT_LIST_HEAD(&ipa_ctx->rt_tbl_set[IPA_IP_v4].head_rt_tbl_list);
+	INIT_LIST_HEAD(&ipa_ctx->rt_tbl_set[IPA_IP_v6].head_rt_tbl_list);
+	for (i = 0; i < IPA_NUM_PIPES; i++) {
+		flt_tbl = &ipa_ctx->flt_tbl[i][IPA_IP_v4];
+		INIT_LIST_HEAD(&flt_tbl->head_flt_rule_list);
+		flt_tbl->in_sys = !ipa_ctx->ip4_flt_tbl_lcl;
+
+		flt_tbl = &ipa_ctx->flt_tbl[i][IPA_IP_v6];
+		INIT_LIST_HEAD(&flt_tbl->head_flt_rule_list);
+		flt_tbl->in_sys = !ipa_ctx->ip6_flt_tbl_lcl;
+	}
+
+	rset = &ipa_ctx->reap_rt_tbl_set[IPA_IP_v4];
+	INIT_LIST_HEAD(&rset->head_rt_tbl_list);
+	rset = &ipa_ctx->reap_rt_tbl_set[IPA_IP_v6];
+	INIT_LIST_HEAD(&rset->head_rt_tbl_list);
+
+	mutex_init(&ipa_ctx->lock);
+	mutex_init(&ipa_ctx->nat_mem.lock);
+
+	for (i = 0; i < IPA_A5_SYS_MAX; i++) {
+		INIT_LIST_HEAD(&ipa_ctx->sys[i].head_desc_list);
+		spin_lock_init(&ipa_ctx->sys[i].spinlock);
+		if (i != IPA_A5_WLAN_AMPDU_OUT)
+			ipa_ctx->sys[i].ep = &ipa_ctx->ep[i];
+		else
+			ipa_ctx->sys[i].ep = &ipa_ctx->ep[WLAN_AMPDU_TX_EP];
+		INIT_LIST_HEAD(&ipa_ctx->sys[i].wait_desc_list);
+	}
+
+	ipa_ctx->rx_wq = create_singlethread_workqueue("ipa rx wq");
+	if (!ipa_ctx->rx_wq) {
+		IPAERR(":fail to create rx wq\n");
+		result = -ENOMEM;
+		goto fail_rx_wq;
+	}
+
+	ipa_ctx->tx_wq = create_singlethread_workqueue("ipa tx wq");
+	if (!ipa_ctx->tx_wq) {
+		IPAERR(":fail to create tx wq\n");
+		result = -ENOMEM;
+		goto fail_tx_wq;
+	}
+
+	ipa_ctx->hdr_hdl_tree = RB_ROOT;
+	ipa_ctx->rt_rule_hdl_tree = RB_ROOT;
+	ipa_ctx->rt_tbl_hdl_tree = RB_ROOT;
+	ipa_ctx->flt_rule_hdl_tree = RB_ROOT;
+
+	atomic_set(&ipa_ctx->ipa_active_clients, 0);
+
+	result = ipa_bridge_init();
+	if (result) {
+		IPAERR("ipa bridge init err.\n");
+		result = -ENODEV;
+		goto fail_bridge_init;
+	}
+
+	/* setup the A5-IPA pipes */
+	if (ipa_setup_a5_pipes()) {
+		IPAERR(":failed to setup IPA-A5 pipes.\n");
+		result = -ENODEV;
+		goto fail_a5_pipes;
+	}
+
+	ipa_replenish_rx_cache();
+
+	/* init the filtering block */
+	ipa_commit_flt(IPA_IP_v4);
+	ipa_commit_flt(IPA_IP_v6);
+
+	/*
+	 * setup an empty routing table in system memory, this will be used
+	 * to delete a routing table cleanly and safely
+	 */
+	ipa_ctx->empty_rt_tbl_mem.size = IPA_ROUTING_RULE_BYTE_SIZE;
+
+	ipa_ctx->empty_rt_tbl_mem.base =
+		dma_alloc_coherent(NULL, ipa_ctx->empty_rt_tbl_mem.size,
+				    &ipa_ctx->empty_rt_tbl_mem.phys_base,
+				    GFP_KERNEL);
+	if (!ipa_ctx->empty_rt_tbl_mem.base) {
+		IPAERR("DMA buff alloc fail %d bytes for empty routing tbl\n",
+				ipa_ctx->empty_rt_tbl_mem.size);
+		result = -ENOMEM;
+		goto fail_empty_rt_tbl;
+	}
+	memset(ipa_ctx->empty_rt_tbl_mem.base, 0,
+			ipa_ctx->empty_rt_tbl_mem.size);
+
+	/* setup the IPA pipe mem pool */
+	ipa_pipe_mem_init(resource_p->ipa_pipe_mem_start_ofst,
+			resource_p->ipa_pipe_mem_size);
+
+	ipa_ctx->class = class_create(THIS_MODULE, DRV_NAME);
+
+	result = alloc_chrdev_region(&ipa_ctx->dev_num, 0, 1, DRV_NAME);
+	if (result) {
+		IPAERR("alloc_chrdev_region err.\n");
+		result = -ENODEV;
+		goto fail_alloc_chrdev_region;
+	}
+
+	ipa_ctx->dev = device_create(ipa_ctx->class, NULL, ipa_ctx->dev_num,
+			ipa_ctx, DRV_NAME);
+	if (IS_ERR(ipa_ctx->dev)) {
+		IPAERR(":device_create err.\n");
+		result = -ENODEV;
+		goto fail_device_create;
+	}
+
+	cdev_init(&ipa_ctx->cdev, &ipa_drv_fops);
+	ipa_ctx->cdev.owner = THIS_MODULE;
+	ipa_ctx->cdev.ops = &ipa_drv_fops;  /* from LDD3 */
+
+	result = cdev_add(&ipa_ctx->cdev, ipa_ctx->dev_num, 1);
+	if (result) {
+		IPAERR(":cdev_add err=%d\n", -result);
+		result = -ENODEV;
+		goto fail_cdev_add;
+	}
+
+	/* default aggregation parameters */
+	ipa_ctx->aggregation_type = IPA_MBIM_16;
+	ipa_ctx->aggregation_byte_limit = 1;
+	ipa_ctx->aggregation_time_limit = 0;
+	IPADBG(":IPA driver init OK.\n");
+
+	/* gate IPA clocks */
+	ipa_disable_clks();
+
+	return 0;
+
+fail_cdev_add:
+	device_destroy(ipa_ctx->class, ipa_ctx->dev_num);
+fail_device_create:
+	unregister_chrdev_region(ipa_ctx->dev_num, 1);
+fail_alloc_chrdev_region:
+	if (ipa_ctx->pipe_mem_pool)
+		gen_pool_destroy(ipa_ctx->pipe_mem_pool);
+	dma_free_coherent(NULL,
+			  ipa_ctx->empty_rt_tbl_mem.size,
+			  ipa_ctx->empty_rt_tbl_mem.base,
+			  ipa_ctx->empty_rt_tbl_mem.phys_base);
+fail_empty_rt_tbl:
+	ipa_cleanup_rx();
+	ipa_teardown_a5_pipes();
+fail_a5_pipes:
+	ipa_bridge_cleanup();
+fail_bridge_init:
+	destroy_workqueue(ipa_ctx->tx_wq);
+fail_tx_wq:
+	destroy_workqueue(ipa_ctx->rx_wq);
+fail_rx_wq:
+	dma_pool_destroy(ipa_ctx->one_kb_no_straddle_pool);
+fail_dma_pool:
+	kmem_cache_destroy(ipa_ctx->tree_node_cache);
+fail_tree_node_cache:
+	kmem_cache_destroy(ipa_ctx->rx_pkt_wrapper_cache);
+fail_rx_pkt_wrapper_cache:
+	kmem_cache_destroy(ipa_ctx->tx_pkt_wrapper_cache);
+fail_tx_pkt_wrapper_cache:
+	kmem_cache_destroy(ipa_ctx->rt_tbl_cache);
+fail_rt_tbl_cache:
+	kmem_cache_destroy(ipa_ctx->hdr_offset_cache);
+fail_hdr_offset_cache:
+	kmem_cache_destroy(ipa_ctx->hdr_cache);
+fail_hdr_cache:
+	kmem_cache_destroy(ipa_ctx->rt_rule_cache);
+fail_rt_rule_cache:
+	kmem_cache_destroy(ipa_ctx->flt_rule_cache);
+fail_flt_rule_cache:
+	sps_deregister_bam_device(ipa_ctx->bam_handle);
+fail_bam_register:
+	iounmap(bam_props.virt_addr);
+fail_bam_remap:
+fail_init_hw:
+	iounmap(ipa_ctx->mmio);
+fail_remap:
+	kfree(ipa_ctx);
+	ipa_ctx = NULL;
+fail_mem:
+	/* gate IPA clocks */
+	ipa_disable_clks();
+	return result;
+}
+
+static int ipa_plat_drv_probe(struct platform_device *pdev_p)
+{
+	int result = 0;
+	struct resource *resource_p;
+	IPADBG("IPA plat drv probe\n");
+
+	/* initialize ipa_res */
+	ipa_res.ipa_pipe_mem_start_ofst = IPA_PIPE_MEM_START_OFST;
+	ipa_res.ipa_pipe_mem_size = IPA_PIPE_MEM_SIZE;
+
+	result = ipa_load_pipe_connection(pdev_p,
+					A2_TO_IPA,
+					&ipa_res.a2_to_ipa_pipe);
+	if (0 != result)
+		IPAERR(":ipa_load_pipe_connection failed!\n");
+
+	result = ipa_load_pipe_connection(pdev_p, IPA_TO_A2,
+					  &ipa_res.ipa_to_a2_pipe);
+	if (0 != result)
+		IPAERR(":ipa_load_pipe_connection failed!\n");
+
+	/* Get IPA wrapper address */
+	resource_p = platform_get_resource_byname(pdev_p, IORESOURCE_MEM,
+			"ipa-base");
+
+	if (!resource_p) {
+		IPAERR(":get resource failed for ipa-base!\n");
+		return -ENODEV;
+	} else {
+		ipa_res.ipa_mem_base = resource_p->start;
+		ipa_res.ipa_mem_size = resource_size(resource_p);
+	}
+
+	/* Get IPA BAM address */
+	resource_p = platform_get_resource_byname(pdev_p, IORESOURCE_MEM,
+			"bam-base");
+
+	if (!resource_p) {
+		IPAERR(":get resource failed for bam-base!\n");
+		return -ENODEV;
+	} else {
+		ipa_res.bam_mem_base = resource_p->start;
+		ipa_res.bam_mem_size = resource_size(resource_p);
+	}
+
+	/* Get IPA pipe mem start ofst */
+	resource_p = platform_get_resource_byname(pdev_p, IORESOURCE_MEM,
+			"ipa-pipe-mem");
+
+	if (!resource_p) {
+		IPADBG(":get resource failed for ipa-pipe-mem\n");
+	} else {
+		ipa_res.ipa_pipe_mem_start_ofst = resource_p->start;
+		ipa_res.ipa_pipe_mem_size = resource_size(resource_p);
+	}
+
+	/* Get IPA IRQ number */
+	resource_p = platform_get_resource_byname(pdev_p, IORESOURCE_IRQ,
+			"ipa-irq");
+
+	if (!resource_p) {
+		IPAERR(":get resource failed for ipa-irq!\n");
+		return -ENODEV;
+	} else {
+		ipa_res.ipa_irq = resource_p->start;
+	}
+
+	/* Get IPA BAM IRQ number */
+	resource_p = platform_get_resource_byname(pdev_p, IORESOURCE_IRQ,
+			"bam-irq");
+
+	if (!resource_p) {
+		IPAERR(":get resource failed for bam-irq!\n");
+		return -ENODEV;
+	} else {
+		ipa_res.bam_irq = resource_p->start;
+	}
+
+	IPADBG(":ipa_mem_base = 0x%x, ipa_mem_size = 0x%x\n",
+	       ipa_res.ipa_mem_base, ipa_res.ipa_mem_size);
+	IPADBG(":bam_mem_base = 0x%x, bam_mem_size = 0x%x\n",
+	       ipa_res.bam_mem_base, ipa_res.bam_mem_size);
+	IPADBG(":pipe_mem_start_ofst = 0x%x, pipe_mem_size = 0x%x\n",
+	       ipa_res.ipa_pipe_mem_start_ofst, ipa_res.ipa_pipe_mem_size);
+
+	IPADBG(":ipa_irq = %d\n", ipa_res.ipa_irq);
+	IPADBG(":bam_irq = %d\n", ipa_res.bam_irq);
+
+	/* stash the IPA dev ptr */
+	ipa_dev = &pdev_p->dev;
+
+	/* get IPA clocks */
+	if (ipa_get_clks(ipa_dev) != 0)
+		return -ENODEV;
+
+	/* enable IPA clocks */
+	ipa_enable_clks();
+
+	/* Proceed to real initialization */
+	result = ipa_init(&ipa_res);
+	if (result)
+		IPAERR("ipa_init failed\n");
+
+	result = device_create_file(&pdev_p->dev,
+			&dev_attr_aggregation_type);
+	if (result)
+		IPAERR("failed to create device file\n");
+
+	result = device_create_file(&pdev_p->dev,
+			&dev_attr_aggregation_byte_limit);
+	if (result)
+		IPAERR("failed to create device file\n");
+
+	result = device_create_file(&pdev_p->dev,
+			&dev_attr_aggregation_time_limit);
+	if (result)
+		IPAERR("failed to create device file\n");
+
+	return result;
+}
+
+static struct platform_driver ipa_plat_drv = {
+	.probe = ipa_plat_drv_probe,
+	.driver = {
+		.name = DRV_NAME,
+		.owner = THIS_MODULE,
+		.of_match_table = ipa_plat_drv_match,
+	},
+};
+
+static int ipa_plat_drv_init(void)
+{
+	return platform_driver_register(&ipa_plat_drv);
+}
+
+struct ipa_context *ipa_get_ctx(void)
+{
+	return ipa_ctx;
+}
+
+static int __init ipa_module_init(void)
+{
+	int result = 0;
+
+	IPADBG("IPA module init\n");
+	ipa_debugfs_init();
+	/* Register as a platform device driver */
+	result = ipa_plat_drv_init();
+
+	return result;
+}
+
+late_initcall(ipa_module_init);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("IPA HW device driver");
+
diff --git a/drivers/platform/msm/ipa/ipa_bridge.c b/drivers/platform/msm/ipa/ipa_bridge.c
new file mode 100644
index 0000000..cf51ab6
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_bridge.c
@@ -0,0 +1,789 @@
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/delay.h>
+#include <linux/ratelimit.h>
+#include "ipa_i.h"
+
+enum ipa_bridge_id {
+	IPA_DL_FROM_A2,
+	IPA_DL_TO_IPA,
+	IPA_UL_FROM_IPA,
+	IPA_UL_TO_A2,
+	IPA_BRIDGE_ID_MAX
+};
+
+static int polling_min_sleep[IPA_DIR_MAX] = { 950, 950 };
+static int polling_max_sleep[IPA_DIR_MAX] = { 1050, 1050 };
+static int polling_inactivity[IPA_DIR_MAX] = { 20, 20 };
+
+struct ipa_pkt_info {
+	void *buffer;
+	dma_addr_t dma_address;
+	uint32_t len;
+	struct list_head list_node;
+};
+
+struct ipa_bridge_pipe_context {
+	struct list_head head_desc_list;
+	struct sps_pipe *pipe;
+	struct sps_connect connection;
+	struct sps_mem_buffer desc_mem_buf;
+	struct sps_register_event register_event;
+	spinlock_t spinlock;
+	u32 len;
+	u32 free_len;
+	struct list_head free_desc_list;
+};
+
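+/*
+ * bridge[] is indexed so that, for a given direction, bridge[2 * dir] is the
+ * receive-side pipe and bridge[2 * dir + 1] is the transmit-side pipe (see
+ * ipa_do_bridge_work()), matching the enum ipa_bridge_id ordering above.
+ */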
+static struct ipa_bridge_pipe_context bridge[IPA_BRIDGE_ID_MAX];
+
+static struct workqueue_struct *ipa_ul_workqueue;
+static struct workqueue_struct *ipa_dl_workqueue;
+static void ipa_do_bridge_work(enum ipa_bridge_dir dir);
+
+static u32 alloc_cnt[IPA_DIR_MAX];
+
+static void ul_work_func(struct work_struct *work)
+{
+	ipa_do_bridge_work(IPA_UL);
+}
+
+static void dl_work_func(struct work_struct *work)
+{
+	ipa_do_bridge_work(IPA_DL);
+}
+
+static DECLARE_WORK(ul_work, ul_work_func);
+static DECLARE_WORK(dl_work, dl_work_func);
+
+static int ipa_switch_to_intr_mode(enum ipa_bridge_dir dir)
+{
+	int ret;
+	struct ipa_bridge_pipe_context *sys = &bridge[2 * dir];
+
+	ret = sps_get_config(sys->pipe, &sys->connection);
+	if (ret) {
+		IPAERR("sps_get_config() failed %d\n", ret);
+		goto fail;
+	}
+	sys->register_event.options = SPS_O_EOT;
+	ret = sps_register_event(sys->pipe, &sys->register_event);
+	if (ret) {
+		IPAERR("sps_register_event() failed %d\n", ret);
+		goto fail;
+	}
+	sys->connection.options =
+	   SPS_O_AUTO_ENABLE | SPS_O_ACK_TRANSFERS | SPS_O_EOT;
+	ret = sps_set_config(sys->pipe, &sys->connection);
+	if (ret) {
+		IPAERR("sps_set_config() failed %d\n", ret);
+		goto fail;
+	}
+	ret = 0;
+fail:
+	return ret;
+}
+
+static int ipa_switch_to_poll_mode(enum ipa_bridge_dir dir)
+{
+	int ret;
+	struct ipa_bridge_pipe_context *sys = &bridge[2 * dir];
+
+	ret = sps_get_config(sys->pipe, &sys->connection);
+	if (ret) {
+		IPAERR("sps_get_config() failed %d\n", ret);
+		goto fail;
+	}
+	sys->connection.options =
+	   SPS_O_AUTO_ENABLE | SPS_O_ACK_TRANSFERS | SPS_O_POLL;
+	ret = sps_set_config(sys->pipe, &sys->connection);
+	if (ret) {
+		IPAERR("sps_set_config() failed %d\n", ret);
+		goto fail;
+	}
+	ret = 0;
+fail:
+	return ret;
+}
+
+static int queue_rx_single(enum ipa_bridge_dir dir)
+{
+	struct ipa_bridge_pipe_context *sys_rx = &bridge[2 * dir];
+	struct ipa_pkt_info *info;
+	int ret;
+
+	info = kmalloc(sizeof(struct ipa_pkt_info), GFP_KERNEL);
+	if (!info) {
+		IPAERR("unable to alloc rx_pkt_info\n");
+		goto fail_pkt;
+	}
+
+	info->buffer = kmalloc(IPA_RX_SKB_SIZE, GFP_KERNEL | GFP_DMA);
+	if (!info->buffer) {
+		IPAERR("unable to alloc rx_pkt_buffer\n");
+		goto fail_buffer;
+	}
+
+	info->dma_address = dma_map_single(NULL, info->buffer, IPA_RX_SKB_SIZE,
+					   DMA_BIDIRECTIONAL);
+	if (info->dma_address == 0 || info->dma_address == ~0) {
+		IPAERR("dma_map_single failure %p for %p\n",
+				(void *)info->dma_address, info->buffer);
+		goto fail_dma;
+	}
+
+	info->len = ~0;
+
+	list_add_tail(&info->list_node, &sys_rx->head_desc_list);
+	ret = sps_transfer_one(sys_rx->pipe, info->dma_address,
+			       IPA_RX_SKB_SIZE, info,
+			       SPS_IOVEC_FLAG_INT | SPS_IOVEC_FLAG_EOT);
+	if (ret) {
+		list_del(&info->list_node);
+		dma_unmap_single(NULL, info->dma_address, IPA_RX_SKB_SIZE,
+				 DMA_BIDIRECTIONAL);
+		IPAERR("sps_transfer_one failed %d\n", ret);
+		goto fail_dma;
+	}
+	sys_rx->len++;
+	return 0;
+
+fail_dma:
+	kfree(info->buffer);
+fail_buffer:
+	kfree(info);
+fail_pkt:
+	IPAERR("failed\n");
+	return -ENOMEM;
+}
+
+static void ipa_do_bridge_work(enum ipa_bridge_dir dir)
+{
+	struct ipa_bridge_pipe_context *sys_rx = &bridge[2 * dir];
+	struct ipa_bridge_pipe_context *sys_tx = &bridge[2 * dir + 1];
+	struct ipa_pkt_info *tx_pkt;
+	struct ipa_pkt_info *rx_pkt;
+	struct ipa_pkt_info *tmp_pkt;
+	struct sps_iovec iov;
+	int ret;
+	int inactive_cycles = 0;
+
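+	/*
+	 * Polling loop: reap TX completions, recycle their buffers as fresh
+	 * RX descriptors, and forward each received packet to the TX pipe.
+	 * After polling_inactivity[dir] consecutive idle iterations, switch
+	 * back to interrupt mode and exit.
+	 */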
+	while (1) {
+		++inactive_cycles;
+		iov.addr = 0;
+		ret = sps_get_iovec(sys_tx->pipe, &iov);
+		if (ret || iov.addr == 0) {
+			/* no-op */
+		} else {
+			inactive_cycles = 0;
+
+			tx_pkt = list_first_entry(&sys_tx->head_desc_list,
+						  struct ipa_pkt_info,
+						  list_node);
+			list_move_tail(&tx_pkt->list_node,
+					&sys_tx->free_desc_list);
+			sys_tx->len--;
+			sys_tx->free_len++;
+			tx_pkt->len = ~0;
+		}
+
+		iov.addr = 0;
+		ret = sps_get_iovec(sys_rx->pipe, &iov);
+		if (ret || iov.addr == 0) {
+			/* no-op */
+		} else {
+			inactive_cycles = 0;
+
+			rx_pkt = list_first_entry(&sys_rx->head_desc_list,
+						  struct ipa_pkt_info,
+						  list_node);
+			list_del(&rx_pkt->list_node);
+			sys_rx->len--;
+			rx_pkt->len = iov.size;
+
+retry_alloc_tx:
+			if (list_empty(&sys_tx->free_desc_list)) {
+				tmp_pkt = kmalloc(sizeof(struct ipa_pkt_info),
+						GFP_KERNEL);
+				if (!tmp_pkt) {
+					pr_err_ratelimited("%s: unable to alloc tx_pkt_info\n",
+					       __func__);
+					usleep_range(polling_min_sleep[dir],
+							polling_max_sleep[dir]);
+					goto retry_alloc_tx;
+				}
+
+				tmp_pkt->buffer = kmalloc(IPA_RX_SKB_SIZE,
+						GFP_KERNEL | GFP_DMA);
+				if (!tmp_pkt->buffer) {
+					pr_err_ratelimited("%s: unable to alloc tx_pkt_buffer\n",
+					       __func__);
+					kfree(tmp_pkt);
+					usleep_range(polling_min_sleep[dir],
+							polling_max_sleep[dir]);
+					goto retry_alloc_tx;
+				}
+
+				tmp_pkt->dma_address = dma_map_single(NULL,
+						tmp_pkt->buffer,
+						IPA_RX_SKB_SIZE,
+						DMA_BIDIRECTIONAL);
+				if (tmp_pkt->dma_address == 0 ||
+						tmp_pkt->dma_address == ~0) {
+					pr_err_ratelimited("%s: dma_map_single failure %p for %p\n",
+					       __func__,
+					       (void *)tmp_pkt->dma_address,
+					       tmp_pkt->buffer);
+				}
+
+				list_add_tail(&tmp_pkt->list_node,
+						&sys_tx->free_desc_list);
+				sys_tx->free_len++;
+				alloc_cnt[dir]++;
+
+				tmp_pkt->len = ~0;
+			}
+
+			tx_pkt = list_first_entry(&sys_tx->free_desc_list,
+						  struct ipa_pkt_info,
+						  list_node);
+			list_del(&tx_pkt->list_node);
+			sys_tx->free_len--;
+
+retry_add_rx:
+			list_add_tail(&tx_pkt->list_node,
+					&sys_rx->head_desc_list);
+			ret = sps_transfer_one(sys_rx->pipe,
+					tx_pkt->dma_address,
+					IPA_RX_SKB_SIZE,
+					tx_pkt,
+					SPS_IOVEC_FLAG_INT |
+					SPS_IOVEC_FLAG_EOT);
+			if (ret) {
+				list_del(&tx_pkt->list_node);
+				pr_err_ratelimited("%s: sps_transfer_one failed %d\n",
+						__func__, ret);
+				usleep_range(polling_min_sleep[dir],
+						polling_max_sleep[dir]);
+				goto retry_add_rx;
+			}
+			sys_rx->len++;
+
+retry_add_tx:
+			list_add_tail(&rx_pkt->list_node,
+					&sys_tx->head_desc_list);
+			ret = sps_transfer_one(sys_tx->pipe,
+					       rx_pkt->dma_address,
+					       iov.size,
+					       rx_pkt,
+					       SPS_IOVEC_FLAG_INT |
+					       SPS_IOVEC_FLAG_EOT);
+			if (ret) {
+				pr_err_ratelimited("%s: fail to add to TX dir=%d\n",
+						__func__, dir);
+				list_del(&rx_pkt->list_node);
+				usleep_range(polling_min_sleep[dir],
+						polling_max_sleep[dir]);
+				goto retry_add_tx;
+			}
+			sys_tx->len++;
+		}
+
+		if (inactive_cycles >= polling_inactivity[dir]) {
+			ipa_switch_to_intr_mode(dir);
+			break;
+		}
+	}
+}
+
+static void ipa_rx_notify(struct sps_event_notify *notify)
+{
+	switch (notify->event_id) {
+	case SPS_EVENT_EOT:
+		ipa_switch_to_poll_mode(IPA_UL);
+		queue_work(ipa_ul_workqueue, &ul_work);
+		break;
+	default:
+		IPAERR("received unexpected event id %d\n", notify->event_id);
+	}
+}
+
+static int setup_bridge_to_ipa(enum ipa_bridge_dir dir)
+{
+	struct ipa_bridge_pipe_context *sys;
+	struct ipa_ep_cfg_mode mode;
+	dma_addr_t dma_addr;
+	int ipa_ep_idx;
+	int ret;
+	int i;
+
+	if (dir == IPA_DL) {
+		ipa_ep_idx = ipa_get_ep_mapping(ipa_ctx->mode,
+				IPA_CLIENT_A2_TETHERED_PROD);
+		if (ipa_ep_idx == -1) {
+			IPAERR("Invalid client.\n");
+			ret = -EINVAL;
+			goto tx_alloc_endpoint_failed;
+		}
+
+		sys = &bridge[IPA_DL_TO_IPA];
+		sys->pipe = sps_alloc_endpoint();
+		if (sys->pipe == NULL) {
+			IPAERR("tx alloc endpoint failed\n");
+			ret = -ENOMEM;
+			goto tx_alloc_endpoint_failed;
+		}
+		ret = sps_get_config(sys->pipe, &sys->connection);
+		if (ret) {
+			IPAERR("tx get config failed %d\n", ret);
+			goto tx_get_config_failed;
+		}
+
+		sys->connection.source = SPS_DEV_HANDLE_MEM;
+		sys->connection.src_pipe_index = ipa_ctx->a5_pipe_index++;
+		sys->connection.destination = ipa_ctx->bam_handle;
+		sys->connection.dest_pipe_index = ipa_ep_idx;
+		sys->connection.mode = SPS_MODE_DEST;
+		sys->connection.options =
+		   SPS_O_AUTO_ENABLE | SPS_O_ACK_TRANSFERS | SPS_O_POLL;
+		sys->desc_mem_buf.size = IPA_SYS_DESC_FIFO_SZ; /* 2k */
+		sys->desc_mem_buf.base = dma_alloc_coherent(NULL,
+				sys->desc_mem_buf.size,
+				&dma_addr,
+				0);
+		if (sys->desc_mem_buf.base == NULL) {
+			IPAERR("tx memory alloc failed\n");
+			ret = -ENOMEM;
+			goto tx_get_config_failed;
+		}
+		sys->desc_mem_buf.phys_base = dma_addr;
+		memset(sys->desc_mem_buf.base, 0x0, sys->desc_mem_buf.size);
+		sys->connection.desc = sys->desc_mem_buf;
+		sys->connection.event_thresh = IPA_EVENT_THRESHOLD;
+
+		ret = sps_connect(sys->pipe, &sys->connection);
+		if (ret < 0) {
+			IPAERR("tx connect error %d\n", ret);
+			goto tx_connect_failed;
+		}
+
+		INIT_LIST_HEAD(&sys->head_desc_list);
+		INIT_LIST_HEAD(&sys->free_desc_list);
+		spin_lock_init(&sys->spinlock);
+
+		ipa_ctx->ep[ipa_ep_idx].valid = 1;
+
+		mode.mode = IPA_DMA;
+		mode.dst = IPA_CLIENT_USB_CONS;
+		ret = ipa_cfg_ep_mode(ipa_ep_idx, &mode);
+		if (ret < 0) {
+			IPAERR("DMA mode set error %d\n", ret);
+			goto tx_mode_set_failed;
+		}
+
+		return 0;
+
+tx_mode_set_failed:
+		sps_disconnect(sys->pipe);
+tx_connect_failed:
+		dma_free_coherent(NULL, sys->desc_mem_buf.size,
+				sys->desc_mem_buf.base,
+				sys->desc_mem_buf.phys_base);
+tx_get_config_failed:
+		sps_free_endpoint(sys->pipe);
+tx_alloc_endpoint_failed:
+		return ret;
+	} else {
+
+		ipa_ep_idx = ipa_get_ep_mapping(ipa_ctx->mode,
+				IPA_CLIENT_A2_TETHERED_CONS);
+		if (ipa_ep_idx == -1) {
+			IPAERR("Invalid client.\n");
+			ret = -EINVAL;
+			goto rx_alloc_endpoint_failed;
+		}
+
+		sys = &bridge[IPA_UL_FROM_IPA];
+		sys->pipe = sps_alloc_endpoint();
+		if (sys->pipe == NULL) {
+			IPAERR("rx alloc endpoint failed\n");
+			ret = -ENOMEM;
+			goto rx_alloc_endpoint_failed;
+		}
+		ret = sps_get_config(sys->pipe, &sys->connection);
+		if (ret) {
+			IPAERR("rx get config failed %d\n", ret);
+			goto rx_get_config_failed;
+		}
+
+		sys->connection.source = ipa_ctx->bam_handle;
+		sys->connection.src_pipe_index = 7;
+		sys->connection.destination = SPS_DEV_HANDLE_MEM;
+		sys->connection.dest_pipe_index = ipa_ctx->a5_pipe_index++;
+		sys->connection.mode = SPS_MODE_SRC;
+		sys->connection.options = SPS_O_AUTO_ENABLE | SPS_O_EOT |
+		      SPS_O_ACK_TRANSFERS;
+		sys->desc_mem_buf.size = IPA_SYS_DESC_FIFO_SZ; /* 2k */
+		sys->desc_mem_buf.base = dma_alloc_coherent(NULL,
+				sys->desc_mem_buf.size,
+				&dma_addr,
+				0);
+		if (sys->desc_mem_buf.base == NULL) {
+			IPAERR("rx memory alloc failed\n");
+			ret = -ENOMEM;
+			goto rx_get_config_failed;
+		}
+		sys->desc_mem_buf.phys_base = dma_addr;
+		memset(sys->desc_mem_buf.base, 0x0, sys->desc_mem_buf.size);
+		sys->connection.desc = sys->desc_mem_buf;
+		sys->connection.event_thresh = IPA_EVENT_THRESHOLD;
+
+		ret = sps_connect(sys->pipe, &sys->connection);
+		if (ret < 0) {
+			IPAERR("rx connect error %d\n", ret);
+			goto rx_connect_failed;
+		}
+
+		sys->register_event.options = SPS_O_EOT;
+		sys->register_event.mode = SPS_TRIGGER_CALLBACK;
+		sys->register_event.xfer_done = NULL;
+		sys->register_event.callback = ipa_rx_notify;
+		sys->register_event.user = NULL;
+		ret = sps_register_event(sys->pipe, &sys->register_event);
+		if (ret < 0) {
+			IPAERR("tx register event error %d\n", ret);
+			goto rx_event_reg_failed;
+		}
+
+		INIT_LIST_HEAD(&sys->head_desc_list);
+		INIT_LIST_HEAD(&sys->free_desc_list);
+		spin_lock_init(&sys->spinlock);
+
+		for (i = 0; i < IPA_RX_POOL_CEIL; i++) {
+			ret = queue_rx_single(dir);
+			if (ret < 0)
+				IPAERR("queue fail %d %d\n", dir, i);
+		}
+
+		return 0;
+
+rx_event_reg_failed:
+		sps_disconnect(sys->pipe);
+rx_connect_failed:
+		dma_free_coherent(NULL,
+				sys->desc_mem_buf.size,
+				sys->desc_mem_buf.base,
+				sys->desc_mem_buf.phys_base);
+rx_get_config_failed:
+		sps_free_endpoint(sys->pipe);
+rx_alloc_endpoint_failed:
+		return ret;
+	}
+}
+
+static void bam_mux_rx_notify(struct sps_event_notify *notify)
+{
+	switch (notify->event_id) {
+	case SPS_EVENT_EOT:
+		ipa_switch_to_poll_mode(IPA_DL);
+		queue_work(ipa_dl_workqueue, &dl_work);
+		break;
+	default:
+		IPAERR("received unexpected event id %d\n", notify->event_id);
+	}
+}
+
+static int setup_bridge_to_a2(enum ipa_bridge_dir dir)
+{
+	struct ipa_bridge_pipe_context *sys;
+	struct a2_mux_pipe_connection pipe_conn = { 0, };
+	dma_addr_t dma_addr;
+	u32 a2_handle;
+	int ret;
+	int i;
+
+	if (dir == IPA_UL) {
+		ret = ipa_get_a2_mux_pipe_info(IPA_TO_A2, &pipe_conn);
+		if (ret) {
+			IPAERR("ipa_get_a2_mux_pipe_info failed IPA_TO_A2\n");
+			goto tx_alloc_endpoint_failed;
+		}
+
+		ret = sps_phy2h(pipe_conn.dst_phy_addr, &a2_handle);
+		if (ret) {
+			IPAERR("sps_phy2h failed (A2 BAM) %d\n", ret);
+			goto tx_alloc_endpoint_failed;
+		}
+
+		sys = &bridge[IPA_UL_TO_A2];
+		sys->pipe = sps_alloc_endpoint();
+		if (sys->pipe == NULL) {
+			IPAERR("tx alloc endpoint failed\n");
+			ret = -ENOMEM;
+			goto tx_alloc_endpoint_failed;
+		}
+		ret = sps_get_config(sys->pipe, &sys->connection);
+		if (ret) {
+			IPAERR("tx get config failed %d\n", ret);
+			goto tx_get_config_failed;
+		}
+
+		sys->connection.source = SPS_DEV_HANDLE_MEM;
+		sys->connection.src_pipe_index = ipa_ctx->a5_pipe_index++;
+		sys->connection.destination = a2_handle;
+		sys->connection.dest_pipe_index = pipe_conn.dst_pipe_index;
+		sys->connection.mode = SPS_MODE_DEST;
+		sys->connection.options =
+		   SPS_O_AUTO_ENABLE | SPS_O_ACK_TRANSFERS | SPS_O_POLL;
+		sys->desc_mem_buf.size = IPA_SYS_DESC_FIFO_SZ; /* 2k */
+		sys->desc_mem_buf.base = dma_alloc_coherent(NULL,
+				sys->desc_mem_buf.size,
+				&dma_addr,
+				0);
+		if (sys->desc_mem_buf.base == NULL) {
+			IPAERR("tx memory alloc failed\n");
+			ret = -ENOMEM;
+			goto tx_get_config_failed;
+		}
+		sys->desc_mem_buf.phys_base = dma_addr;
+		memset(sys->desc_mem_buf.base, 0x0, sys->desc_mem_buf.size);
+		sys->connection.desc = sys->desc_mem_buf;
+		sys->connection.event_thresh = IPA_EVENT_THRESHOLD;
+
+		ret = sps_connect(sys->pipe, &sys->connection);
+		if (ret < 0) {
+			IPAERR("tx connect error %d\n", ret);
+			goto tx_connect_failed;
+		}
+
+		INIT_LIST_HEAD(&sys->head_desc_list);
+		INIT_LIST_HEAD(&sys->free_desc_list);
+		spin_lock_init(&sys->spinlock);
+
+		return 0;
+
+tx_connect_failed:
+		dma_free_coherent(NULL,
+				sys->desc_mem_buf.size,
+				sys->desc_mem_buf.base,
+				sys->desc_mem_buf.phys_base);
+tx_get_config_failed:
+		sps_free_endpoint(sys->pipe);
+tx_alloc_endpoint_failed:
+		return ret;
+	} else { /* dir == IPA_DL */
+
+		ret = ipa_get_a2_mux_pipe_info(A2_TO_IPA, &pipe_conn);
+		if (ret) {
+			IPAERR("ipa_get_a2_mux_pipe_info failed A2_TO_IPA\n");
+			goto rx_alloc_endpoint_failed;
+		}
+
+		ret = sps_phy2h(pipe_conn.src_phy_addr, &a2_handle);
+		if (ret) {
+			IPAERR("sps_phy2h failed (A2 BAM) %d\n", ret);
+			goto rx_alloc_endpoint_failed;
+		}
+
+		sys = &bridge[IPA_DL_FROM_A2];
+		sys->pipe = sps_alloc_endpoint();
+		if (sys->pipe == NULL) {
+			IPAERR("rx alloc endpoint failed\n");
+			ret = -ENOMEM;
+			goto rx_alloc_endpoint_failed;
+		}
+		ret = sps_get_config(sys->pipe, &sys->connection);
+		if (ret) {
+			IPAERR("rx get config failed %d\n", ret);
+			goto rx_get_config_failed;
+		}
+
+		sys->connection.source = a2_handle;
+		sys->connection.src_pipe_index = pipe_conn.src_pipe_index;
+		sys->connection.destination = SPS_DEV_HANDLE_MEM;
+		sys->connection.dest_pipe_index = ipa_ctx->a5_pipe_index++;
+		sys->connection.mode = SPS_MODE_SRC;
+		sys->connection.options = SPS_O_AUTO_ENABLE | SPS_O_EOT |
+		      SPS_O_ACK_TRANSFERS;
+		sys->desc_mem_buf.size = IPA_SYS_DESC_FIFO_SZ; /* 2k */
+		sys->desc_mem_buf.base = dma_alloc_coherent(NULL,
+				sys->desc_mem_buf.size,
+				&dma_addr,
+				0);
+		if (sys->desc_mem_buf.base == NULL) {
+			IPAERR("rx memory alloc failed\n");
+			ret = -ENOMEM;
+			goto rx_get_config_failed;
+		}
+		sys->desc_mem_buf.phys_base = dma_addr;
+		memset(sys->desc_mem_buf.base, 0x0, sys->desc_mem_buf.size);
+		sys->connection.desc = sys->desc_mem_buf;
+		sys->connection.event_thresh = IPA_EVENT_THRESHOLD;
+
+		ret = sps_connect(sys->pipe, &sys->connection);
+		if (ret < 0) {
+			IPAERR("rx connect error %d\n", ret);
+			goto rx_connect_failed;
+		}
+
+		sys->register_event.options = SPS_O_EOT;
+		sys->register_event.mode = SPS_TRIGGER_CALLBACK;
+		sys->register_event.xfer_done = NULL;
+		sys->register_event.callback = bam_mux_rx_notify;
+		sys->register_event.user = NULL;
+		ret = sps_register_event(sys->pipe, &sys->register_event);
+		if (ret < 0) {
+			IPAERR("rx register event error %d\n", ret);
+			goto rx_event_reg_failed;
+		}
+
+		INIT_LIST_HEAD(&sys->head_desc_list);
+		INIT_LIST_HEAD(&sys->free_desc_list);
+		spin_lock_init(&sys->spinlock);
+
+
+			ret = queue_rx_single(dir);
+			if (ret < 0)
+				IPAERR("queue fail %d %d\n", dir, i);
+		}
+
+		return 0;
+
+rx_event_reg_failed:
+		sps_disconnect(sys->pipe);
+rx_connect_failed:
+		dma_free_coherent(NULL,
+				sys->desc_mem_buf.size,
+				sys->desc_mem_buf.base,
+				sys->desc_mem_buf.phys_base);
+rx_get_config_failed:
+		sps_free_endpoint(sys->pipe);
+rx_alloc_endpoint_failed:
+		return ret;
+	}
+}
+
+/**
+ * ipa_bridge_init() - initialize the tethered bridge, allocate UL and DL
+ * workqueues
+ *
+ * Return codes: 0: success, -ENOMEM: failure
+ */
+int ipa_bridge_init(void)
+{
+	int ret;
+
+	ipa_ul_workqueue = alloc_workqueue("ipa_ul",
+			WQ_MEM_RECLAIM | WQ_CPU_INTENSIVE, 1);
+	if (!ipa_ul_workqueue) {
+		IPAERR("ipa ul wq alloc failed\n");
+		ret = -ENOMEM;
+		goto fail_ul;
+	}
+
+	ipa_dl_workqueue = alloc_workqueue("ipa_dl",
+			WQ_MEM_RECLAIM | WQ_CPU_INTENSIVE, 1);
+	if (!ipa_dl_workqueue) {
+		IPAERR("ipa dl wq alloc failed\n");
+		ret = -ENOMEM;
+		goto fail_dl;
+	}
+
+	return 0;
+fail_dl:
+	destroy_workqueue(ipa_ul_workqueue);
+fail_ul:
+	return ret;
+}
+
+/**
+ * ipa_bridge_setup() - setup tethered SW bridge in specified direction
+ * @dir: downlink or uplink (from air interface perspective)
+ *
+ * Return codes:
+ * 0: success
+ * various negative error codes on errors
+ */
+int ipa_bridge_setup(enum ipa_bridge_dir dir)
+{
+	int ret;
+
+	if (atomic_inc_return(&ipa_ctx->ipa_active_clients) == 1)
+		ipa_enable_clks();
+
+	if (setup_bridge_to_a2(dir)) {
+		IPAERR("fail to setup SYS pipe to A2 %d\n", dir);
+		ret = -EINVAL;
+		goto bail_a2;
+	}
+
+	if (setup_bridge_to_ipa(dir)) {
+		IPAERR("fail to setup SYS pipe to IPA %d\n", dir);
+		ret = -EINVAL;
+		goto bail_ipa;
+	}
+
+	return 0;
+
+bail_ipa:
+	if (dir == IPA_UL)
+		sps_disconnect(bridge[IPA_UL_TO_A2].pipe);
+	else
+		sps_disconnect(bridge[IPA_DL_FROM_A2].pipe);
+bail_a2:
+	if (atomic_dec_return(&ipa_ctx->ipa_active_clients) == 0)
+		ipa_disable_clks();
+	return ret;
+}
+
+/**
+ * ipa_bridge_teardown() - teardown the tethered bridge in the specified dir
+ * @dir: downlink or uplink (from air interface perspective)
+ *
+ * Return codes:
+ * 0: always
+ */
+int ipa_bridge_teardown(enum ipa_bridge_dir dir)
+{
+	struct ipa_bridge_pipe_context *sys;
+
+	if (dir == IPA_UL) {
+		sys = &bridge[IPA_UL_TO_A2];
+		sps_disconnect(sys->pipe);
+		sys = &bridge[IPA_UL_FROM_IPA];
+		sps_disconnect(sys->pipe);
+	} else {
+		sys = &bridge[IPA_DL_FROM_A2];
+		sps_disconnect(sys->pipe);
+		sys = &bridge[IPA_DL_TO_IPA];
+		sps_disconnect(sys->pipe);
+	}
+
+	if (atomic_dec_return(&ipa_ctx->ipa_active_clients) == 0)
+		ipa_disable_clks();
+
+	return 0;
+}
+
+/**
+ * ipa_bridge_cleanup() - de-initialize the tethered bridge
+ *
+ * Return codes:
+ * None
+ */
+void ipa_bridge_cleanup(void)
+{
+	destroy_workqueue(ipa_dl_workqueue);
+	destroy_workqueue(ipa_ul_workqueue);
+}
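+
+/*
+ * Illustrative bring-up sketch: one plausible ordering of the bridge API
+ * above, assuming a hypothetical tethering control caller. Wrapped in
+ * "#if 0" since it only demonstrates the init/setup/teardown/cleanup flow.
+ */
+#if 0
+static int example_bridge_bringup(void)
+{
+	int ret;
+
+	ret = ipa_bridge_init();
+	if (ret)
+		return ret;
+
+	ret = ipa_bridge_setup(IPA_UL);
+	if (ret)
+		goto fail_ul;
+
+	ret = ipa_bridge_setup(IPA_DL);
+	if (ret)
+		goto fail_dl;
+
+	return 0;
+
+fail_dl:
+	ipa_bridge_teardown(IPA_UL);
+fail_ul:
+	ipa_bridge_cleanup();
+	return ret;
+}
+#endif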
diff --git a/drivers/platform/msm/ipa/ipa_client.c b/drivers/platform/msm/ipa/ipa_client.c
new file mode 100644
index 0000000..823b17d
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_client.c
@@ -0,0 +1,325 @@
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "ipa_i.h"
+
+static int ipa_connect_configure_sps(const struct ipa_connect_params *in,
+				     struct ipa_ep_context *ep, int ipa_ep_idx)
+{
+	int result = -EFAULT;
+
+	/* Default Config */
+	ep->ep_hdl = sps_alloc_endpoint();
+
+	if (ep->ep_hdl == NULL) {
+		IPAERR("SPS EP allocation failed.\n");
+		return -EFAULT;
+	}
+
+	result = sps_get_config(ep->ep_hdl,
+		&ep->connect);
+	if (result) {
+		IPAERR("fail to get config.\n");
+		return -EFAULT;
+	}
+
+	/* Specific Config */
+	if (IPA_CLIENT_IS_CONS(in->client)) {
+		ep->connect.mode = SPS_MODE_SRC;
+		ep->connect.destination =
+			in->client_bam_hdl;
+		ep->connect.source = ipa_ctx->bam_handle;
+		ep->connect.dest_pipe_index =
+			in->client_ep_idx;
+		ep->connect.src_pipe_index = ipa_ep_idx;
+	} else {
+		ep->connect.mode = SPS_MODE_DEST;
+		ep->connect.source = in->client_bam_hdl;
+		ep->connect.destination = ipa_ctx->bam_handle;
+		ep->connect.src_pipe_index = in->client_ep_idx;
+		ep->connect.dest_pipe_index = ipa_ep_idx;
+	}
+
+	return 0;
+}
+
+static int ipa_connect_allocate_fifo(const struct ipa_connect_params *in,
+				     struct sps_mem_buffer *mem_buff_ptr,
+				     bool *fifo_in_pipe_mem_ptr,
+				     u32 *fifo_pipe_mem_ofst_ptr,
+				     u32 fifo_size, int ipa_ep_idx)
+{
+	dma_addr_t dma_addr;
+	u32 ofst;
+	int result = -EFAULT;
+
+	mem_buff_ptr->size = fifo_size;
+	if (in->pipe_mem_preferred) {
+		if (ipa_pipe_mem_alloc(&ofst, fifo_size)) {
+			IPAERR("FIFO pipe mem alloc fail ep %u\n",
+				ipa_ep_idx);
+			mem_buff_ptr->base =
+				dma_alloc_coherent(NULL,
+				mem_buff_ptr->size,
+				&dma_addr, GFP_KERNEL);
+		} else {
+			memset(mem_buff_ptr, 0, sizeof(struct sps_mem_buffer));
+			result = sps_setup_bam2bam_fifo(mem_buff_ptr, ofst,
+				fifo_size, 1);
+			WARN_ON(result);
+			*fifo_in_pipe_mem_ptr = 1;
+			dma_addr = mem_buff_ptr->phys_base;
+			*fifo_pipe_mem_ofst_ptr = ofst;
+		}
+	} else {
+		mem_buff_ptr->base =
+			dma_alloc_coherent(NULL, mem_buff_ptr->size,
+			&dma_addr, GFP_KERNEL);
+	}
+	mem_buff_ptr->phys_base = dma_addr;
+	if (mem_buff_ptr->base == NULL) {
+		IPAERR("fail to get DMA memory.\n");
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+
+/**
+ * ipa_connect() - low-level IPA client connect
+ * @in:	[in] input parameters from client
+ * @sps:	[out] sps output from IPA needed by client for sps_connect
+ * @clnt_hdl:	[out] opaque client handle assigned by IPA to client
+ *
+ * Should be called by the driver of the peripheral that wants to connect to
+ * IPA in BAM-BAM mode. These peripherals are A2, USB and HSIC. This API
+ * expects the caller to take responsibility for adding any needed headers,
+ * routing and filtering tables and rules.
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa_connect(const struct ipa_connect_params *in, struct ipa_sps_params *sps,
+		u32 *clnt_hdl)
+{
+	int ipa_ep_idx;
+	int ipa_ep_idx_dst;
+	int result = -EFAULT;
+	struct ipa_ep_context *ep;
+
+	if (atomic_inc_return(&ipa_ctx->ipa_active_clients) == 1)
+		ipa_enable_clks();
+
+	if (in == NULL || sps == NULL || clnt_hdl == NULL ||
+	    in->client >= IPA_CLIENT_MAX ||
+	    in->ipa_ep_cfg.mode.dst >= IPA_CLIENT_MAX ||
+	    in->desc_fifo_sz == 0 || in->data_fifo_sz == 0) {
+		IPAERR("bad parm.\n");
+		result = -EINVAL;
+		goto fail;
+	}
+
+	ipa_ep_idx = ipa_get_ep_mapping(ipa_ctx->mode, in->client);
+	if (ipa_ep_idx == -1) {
+		IPAERR("fail to alloc EP.\n");
+		goto fail;
+	}
+
+	ep = &ipa_ctx->ep[ipa_ep_idx];
+
+	if (ep->valid) {
+		IPAERR("EP already allocated.\n");
+		goto fail;
+	}
+
+	if (IPA_CLIENT_IS_PROD(in->client) &&
+			(in->ipa_ep_cfg.mode.mode == IPA_DMA)) {
+		ipa_ep_idx_dst = ipa_get_ep_mapping(ipa_ctx->mode,
+				in->ipa_ep_cfg.mode.dst);
+		if ((ipa_ep_idx_dst == -1) ||
+				(ipa_ctx->ep[ipa_ep_idx_dst].valid)) {
+			IPADBG("dst EP for IPA input pipe doesn't yet exist\n");
+		}
+	}
+
+	memset(&ipa_ctx->ep[ipa_ep_idx], 0, sizeof(struct ipa_ep_context));
+
+	ep->valid = 1;
+	ep->client = in->client;
+	ep->notify = in->notify;
+	ep->priv = in->priv;
+
+	if (ipa_cfg_ep(ipa_ep_idx, &in->ipa_ep_cfg)) {
+		IPAERR("fail to configure EP.\n");
+		goto ipa_cfg_ep_fail;
+	}
+
+	result = ipa_connect_configure_sps(in, ep, ipa_ep_idx);
+	if (result) {
+		IPAERR("fail to configure SPS.\n");
+		goto ipa_cfg_ep_fail;
+	}
+
+	if (in->desc.base == NULL) {
+		result = ipa_connect_allocate_fifo(in, &ep->connect.desc,
+						  &ep->desc_fifo_in_pipe_mem,
+						  &ep->desc_fifo_pipe_mem_ofst,
+						  in->desc_fifo_sz, ipa_ep_idx);
+		if (result) {
+			IPAERR("fail to allocate DESC FIFO.\n");
+			goto desc_mem_alloc_fail;
+		}
+	} else {
+		IPADBG("client allocated DESC FIFO\n");
+		ep->connect.desc = in->desc;
+		ep->desc_fifo_client_allocated = 1;
+	}
+	IPADBG("Descriptor FIFO pa=0x%x, size=%d\n", ep->connect.desc.phys_base,
+	       ep->connect.desc.size);
+
+	if (in->data.base == NULL) {
+		result = ipa_connect_allocate_fifo(in, &ep->connect.data,
+						&ep->data_fifo_in_pipe_mem,
+						&ep->data_fifo_pipe_mem_ofst,
+						in->data_fifo_sz, ipa_ep_idx);
+		if (result) {
+			IPAERR("fail to allocate DATA FIFO.\n");
+			goto data_mem_alloc_fail;
+		}
+	} else {
+		IPADBG("client allocated DATA FIFO\n");
+		ep->connect.data = in->data;
+		ep->data_fifo_client_allocated = 1;
+	}
+	IPADBG("Data FIFO pa=0x%x, size=%d\n", ep->connect.data.phys_base,
+	       ep->connect.data.size);
+
+	ep->connect.event_thresh = IPA_EVENT_THRESHOLD;
+	ep->connect.options = SPS_O_AUTO_ENABLE;    /* BAM-to-BAM */
+
+	result = sps_connect(ep->ep_hdl, &ep->connect);
+	if (result) {
+		IPAERR("sps_connect fails.\n");
+		goto sps_connect_fail;
+	}
+
+	sps->ipa_bam_hdl = ipa_ctx->bam_handle;
+	sps->ipa_ep_idx = ipa_ep_idx;
+	*clnt_hdl = ipa_ep_idx;
+	memcpy(&sps->desc, &ep->connect.desc, sizeof(struct sps_mem_buffer));
+	memcpy(&sps->data, &ep->connect.data, sizeof(struct sps_mem_buffer));
+
+	return 0;
+
+sps_connect_fail:
+	if (!ep->data_fifo_in_pipe_mem)
+		dma_free_coherent(NULL,
+				  ep->connect.data.size,
+				  ep->connect.data.base,
+				  ep->connect.data.phys_base);
+	else
+		ipa_pipe_mem_free(ep->data_fifo_pipe_mem_ofst,
+				  ep->connect.data.size);
+
+data_mem_alloc_fail:
+	if (!ep->desc_fifo_in_pipe_mem)
+		dma_free_coherent(NULL,
+				  ep->connect.desc.size,
+				  ep->connect.desc.base,
+				  ep->connect.desc.phys_base);
+	else
+		ipa_pipe_mem_free(ep->desc_fifo_pipe_mem_ofst,
+				  ep->connect.desc.size);
+
+desc_mem_alloc_fail:
+	sps_free_endpoint(ep->ep_hdl);
+ipa_cfg_ep_fail:
+	memset(&ipa_ctx->ep[ipa_ep_idx], 0, sizeof(struct ipa_ep_context));
+fail:
+	if (atomic_dec_return(&ipa_ctx->ipa_active_clients) == 0)
+		ipa_disable_clks();
+
+	return result;
+}
+EXPORT_SYMBOL(ipa_connect);
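+
+/*
+ * Illustrative sketch of the connect/disconnect flow, assuming a hypothetical
+ * peripheral driver that already knows its own BAM handle, pipe index and the
+ * ipa_client_type it maps to (all passed in rather than invented here). Only
+ * fields referenced by ipa_connect() above are populated; FIFO sizes are
+ * examples.
+ */
+#if 0
+static int example_peripheral_attach(enum ipa_client_type client,
+				     u32 peer_bam_hdl, u32 peer_ep_idx,
+				     u32 *hdl)
+{
+	struct ipa_connect_params in;
+	struct ipa_sps_params sps;
+	int ret;
+
+	memset(&in, 0, sizeof(in));
+	in.client = client;
+	in.client_bam_hdl = peer_bam_hdl;
+	in.client_ep_idx = peer_ep_idx;
+	in.desc_fifo_sz = 0x800;
+	in.data_fifo_sz = 0x2000;
+
+	ret = ipa_connect(&in, &sps, hdl);
+	if (ret)
+		return ret;
+
+	/*
+	 * The peripheral driver would now run its own sps_connect() against
+	 * sps.ipa_bam_hdl / sps.ipa_ep_idx using the returned FIFOs, and call
+	 * ipa_disconnect(*hdl) on its teardown path.
+	 */
+	return 0;
+}
+#endif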
+
+/**
+ * ipa_disconnect() - low-level IPA client disconnect
+ * @clnt_hdl:	[in] opaque client handle assigned by IPA to client
+ *
+ * Should be called by the driver of the peripheral that wants to disconnect
+ * from IPA in BAM-BAM mode. This API expects the caller to take responsibility
+ * for freeing any headers, routing and filtering tables and rules it added.
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa_disconnect(u32 clnt_hdl)
+{
+	int result;
+	struct ipa_ep_context *ep;
+
+	if (clnt_hdl >= IPA_NUM_PIPES || ipa_ctx->ep[clnt_hdl].valid == 0) {
+		IPAERR("bad parm.\n");
+		return -EINVAL;
+	}
+
+	ep = &ipa_ctx->ep[clnt_hdl];
+
+	result = sps_disconnect(ep->ep_hdl);
+	if (result) {
+		IPAERR("SPS disconnect failed.\n");
+		return -EPERM;
+	}
+
+	if (!ep->desc_fifo_client_allocated &&
+	     ep->connect.desc.base) {
+		if (!ep->desc_fifo_in_pipe_mem)
+			dma_free_coherent(NULL,
+					  ep->connect.desc.size,
+					  ep->connect.desc.base,
+					  ep->connect.desc.phys_base);
+		else
+			ipa_pipe_mem_free(ep->desc_fifo_pipe_mem_ofst,
+					  ep->connect.desc.size);
+	}
+
+	if (!ep->data_fifo_client_allocated &&
+	     ep->connect.data.base) {
+		if (!ep->data_fifo_in_pipe_mem)
+			dma_free_coherent(NULL,
+					  ep->connect.data.size,
+					  ep->connect.data.base,
+					  ep->connect.data.phys_base);
+		else
+			ipa_pipe_mem_free(ep->data_fifo_pipe_mem_ofst,
+					  ep->connect.data.size);
+	}
+
+	result = sps_free_endpoint(ep->ep_hdl);
+	if (result) {
+		IPAERR("SPS de-alloc EP failed.\n");
+		return -EPERM;
+	}
+
+	memset(&ipa_ctx->ep[clnt_hdl], 0, sizeof(struct ipa_ep_context));
+
+	if (atomic_dec_return(&ipa_ctx->ipa_active_clients) == 0)
+		ipa_disable_clks();
+
+	return 0;
+}
+EXPORT_SYMBOL(ipa_disconnect);
+
diff --git a/drivers/platform/msm/ipa/ipa_debugfs.c b/drivers/platform/msm/ipa/ipa_debugfs.c
new file mode 100644
index 0000000..43b0178d
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_debugfs.c
@@ -0,0 +1,507 @@
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifdef CONFIG_DEBUG_FS
+
+#include <linux/debugfs.h>
+#include "ipa_i.h"
+
+
+#define IPA_MAX_MSG_LEN 1024
+static struct dentry *dent;
+static struct dentry *dfile_gen_reg;
+static struct dentry *dfile_ep_reg;
+static struct dentry *dfile_hdr;
+static struct dentry *dfile_ip4_rt;
+static struct dentry *dfile_ip6_rt;
+static struct dentry *dfile_ip4_flt;
+static struct dentry *dfile_ip6_flt;
+static char dbg_buff[IPA_MAX_MSG_LEN];
+static s8 ep_reg_idx;
+
+static ssize_t ipa_read_gen_reg(struct file *file, char __user *ubuf,
+		size_t count, loff_t *ppos)
+{
+	int nbytes;
+
+	nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN,
+			"IPA_VERSION=0x%x\n"
+			"IPA_COMP_HW_VERSION=0x%x\n"
+			"IPA_ROUTE=0x%x\n"
+			"IPA_FILTER=0x%x\n"
+			"IPA_SHARED_MEM_SIZE=0x%x\n"
+			"IPA_HEAD_OF_LINE_BLOCK_EN=0x%x\n",
+			ipa_read_reg(ipa_ctx->mmio, IPA_VERSION_OFST),
+			ipa_read_reg(ipa_ctx->mmio, IPA_COMP_HW_VERSION_OFST),
+			ipa_read_reg(ipa_ctx->mmio, IPA_ROUTE_OFST),
+			ipa_read_reg(ipa_ctx->mmio, IPA_FILTER_OFST),
+			ipa_read_reg(ipa_ctx->mmio, IPA_SHARED_MEM_SIZE_OFST),
+			ipa_read_reg(ipa_ctx->mmio,
+				IPA_HEAD_OF_LINE_BLOCK_EN_OFST));
+
+	return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, nbytes);
+}
+
+static ssize_t ipa_write_ep_reg(struct file *file, const char __user *buf,
+		size_t count, loff_t *ppos)
+{
+	unsigned long missing;
+	s8 option = 0;
+
+	if (sizeof(dbg_buff) < count + 1)
+		return -EFAULT;
+
+	missing = copy_from_user(dbg_buff, buf, count);
+	if (missing)
+		return -EFAULT;
+
+	dbg_buff[count] = '\0';
+	if (kstrtos8(dbg_buff, 0, &option))
+		return -EFAULT;
+
+	if (option >= IPA_NUM_PIPES) {
+		IPAERR("bad pipe specified %u\n", option);
+		return count;
+	}
+
+	ep_reg_idx = option;
+
+	return count;
+}
+
+static ssize_t ipa_read_ep_reg(struct file *file, char __user *ubuf,
+		size_t count, loff_t *ppos)
+{
+	int nbytes;
+	int i;
+	int start_idx;
+	int end_idx;
+	int size = 0;
+	int ret;
+	loff_t pos;
+
+	/* negative ep_reg_idx means all registers */
+	if (ep_reg_idx < 0) {
+		start_idx = 0;
+		end_idx = IPA_NUM_PIPES;
+	} else {
+		start_idx = ep_reg_idx;
+		end_idx = start_idx + 1;
+	}
+	pos = *ppos;
+	for (i = start_idx; i < end_idx; i++) {
+
+		nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN,
+				"IPA_ENDP_INIT_NAT_%u=0x%x\n"
+				"IPA_ENDP_INIT_HDR_%u=0x%x\n"
+				"IPA_ENDP_INIT_MODE_%u=0x%x\n"
+				"IPA_ENDP_INIT_AGGR_%u=0x%x\n"
+				"IPA_ENDP_INIT_ROUTE_%u=0x%x\n",
+				i, ipa_read_reg(ipa_ctx->mmio,
+					IPA_ENDP_INIT_NAT_n_OFST(i)),
+				i, ipa_read_reg(ipa_ctx->mmio,
+					IPA_ENDP_INIT_HDR_n_OFST(i)),
+				i, ipa_read_reg(ipa_ctx->mmio,
+					IPA_ENDP_INIT_MODE_n_OFST(i)),
+				i, ipa_read_reg(ipa_ctx->mmio,
+					IPA_ENDP_INIT_AGGR_n_OFST(i)),
+				i, ipa_read_reg(ipa_ctx->mmio,
+					IPA_ENDP_INIT_ROUTE_n_OFST(i)));
+		*ppos = pos;
+		ret = simple_read_from_buffer(ubuf, count, ppos, dbg_buff,
+					      nbytes);
+		if (ret < 0)
+			return ret;
+
+		size += ret;
+		ubuf += nbytes;
+		count -= nbytes;
+	}
+
+	*ppos = pos + size;
+	return size;
+}
+
+static ssize_t ipa_read_hdr(struct file *file, char __user *ubuf, size_t count,
+		loff_t *ppos)
+{
+	int nbytes = 0;
+	int cnt = 0;
+	int i = 0;
+	struct ipa_hdr_entry *entry;
+
+	mutex_lock(&ipa_ctx->lock);
+	list_for_each_entry(entry, &ipa_ctx->hdr_tbl.head_hdr_entry_list,
+			link) {
+		nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt,
+				   "name:%s len=%d ref=%d partial=%d lcl=%d ofst=%u ",
+				   entry->name,
+				   entry->hdr_len, entry->ref_cnt,
+				   entry->is_partial,
+				   ipa_ctx->hdr_tbl_lcl,
+				   entry->offset_entry->offset >> 2);
+		for (i = 0; i < entry->hdr_len; i++) {
+			scnprintf(dbg_buff + cnt + nbytes + i * 2,
+				  IPA_MAX_MSG_LEN - cnt - nbytes - i * 2,
+				  "%02x", entry->hdr[i]);
+		}
+		scnprintf(dbg_buff + cnt + nbytes + entry->hdr_len * 2,
+			  IPA_MAX_MSG_LEN - cnt - nbytes - entry->hdr_len * 2,
+			  "\n");
+		cnt += nbytes + entry->hdr_len * 2 + 1;
+	}
+	mutex_unlock(&ipa_ctx->lock);
+
+	return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, cnt);
+}
+
+static int ipa_attrib_dump(char *buff, size_t sz,
+		struct ipa_rule_attrib *attrib, enum ipa_ip_type ip)
+{
+	int nbytes = 0;
+	int cnt = 0;
+	uint32_t addr[4];
+	uint32_t mask[4];
+	int i;
+
+	if (attrib->attrib_mask & IPA_FLT_TOS) {
+		nbytes = scnprintf(buff + cnt, sz - cnt, "tos:%d ",
+				attrib->u.v4.tos);
+		cnt += nbytes;
+	}
+	if (attrib->attrib_mask & IPA_FLT_PROTOCOL) {
+		nbytes = scnprintf(buff + cnt, sz - cnt, "protocol:%d ",
+				attrib->u.v4.protocol);
+		cnt += nbytes;
+	}
+	if (attrib->attrib_mask & IPA_FLT_SRC_ADDR) {
+		if (ip == IPA_IP_v4) {
+			addr[0] = htonl(attrib->u.v4.src_addr);
+			mask[0] = htonl(attrib->u.v4.src_addr_mask);
+			nbytes = scnprintf(buff + cnt, sz - cnt,
+					"src_addr:%pI4 src_addr_mask:%pI4 ",
+					addr + 0, mask + 0);
+			cnt += nbytes;
+		} else if (ip == IPA_IP_v6) {
+			for (i = 0; i < 4; i++) {
+				addr[i] = htonl(attrib->u.v6.src_addr[i]);
+				mask[i] = htonl(attrib->u.v6.src_addr_mask[i]);
+			}
+			nbytes =
+			   scnprintf(buff + cnt, sz - cnt,
+					   "src_addr:%pI6 src_addr_mask:%pI6 ",
+					   addr + 0, mask + 0);
+			cnt += nbytes;
+		} else {
+			WARN_ON(1);
+		}
+	}
+	if (attrib->attrib_mask & IPA_FLT_DST_ADDR) {
+		if (ip == IPA_IP_v4) {
+			addr[0] = htonl(attrib->u.v4.dst_addr);
+			mask[0] = htonl(attrib->u.v4.dst_addr_mask);
+			nbytes =
+			   scnprintf(buff + cnt, sz - cnt,
+					   "dst_addr:%pI4 dst_addr_mask:%pI4 ",
+					   addr + 0, mask + 0);
+			cnt += nbytes;
+		} else if (ip == IPA_IP_v6) {
+			for (i = 0; i < 4; i++) {
+				addr[i] = htonl(attrib->u.v6.dst_addr[i]);
+				mask[i] = htonl(attrib->u.v6.dst_addr_mask[i]);
+			}
+			nbytes =
+			   scnprintf(buff + cnt, sz - cnt,
+					   "dst_addr:%pI6 dst_addr_mask:%pI6 ",
+					   addr + 0, mask + 0);
+			cnt += nbytes;
+		} else {
+			WARN_ON(1);
+		}
+	}
+	if (attrib->attrib_mask & IPA_FLT_SRC_PORT_RANGE) {
+		nbytes =
+		   scnprintf(buff + cnt, sz - cnt, "src_port_range:%u %u ",
+				   attrib->src_port_lo,
+			     attrib->src_port_hi);
+		cnt += nbytes;
+	}
+	if (attrib->attrib_mask & IPA_FLT_DST_PORT_RANGE) {
+		nbytes =
+		   scnprintf(buff + cnt, sz - cnt, "dst_port_range:%u %u ",
+				   attrib->dst_port_lo,
+			     attrib->dst_port_hi);
+		cnt += nbytes;
+	}
+	if (attrib->attrib_mask & IPA_FLT_TYPE) {
+		nbytes = scnprintf(buff + cnt, sz - cnt, "type:%d ",
+				attrib->type);
+		cnt += nbytes;
+	}
+	if (attrib->attrib_mask & IPA_FLT_CODE) {
+		nbytes = scnprintf(buff + cnt, sz - cnt, "code:%d ",
+				attrib->code);
+		cnt += nbytes;
+	}
+	if (attrib->attrib_mask & IPA_FLT_SPI) {
+		nbytes = scnprintf(buff + cnt, sz - cnt, "spi:%x ",
+				attrib->spi);
+		cnt += nbytes;
+	}
+	if (attrib->attrib_mask & IPA_FLT_SRC_PORT) {
+		nbytes = scnprintf(buff + cnt, sz - cnt, "src_port:%u ",
+				attrib->src_port);
+		cnt += nbytes;
+	}
+	if (attrib->attrib_mask & IPA_FLT_DST_PORT) {
+		nbytes = scnprintf(buff + cnt, sz - cnt, "dst_port:%u ",
+				attrib->dst_port);
+		cnt += nbytes;
+	}
+	if (attrib->attrib_mask & IPA_FLT_TC) {
+		nbytes = scnprintf(buff + cnt, sz - cnt, "tc:%d ",
+				attrib->u.v6.tc);
+		cnt += nbytes;
+	}
+	if (attrib->attrib_mask & IPA_FLT_FLOW_LABEL) {
+		nbytes = scnprintf(buff + cnt, sz - cnt, "flow_label:%x ",
+				attrib->u.v6.flow_label);
+		cnt += nbytes;
+	}
+	if (attrib->attrib_mask & IPA_FLT_NEXT_HDR) {
+		nbytes = scnprintf(buff + cnt, sz - cnt, "next_hdr:%d ",
+				attrib->u.v6.next_hdr);
+		cnt += nbytes;
+	}
+	if (attrib->attrib_mask & IPA_FLT_META_DATA) {
+		nbytes =
+		   scnprintf(buff + cnt, sz - cnt,
+				   "metadata:%x metadata_mask:%x",
+				   attrib->meta_data, attrib->meta_data_mask);
+		cnt += nbytes;
+	}
+	if (attrib->attrib_mask & IPA_FLT_FRAGMENT) {
+		nbytes = scnprintf(buff + cnt, sz - cnt, "frg ");
+		cnt += nbytes;
+	}
+	nbytes = scnprintf(buff + cnt, sz - cnt, "\n");
+	cnt += nbytes;
+
+	return cnt;
+}
+
+static int ipa_open_dbg(struct inode *inode, struct file *file)
+{
+	file->private_data = inode->i_private;
+	return 0;
+}
+
+static ssize_t ipa_read_rt(struct file *file, char __user *ubuf, size_t count,
+		loff_t *ppos)
+{
+	int nbytes = 0;
+	int cnt = 0;
+	int i = 0;
+	struct ipa_rt_tbl *tbl;
+	struct ipa_rt_entry *entry;
+	struct ipa_rt_tbl_set *set;
+	enum ipa_ip_type ip = (enum ipa_ip_type)file->private_data;
+	u32 hdr_ofst;
+
+	set = &ipa_ctx->rt_tbl_set[ip];
+
+	mutex_lock(&ipa_ctx->lock);
+	list_for_each_entry(tbl, &set->head_rt_tbl_list, link) {
+		i = 0;
+		list_for_each_entry(entry, &tbl->head_rt_rule_list, link) {
+			if (entry->hdr)
+				hdr_ofst = entry->hdr->offset_entry->offset;
+			else
+				hdr_ofst = 0;
+			nbytes = scnprintf(dbg_buff + cnt,
+					IPA_MAX_MSG_LEN - cnt,
+					"tbl_idx:%d tbl_name:%s tbl_ref:%u rule_idx:%d dst:%d ep:%d S:%u hdr_ofst[words]:%u attrib_mask:%08x ",
+					entry->tbl->idx, entry->tbl->name,
+					entry->tbl->ref_cnt, i, entry->rule.dst,
+					ipa_get_ep_mapping(ipa_ctx->mode,
+						entry->rule.dst),
+					   !ipa_ctx->hdr_tbl_lcl,
+					   hdr_ofst >> 2,
+					   entry->rule.attrib.attrib_mask);
+			cnt += nbytes;
+			cnt += ipa_attrib_dump(dbg_buff + cnt,
+					   IPA_MAX_MSG_LEN - cnt,
+					   &entry->rule.attrib,
+					   ip);
+			i++;
+		}
+	}
+	mutex_unlock(&ipa_ctx->lock);
+
+	return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, cnt);
+}
+
+static ssize_t ipa_read_flt(struct file *file, char __user *ubuf, size_t count,
+		loff_t *ppos)
+{
+	int nbytes = 0;
+	int cnt = 0;
+	int i;
+	int j;
+	struct ipa_flt_tbl *tbl;
+	struct ipa_flt_entry *entry;
+	enum ipa_ip_type ip = (enum ipa_ip_type)file->private_data;
+	struct ipa_rt_tbl *rt_tbl;
+
+	tbl = &ipa_ctx->glob_flt_tbl[ip];
+	mutex_lock(&ipa_ctx->lock);
+	i = 0;
+	list_for_each_entry(entry, &tbl->head_flt_rule_list, link) {
+		rt_tbl = (struct ipa_rt_tbl *)entry->rule.rt_tbl_hdl;
+		nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt,
+				   "ep_idx:global rule_idx:%d act:%d rt_tbl_idx:%d attrib_mask:%08x ",
+				   i, entry->rule.action, rt_tbl->idx,
+				   entry->rule.attrib.attrib_mask);
+		cnt += nbytes;
+		cnt += ipa_attrib_dump(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt,
+				&entry->rule.attrib, ip);
+		i++;
+	}
+
+	for (j = 0; j < IPA_NUM_PIPES; j++) {
+		tbl = &ipa_ctx->flt_tbl[j][ip];
+		i = 0;
+		list_for_each_entry(entry, &tbl->head_flt_rule_list, link) {
+			rt_tbl = (struct ipa_rt_tbl *)entry->rule.rt_tbl_hdl;
+			nbytes = scnprintf(dbg_buff + cnt,
+					IPA_MAX_MSG_LEN - cnt,
+					"ep_idx:%d rule_idx:%d act:%d rt_tbl_idx:%d attrib_mask:%08x ",
+					j, i, entry->rule.action, rt_tbl->idx,
+					entry->rule.attrib.attrib_mask);
+			cnt += nbytes;
+			cnt +=
+			   ipa_attrib_dump(dbg_buff + cnt,
+					   IPA_MAX_MSG_LEN - cnt,
+					   &entry->rule.attrib,
+					   ip);
+			i++;
+		}
+	}
+	mutex_unlock(&ipa_ctx->lock);
+
+	return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, cnt);
+}
+
+const struct file_operations ipa_gen_reg_ops = {
+	.read = ipa_read_gen_reg,
+};
+
+const struct file_operations ipa_ep_reg_ops = {
+	.read = ipa_read_ep_reg,
+	.write = ipa_write_ep_reg,
+};
+
+const struct file_operations ipa_hdr_ops = {
+	.read = ipa_read_hdr,
+};
+
+const struct file_operations ipa_rt_ops = {
+	.read = ipa_read_rt,
+	.open = ipa_open_dbg,
+};
+
+const struct file_operations ipa_flt_ops = {
+	.read = ipa_read_flt,
+	.open = ipa_open_dbg,
+};
+
+void ipa_debugfs_init(void)
+{
+	const mode_t read_only_mode = S_IRUSR | S_IRGRP | S_IROTH;
+	const mode_t read_write_mode = S_IRUSR | S_IRGRP | S_IROTH |
+			S_IWUSR | S_IWGRP | S_IWOTH;
+
+	dent = debugfs_create_dir("ipa", 0);
+	if (IS_ERR(dent)) {
+		IPAERR("fail to create folder in debug_fs.\n");
+		return;
+	}
+
+	dfile_gen_reg = debugfs_create_file("gen_reg", read_only_mode, dent, 0,
+			&ipa_gen_reg_ops);
+	if (!dfile_gen_reg || IS_ERR(dfile_gen_reg)) {
+		IPAERR("fail to create file for debug_fs gen_reg\n");
+		goto fail;
+	}
+
+	dfile_ep_reg = debugfs_create_file("ep_reg", read_write_mode, dent, 0,
+			&ipa_ep_reg_ops);
+	if (!dfile_ep_reg || IS_ERR(dfile_ep_reg)) {
+		IPAERR("fail to create file for debug_fs ep_reg\n");
+		goto fail;
+	}
+
+	dfile_hdr = debugfs_create_file("hdr", read_only_mode, dent, 0,
+			&ipa_hdr_ops);
+	if (!dfile_hdr || IS_ERR(dfile_hdr)) {
+		IPAERR("fail to create file for debug_fs hdr\n");
+		goto fail;
+	}
+
+	dfile_ip4_rt = debugfs_create_file("ip4_rt", read_only_mode, dent,
+			(void *)IPA_IP_v4, &ipa_rt_ops);
+	if (!dfile_ip4_rt || IS_ERR(dfile_ip4_rt)) {
+		IPAERR("fail to create file for debug_fs ip4 rt\n");
+		goto fail;
+	}
+
+	dfile_ip6_rt = debugfs_create_file("ip6_rt", read_only_mode, dent,
+			(void *)IPA_IP_v6, &ipa_rt_ops);
+	if (!dfile_ip6_rt || IS_ERR(dfile_ip6_rt)) {
+		IPAERR("fail to create file for debug_fs ip6 rt\n");
+		goto fail;
+	}
+
+	dfile_ip4_flt = debugfs_create_file("ip4_flt", read_only_mode, dent,
+			(void *)IPA_IP_v4, &ipa_flt_ops);
+	if (!dfile_ip4_flt || IS_ERR(dfile_ip4_flt)) {
+		IPAERR("fail to create file for debug_fs ip4 flt\n");
+		goto fail;
+	}
+
+	dfile_ip6_flt = debugfs_create_file("ip6_flt", read_only_mode, dent,
+			(void *)IPA_IP_v6, &ipa_flt_ops);
+	if (!dfile_ip6_flt || IS_ERR(dfile_ip6_flt)) {
+		IPAERR("fail to create file for debug_fs ip6 flt\n");
+		goto fail;
+	}
+
+	return;
+
+fail:
+	debugfs_remove_recursive(dent);
+}
+
+void ipa_debugfs_remove(void)
+{
+	if (IS_ERR(dent)) {
+		IPAERR("ipa_debugfs_remove: folder was not created.\n");
+		return;
+	}
+	debugfs_remove_recursive(dent);
+}
+
+#else /* !CONFIG_DEBUG_FS */
+void ipa_debugfs_init(void) {}
+void ipa_debugfs_remove(void) {}
+#endif
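+
+/*
+ * The nodes created above live under the debugfs mount point, typically
+ * /sys/kernel/debug/ipa/: gen_reg, ep_reg, hdr, ip4_rt, ip6_rt, ip4_flt and
+ * ip6_flt. For example, "echo 2 > ep_reg; cat ep_reg" restricts the endpoint
+ * register dump to pipe 2, while writing a negative value restores the dump
+ * of all pipes.
+ */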
+
diff --git a/drivers/platform/msm/ipa/ipa_dp.c b/drivers/platform/msm/ipa/ipa_dp.c
new file mode 100644
index 0000000..c677a6e
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_dp.c
@@ -0,0 +1,1038 @@
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/device.h>
+#include <linux/dmapool.h>
+#include <linux/list.h>
+#include <linux/netdevice.h>
+#include "ipa_i.h"
+
+#define list_next_entry(pos, member) \
+	list_entry(pos->member.next, typeof(*pos), member)
+/**
+ * ipa_write_done - this function will be (eventually) called when a Tx
+ * operation is complete
+ * @work:	work_struct used by the work queue
+ */
+void ipa_write_done(struct work_struct *work)
+{
+	struct ipa_tx_pkt_wrapper *tx_pkt;
+	struct ipa_tx_pkt_wrapper *next_pkt;
+	struct ipa_tx_pkt_wrapper *tx_pkt_expected;
+	unsigned long irq_flags;
+	struct ipa_mem_buffer mult = { 0 };
+	int i;
+	u16 cnt;
+
+	tx_pkt = container_of(work, struct ipa_tx_pkt_wrapper, work);
+	cnt = tx_pkt->cnt;
+	IPADBG("cnt=%d\n", cnt);
+
+	if (unlikely(cnt == 0))
+		WARN_ON(1);
+
+	if (cnt > 1 && cnt != 0xFFFF)
+		mult = tx_pkt->mult;
+
+	for (i = 0; i < cnt; i++) {
+		if (unlikely(tx_pkt == NULL))
+			WARN_ON(1);
+		spin_lock_irqsave(&tx_pkt->sys->spinlock, irq_flags);
+		tx_pkt_expected = list_first_entry(&tx_pkt->sys->head_desc_list,
+						   struct ipa_tx_pkt_wrapper,
+						   link);
+		if (unlikely(tx_pkt != tx_pkt_expected)) {
+			spin_unlock_irqrestore(&tx_pkt->sys->spinlock,
+					irq_flags);
+			WARN_ON(1);
+		}
+		next_pkt = list_next_entry(tx_pkt, link);
+		list_del(&tx_pkt->link);
+		tx_pkt->sys->len--;
+		spin_unlock_irqrestore(&tx_pkt->sys->spinlock, irq_flags);
+		dma_pool_free(ipa_ctx->one_kb_no_straddle_pool, tx_pkt->bounce,
+				tx_pkt->mem.phys_base);
+		if (tx_pkt->callback)
+			tx_pkt->callback(tx_pkt->user1, tx_pkt->user2);
+
+		kmem_cache_free(ipa_ctx->tx_pkt_wrapper_cache, tx_pkt);
+		tx_pkt = next_pkt;
+	}
+
+	if (mult.phys_base)
+		dma_free_coherent(NULL, mult.size, mult.base, mult.phys_base);
+}
+
+/**
+ * ipa_send_one() - Send a single descriptor
+ * @sys:	system pipe context
+ * @desc:	descriptor to send
+ *
+ * Return codes: 0: success, -EFAULT: failure
+ */
+int ipa_send_one(struct ipa_sys_context *sys, struct ipa_desc *desc)
+{
+	struct ipa_tx_pkt_wrapper *tx_pkt;
+	unsigned long irq_flags;
+	int result;
+	u16 sps_flags = SPS_IOVEC_FLAG_EOT | SPS_IOVEC_FLAG_INT;
+	dma_addr_t dma_address;
+	u16 len;
+
+	tx_pkt = kmem_cache_zalloc(ipa_ctx->tx_pkt_wrapper_cache, GFP_KERNEL);
+	if (!tx_pkt) {
+		IPAERR("failed to alloc tx wrapper\n");
+		goto fail_mem_alloc;
+	}
+
+	WARN_ON(desc->len > 512);
+
+	/*
+	 * Due to a HW limitation, we need to make sure that the packet does not
+	 * cross a 1KB boundary
+	 */
+	tx_pkt->bounce = dma_pool_alloc(ipa_ctx->one_kb_no_straddle_pool,
+			GFP_KERNEL, &dma_address);
+	if (!tx_pkt->bounce) {
+		dma_address = 0;
+	} else {
+		WARN_ON(!ipa_straddle_boundary
+		       ((u32)dma_address, (u32)dma_address + desc->len - 1,
+			1024));
+		memcpy(tx_pkt->bounce, desc->pyld, desc->len);
+	}
+
+	if (!dma_address) {
+		IPAERR("failed to DMA wrap\n");
+		goto fail_dma_map;
+	}
+
+	INIT_LIST_HEAD(&tx_pkt->link);
+	INIT_WORK(&tx_pkt->work, ipa_write_done);
+	tx_pkt->type = desc->type;
+	tx_pkt->cnt = 1;    /* only 1 desc in this "set" */
+
+	tx_pkt->mem.phys_base = dma_address;
+	tx_pkt->mem.base = desc->pyld;
+	tx_pkt->mem.size = desc->len;
+	tx_pkt->sys = sys;
+	tx_pkt->callback = desc->callback;
+	tx_pkt->user1 = desc->user1;
+	tx_pkt->user2 = desc->user2;
+
+	/*
+	 * Special treatment for immediate commands, where the structure of the
+	 * descriptor is different
+	 */
+	if (desc->type == IPA_IMM_CMD_DESC) {
+		sps_flags |= SPS_IOVEC_FLAG_IMME;
+		len = desc->opcode;
+	} else {
+		len = desc->len;
+	}
+
+	if (desc->type == IPA_IMM_CMD_DESC) {
+		IPADBG("sending cmd=%d pyld_len=%d sps_flags=%x\n",
+				desc->opcode, desc->len, sps_flags);
+		IPA_DUMP_BUFF(desc->pyld, dma_address, desc->len);
+	}
+
+	spin_lock_irqsave(&sys->spinlock, irq_flags);
+	list_add_tail(&tx_pkt->link, &sys->head_desc_list);
+	sys->len++;
+	result = sps_transfer_one(sys->ep->ep_hdl, dma_address, len, tx_pkt,
+			sps_flags);
+	if (result) {
+		IPAERR("sps_transfer_one failed rc=%d\n", result);
+		goto fail_sps_send;
+	}
+
+	spin_unlock_irqrestore(&sys->spinlock, irq_flags);
+
+	return 0;
+
+fail_sps_send:
+	list_del(&tx_pkt->link);
+	spin_unlock_irqrestore(&sys->spinlock, irq_flags);
+	dma_pool_free(ipa_ctx->one_kb_no_straddle_pool, tx_pkt->bounce,
+			dma_address);
+fail_dma_map:
+	kmem_cache_free(ipa_ctx->tx_pkt_wrapper_cache, tx_pkt);
+fail_mem_alloc:
+	return -EFAULT;
+}
+
+/**
+ * ipa_send() - Send multiple descriptors in one HW transaction
+ * @sys: system pipe context
+ * @num_desc: number of packets
+ * @desc: packets to send
+ *
+ * Return codes: 0: success, -EFAULT: failure
+ */
+int ipa_send(struct ipa_sys_context *sys, u16 num_desc, struct ipa_desc *desc)
+{
+	struct ipa_tx_pkt_wrapper *tx_pkt;
+	struct ipa_tx_pkt_wrapper *next_pkt;
+	struct sps_transfer transfer = { 0 };
+	struct sps_iovec *iovec;
+	unsigned long irq_flags;
+	dma_addr_t dma_addr;
+	int i;
+	int j;
+	int result;
+	int fail_dma_wrap;
+	uint size = num_desc * sizeof(struct sps_iovec);
+
+	for (i = 0; i < num_desc; i++) {
+		fail_dma_wrap = 0;
+		tx_pkt = kmem_cache_zalloc(ipa_ctx->tx_pkt_wrapper_cache,
+					   GFP_KERNEL);
+		if (!tx_pkt) {
+			IPAERR("failed to alloc tx wrapper\n");
+			goto failure;
+		}
+		/*
+		 * first desc of set is "special" as it holds the count and
+		 * other info
+		 */
+		if (i == 0) {
+			transfer.user = tx_pkt;
+			transfer.iovec =
+				dma_alloc_coherent(NULL, size, &dma_addr, 0);
+			transfer.iovec_phys = dma_addr;
+			transfer.iovec_count = num_desc;
+			if (!transfer.iovec) {
+				IPAERR("fail alloc DMA mem for sps xfr buff\n");
+				goto failure;
+			}
+
+			tx_pkt->mult.phys_base = dma_addr;
+			tx_pkt->mult.base = transfer.iovec;
+			tx_pkt->mult.size = size;
+			tx_pkt->cnt = num_desc;
+		}
+
+		iovec = &transfer.iovec[i];
+		iovec->flags = 0;
+
+		INIT_LIST_HEAD(&tx_pkt->link);
+		INIT_WORK(&tx_pkt->work, ipa_write_done);
+		tx_pkt->type = desc[i].type;
+
+		tx_pkt->mem.base = desc[i].pyld;
+		tx_pkt->mem.size = desc[i].len;
+
+		WARN_ON(tx_pkt->mem.size > 512);
+
+		/*
+		 * Due to a HW limitation, we need to make sure that the
+		 * packet does not cross a 1KB boundary
+		 */
+		tx_pkt->bounce =
+		   dma_pool_alloc(ipa_ctx->one_kb_no_straddle_pool, GFP_KERNEL,
+				   &tx_pkt->mem.phys_base);
+		if (!tx_pkt->bounce) {
+			tx_pkt->mem.phys_base = 0;
+		} else {
+			WARN_ON(!ipa_straddle_boundary(
+						(u32)tx_pkt->mem.phys_base,
+						(u32)tx_pkt->mem.phys_base +
+						tx_pkt->mem.size - 1, 1024));
+			memcpy(tx_pkt->bounce, tx_pkt->mem.base,
+					tx_pkt->mem.size);
+		}
+
+		if (!tx_pkt->mem.phys_base) {
+			IPAERR("failed to alloc tx wrapper\n");
+			fail_dma_wrap = 1;
+			goto failure;
+		}
+
+		tx_pkt->sys = sys;
+		tx_pkt->callback = desc[i].callback;
+		tx_pkt->user1 = desc[i].user1;
+		tx_pkt->user2 = desc[i].user2;
+
+		iovec->addr = tx_pkt->mem.phys_base;
+		spin_lock_irqsave(&sys->spinlock, irq_flags);
+		list_add_tail(&tx_pkt->link, &sys->head_desc_list);
+		sys->len++;
+		spin_unlock_irqrestore(&sys->spinlock, irq_flags);
+
+		/*
+		 * Special treatment for immediate commands, where the structure
+		 * of the descriptor is different
+		 */
+		if (desc[i].type == IPA_IMM_CMD_DESC) {
+			iovec->size = desc[i].opcode;
+			iovec->flags |= SPS_IOVEC_FLAG_IMME;
+		} else {
+			iovec->size = desc[i].len;
+		}
+
+		if (i == (num_desc - 1)) {
+			iovec->flags |= (SPS_IOVEC_FLAG_EOT |
+					SPS_IOVEC_FLAG_INT);
+			/* "mark" the last desc */
+			tx_pkt->cnt = 0xFFFF;
+		}
+	}
+
+	result = sps_transfer(sys->ep->ep_hdl, &transfer);
+	if (result) {
+		IPAERR("sps_transfer failed rc=%d\n", result);
+		goto failure;
+	}
+
+	return 0;
+
+failure:
+	tx_pkt = transfer.user;
+	for (j = 0; j < i; j++) {
+		spin_lock_irqsave(&sys->spinlock, irq_flags);
+		next_pkt = list_next_entry(tx_pkt, link);
+		list_del(&tx_pkt->link);
+		spin_unlock_irqrestore(&sys->spinlock, irq_flags);
+		dma_pool_free(ipa_ctx->one_kb_no_straddle_pool, tx_pkt->bounce,
+				tx_pkt->mem.phys_base);
+		kmem_cache_free(ipa_ctx->tx_pkt_wrapper_cache, tx_pkt);
+		tx_pkt = next_pkt;
+	}
+	if (i < num_desc)
+		/* last desc failed */
+		if (fail_dma_wrap)
+			kmem_cache_free(ipa_ctx->tx_pkt_wrapper_cache, tx_pkt);
+	if (transfer.iovec_phys)
+		dma_free_coherent(NULL, size, transfer.iovec,
+				  transfer.iovec_phys);
+
+	return -EFAULT;
+}
+
+/**
+ * ipa_cmd_ack - callback function which will be called by SPS driver after an
+ * immediate command is complete.
+ * @user1:	pointer to the descriptor of the transfer
+ * @user2:	not used
+ *
+ * Complete the immediate command's completion object; this releases the
+ * thread which waits on this completion object (ipa_send_cmd())
+ */
+static void ipa_cmd_ack(void *user1, void *user2)
+{
+	struct ipa_desc *desc = (struct ipa_desc *)user1;
+
+	if (!desc)
+		WARN_ON(1);
+	IPADBG("got ack for cmd=%d\n", desc->opcode);
+	complete(&desc->xfer_done);
+}
+
+/**
+ * ipa_send_cmd - send immediate commands
+ * @num_desc:	number of descriptors within the descr struct
+ * @descr:	descriptor structure
+ *
+ * Function will block till command gets ACK from IPA HW, caller needs
+ * to free any resources it allocated after function returns
+ */
+int ipa_send_cmd(u16 num_desc, struct ipa_desc *descr)
+{
+	struct ipa_desc *desc;
+
+	if (num_desc == 1) {
+		init_completion(&descr->xfer_done);
+
+		/* client should not set these */
+		if (descr->callback || descr->user1)
+			WARN_ON(1);
+
+		descr->callback = ipa_cmd_ack;
+		descr->user1 = descr;
+		if (ipa_send_one(&ipa_ctx->sys[IPA_A5_CMD], descr)) {
+			IPAERR("fail to send immediate command\n");
+			return -EFAULT;
+		}
+		wait_for_completion(&descr->xfer_done);
+	} else {
+		desc = &descr[num_desc - 1];
+		init_completion(&desc->xfer_done);
+
+		/* client should not set these */
+		if (desc->callback || desc->user1)
+			WARN_ON(1);
+
+		desc->callback = ipa_cmd_ack;
+		desc->user1 = desc;
+		if (ipa_send(&ipa_ctx->sys[IPA_A5_CMD], num_desc, descr)) {
+			IPAERR("fail to send multiple immediate command set\n");
+			return -EFAULT;
+		}
+		wait_for_completion(&desc->xfer_done);
+	}
+
+	return 0;
+}
+
+/**
+ * ipa_tx_notify() - Callback function which will be called by the SPS driver
+ * after a Tx operation is complete. Called in an interrupt context.
+ * @notify:	SPS driver supplied notification struct
+ */
+static void ipa_tx_notify(struct sps_event_notify *notify)
+{
+	struct ipa_tx_pkt_wrapper *tx_pkt;
+
+	IPADBG("event %d notified\n", notify->event_id);
+
+	switch (notify->event_id) {
+	case SPS_EVENT_EOT:
+		tx_pkt = notify->data.transfer.user;
+		queue_work(ipa_ctx->tx_wq, &tx_pkt->work);
+		break;
+	default:
+		IPAERR("received unexpected event id %d\n", notify->event_id);
+	}
+}
+
+/**
+ * ipa_handle_rx_core() - The core functionality of packet reception. This
+ * function is called from multiple code paths.
+ *
+ * All the packets on the Rx data path are received on the IPA_A5_LAN_WAN_IN
+ * endpoint. The function runs as long as there are packets in the pipe.
+ * For each packet:
+ *  - Disconnect the packet from the system pipe linked list
+ *  - Unmap the packet's skb, make it non-DMAable
+ *  - Free the packet from the cache
+ *  - Prepare a proper skb
+ *  - Call the endpoint's notify function, passing the skb in the parameters
+ *  - Replenish the rx cache
+ */
+void ipa_handle_rx_core(void)
+{
+	struct ipa_a5_mux_hdr *mux_hdr;
+	struct ipa_rx_pkt_wrapper *rx_pkt;
+	struct sk_buff *rx_skb;
+	struct sps_iovec iov;
+	unsigned long irq_flags;
+	u16 pull_len;
+	u16 padding;
+	int ret;
+	struct ipa_sys_context *sys = &ipa_ctx->sys[IPA_A5_LAN_WAN_IN];
+	struct ipa_ep_context *ep;
+
+	do {
+		ret = sps_get_iovec(sys->ep->ep_hdl, &iov);
+		if (ret) {
+			IPAERR("sps_get_iovec failed %d\n", ret);
+			break;
+		}
+
+		/* Break the loop when there are no more packets to receive */
+		if (iov.addr == 0)
+			break;
+
+		spin_lock_irqsave(&sys->spinlock, irq_flags);
+		if (list_empty(&sys->head_desc_list))
+			WARN_ON(1);
+		rx_pkt = list_first_entry(&sys->head_desc_list,
+					  struct ipa_rx_pkt_wrapper, link);
+		if (!rx_pkt)
+			WARN_ON(1);
+		rx_pkt->len = iov.size;
+		sys->len--;
+		list_del(&rx_pkt->link);
+		spin_unlock_irqrestore(&sys->spinlock, irq_flags);
+
+		IPADBG("--curr_cnt=%d\n", sys->len);
+
+		rx_skb = rx_pkt->skb;
+		dma_unmap_single(NULL, rx_pkt->dma_address, IPA_RX_SKB_SIZE,
+				 DMA_FROM_DEVICE);
+
+		/*
+		 * make it look like a real skb, "data" was already set at
+		 * alloc time
+		 */
+		rx_skb->tail = rx_skb->data + rx_pkt->len;
+		rx_skb->len = rx_pkt->len;
+		rx_skb->truesize = rx_pkt->len + sizeof(struct sk_buff);
+
+		/* the wrapper is not needed once the skb fields are filled in */
+		kmem_cache_free(ipa_ctx->rx_pkt_wrapper_cache, rx_pkt);
+
+		mux_hdr = (struct ipa_a5_mux_hdr *)rx_skb->data;
+
+		IPADBG("RX pkt len=%d IID=0x%x src=%d, flags=0x%x, meta=0x%x\n",
+		       rx_skb->len, ntohs(mux_hdr->interface_id),
+		       mux_hdr->src_pipe_index,
+		       mux_hdr->flags, ntohl(mux_hdr->metadata));
+
+		IPA_DUMP_BUFF(rx_skb->data, 0, rx_skb->len);
+
+		if (mux_hdr->src_pipe_index >= IPA_NUM_PIPES ||
+		    !ipa_ctx->ep[mux_hdr->src_pipe_index].valid ||
+		    !ipa_ctx->ep[mux_hdr->src_pipe_index].notify) {
+			IPAERR("drop pipe=%d ep_valid=%d notify=%p\n",
+			       mux_hdr->src_pipe_index,
+			       ipa_ctx->ep[mux_hdr->src_pipe_index].valid,
+			       ipa_ctx->ep[mux_hdr->src_pipe_index].notify);
+			dev_kfree_skb_any(rx_skb);
+			ipa_replenish_rx_cache();
+			continue;
+		}
+
+		ep = &ipa_ctx->ep[mux_hdr->src_pipe_index];
+		pull_len = sizeof(struct ipa_a5_mux_hdr);
+
+		/*
+		 * IP packet starts on word boundary
+		 * remove the MUX header and any padding and pass the frame to
+		 * the client which registered a rx callback on the "src pipe"
+		 */
+		padding = ep->cfg.hdr.hdr_len & 0x3;
+		if (padding)
+			pull_len += 4 - padding;
+
+		IPADBG("pulling %d bytes from skb\n", pull_len);
+		skb_pull(rx_skb, pull_len);
+		ep->notify(ep->priv, IPA_RECEIVE, (unsigned long)(rx_skb));
+		ipa_replenish_rx_cache();
+	} while (1);
+}
+
+/**
+ * ipa_rx_switch_to_intr_mode() - Operate the Rx data path in interrupt mode
+ */
+static void ipa_rx_switch_to_intr_mode(void)
+{
+	int ret;
+	struct ipa_sys_context *sys;
+
+	IPADBG("Enter");
+	if (!ipa_ctx->curr_polling_state) {
+		IPAERR("already in intr mode\n");
+		return;
+	}
+
+	sys = &ipa_ctx->sys[IPA_A5_LAN_WAN_IN];
+
+	ret = sps_get_config(sys->ep->ep_hdl, &sys->ep->connect);
+	if (ret) {
+		IPAERR("sps_get_config() failed %d\n", ret);
+		return;
+	}
+	sys->event.options = SPS_O_EOT;
+	ret = sps_register_event(sys->ep->ep_hdl, &sys->event);
+	if (ret) {
+		IPAERR("sps_register_event() failed %d\n", ret);
+		return;
+	}
+	sys->ep->connect.options =
+		SPS_O_AUTO_ENABLE | SPS_O_ACK_TRANSFERS | SPS_O_EOT;
+	ret = sps_set_config(sys->ep->ep_hdl, &sys->ep->connect);
+	if (ret) {
+		IPAERR("sps_set_config() failed %d\n", ret);
+		return;
+	}
+	ipa_handle_rx_core();
+	ipa_ctx->curr_polling_state = 0;
+}
+
+/**
+ * ipa_rx_switch_to_poll_mode() - Operate the Rx data path in polling mode
+ */
+static void ipa_rx_switch_to_poll_mode(void)
+{
+	int ret;
+	struct ipa_ep_context *ep;
+
+	IPADBG("Enter");
+	ep = ipa_ctx->sys[IPA_A5_LAN_WAN_IN].ep;
+
+	ret = sps_get_config(ep->ep_hdl, &ep->connect);
+	if (ret) {
+		IPAERR("sps_get_config() failed %d\n", ret);
+		return;
+	}
+	ep->connect.options =
+		SPS_O_AUTO_ENABLE | SPS_O_ACK_TRANSFERS | SPS_O_POLL;
+	ret = sps_set_config(ep->ep_hdl, &ep->connect);
+	if (ret) {
+		IPAERR("sps_set_config() failed %d\n", ret);
+		return;
+	}
+	ipa_ctx->curr_polling_state = 1;
+}
+
+/**
+ * ipa_rx_notify() - Callback function which is called by the SPS driver when a
+ * packet is received
+ * @notify:	SPS driver supplied notification information
+ *
+ * Called in an interrupt context, therefore the majority of the work is
+ * deferred using a work queue.
+ *
+ * After receiving a packet, the driver goes to polling mode and keeps pulling
+ * packets until the rx buffer is empty, then it goes back to interrupt mode.
+ * This prevents the CPU from handling too many interrupts when the
+ * throughput is high.
+ */
+static void ipa_rx_notify(struct sps_event_notify *notify)
+{
+	struct ipa_rx_pkt_wrapper *rx_pkt;
+
+	IPADBG("event %d notified\n", notify->event_id);
+
+	switch (notify->event_id) {
+	case SPS_EVENT_EOT:
+		if (!ipa_ctx->curr_polling_state) {
+			ipa_rx_switch_to_poll_mode();
+			rx_pkt = notify->data.transfer.user;
+			queue_work(ipa_ctx->rx_wq, &rx_pkt->work);
+		}
+		break;
+	default:
+		IPAERR("received unexpected event id %d\n", notify->event_id);
+	}
+}
+
+/**
+ * ipa_setup_sys_pipe() - Setup an IPA end-point in system-BAM mode and perform
+ * IPA EP configuration
+ * @sys_in:	[in] input needed to setup BAM pipe and config EP
+ * @clnt_hdl:	[out] client handle
+ *
+ * Returns:	0 on success, negative on failure
+ */
+int ipa_setup_sys_pipe(struct ipa_sys_connect_params *sys_in, u32 *clnt_hdl)
+{
+	int ipa_ep_idx;
+	int sys_idx = -1;
+	int result = -EFAULT;
+	dma_addr_t dma_addr;
+
+	if (sys_in == NULL || clnt_hdl == NULL ||
+	    sys_in->client >= IPA_CLIENT_MAX || sys_in->desc_fifo_sz == 0) {
+		IPAERR("bad parm.\n");
+		result = -EINVAL;
+		goto fail_bad_param;
+	}
+
+	ipa_ep_idx = ipa_get_ep_mapping(ipa_ctx->mode, sys_in->client);
+	if (ipa_ep_idx == -1) {
+		IPAERR("Invalid client.\n");
+		goto fail_bad_param;
+	}
+
+	if (ipa_ctx->ep[ipa_ep_idx].valid == 1) {
+		IPAERR("EP already allocated.\n");
+		goto fail_bad_param;
+	}
+
+	memset(&ipa_ctx->ep[ipa_ep_idx], 0, sizeof(struct ipa_ep_context));
+
+	ipa_ctx->ep[ipa_ep_idx].valid = 1;
+	ipa_ctx->ep[ipa_ep_idx].client = sys_in->client;
+
+	if (ipa_cfg_ep(ipa_ep_idx, &sys_in->ipa_ep_cfg)) {
+		IPAERR("fail to configure EP.\n");
+		goto fail_sps_api;
+	}
+
+	/* Default Config */
+	ipa_ctx->ep[ipa_ep_idx].ep_hdl = sps_alloc_endpoint();
+
+	if (ipa_ctx->ep[ipa_ep_idx].ep_hdl == NULL) {
+		IPAERR("SPS EP allocation failed.\n");
+		goto fail_sps_api;
+	}
+
+	result = sps_get_config(ipa_ctx->ep[ipa_ep_idx].ep_hdl,
+			&ipa_ctx->ep[ipa_ep_idx].connect);
+	if (result) {
+		IPAERR("fail to get config.\n");
+		goto fail_mem_alloc;
+	}
+
+	/* Specific Config */
+	if (IPA_CLIENT_IS_CONS(sys_in->client)) {
+		ipa_ctx->ep[ipa_ep_idx].connect.mode = SPS_MODE_SRC;
+		ipa_ctx->ep[ipa_ep_idx].connect.destination =
+			SPS_DEV_HANDLE_MEM;
+		ipa_ctx->ep[ipa_ep_idx].connect.source = ipa_ctx->bam_handle;
+		ipa_ctx->ep[ipa_ep_idx].connect.dest_pipe_index =
+			ipa_ctx->a5_pipe_index++;
+		ipa_ctx->ep[ipa_ep_idx].connect.src_pipe_index = ipa_ep_idx;
+		ipa_ctx->ep[ipa_ep_idx].connect.options =
+			SPS_O_AUTO_ENABLE | SPS_O_EOT | SPS_O_ACK_TRANSFERS;
+		if (ipa_ctx->polling_mode)
+			ipa_ctx->ep[ipa_ep_idx].connect.options |= SPS_O_POLL;
+	} else {
+		ipa_ctx->ep[ipa_ep_idx].connect.mode = SPS_MODE_DEST;
+		ipa_ctx->ep[ipa_ep_idx].connect.source = SPS_DEV_HANDLE_MEM;
+		ipa_ctx->ep[ipa_ep_idx].connect.destination =
+			ipa_ctx->bam_handle;
+		ipa_ctx->ep[ipa_ep_idx].connect.src_pipe_index =
+			ipa_ctx->a5_pipe_index++;
+		ipa_ctx->ep[ipa_ep_idx].connect.dest_pipe_index = ipa_ep_idx;
+		ipa_ctx->ep[ipa_ep_idx].connect.options =
+			SPS_O_AUTO_ENABLE | SPS_O_EOT;
+		if (ipa_ctx->polling_mode)
+			ipa_ctx->ep[ipa_ep_idx].connect.options |=
+				SPS_O_ACK_TRANSFERS | SPS_O_POLL;
+	}
+
+	ipa_ctx->ep[ipa_ep_idx].connect.desc.size = sys_in->desc_fifo_sz;
+	ipa_ctx->ep[ipa_ep_idx].connect.desc.base =
+	   dma_alloc_coherent(NULL, ipa_ctx->ep[ipa_ep_idx].connect.desc.size,
+			   &dma_addr, 0);
+	ipa_ctx->ep[ipa_ep_idx].connect.desc.phys_base = dma_addr;
+	if (ipa_ctx->ep[ipa_ep_idx].connect.desc.base == NULL) {
+		IPAERR("fail to get DMA desc memory.\n");
+		goto fail_mem_alloc;
+	}
+
+	ipa_ctx->ep[ipa_ep_idx].connect.event_thresh = IPA_EVENT_THRESHOLD;
+
+	result = sps_connect(ipa_ctx->ep[ipa_ep_idx].ep_hdl,
+			&ipa_ctx->ep[ipa_ep_idx].connect);
+	if (result) {
+		IPAERR("sps_connect fails.\n");
+		goto fail_sps_connect;
+	}
+
+	switch (ipa_ep_idx) {
+	case 1:
+		/* fall through */
+	case 2:
+		/* fall through */
+	case 3:
+		sys_idx = ipa_ep_idx;
+		break;
+	case 15:
+		sys_idx = IPA_A5_WLAN_AMPDU_OUT;
+		break;
+	default:
+		IPAERR("Invalid EP index.\n");
+		result = -EFAULT;
+		goto fail_register_event;
+	}
+
+	if (!ipa_ctx->polling_mode) {
+		if (IPA_CLIENT_IS_CONS(sys_in->client)) {
+			ipa_ctx->sys[sys_idx].event.options = SPS_O_EOT;
+			ipa_ctx->sys[sys_idx].event.mode = SPS_TRIGGER_CALLBACK;
+			ipa_ctx->sys[sys_idx].event.xfer_done = NULL;
+			ipa_ctx->sys[sys_idx].event.callback = ipa_rx_notify;
+			ipa_ctx->sys[sys_idx].event.user =
+				&ipa_ctx->sys[sys_idx];
+			result =
+			   sps_register_event(ipa_ctx->ep[ipa_ep_idx].ep_hdl,
+					      &ipa_ctx->sys[sys_idx].event);
+			if (result < 0) {
+				IPAERR("rx register event error %d\n", result);
+				goto fail_register_event;
+			}
+		} else {
+			ipa_ctx->sys[sys_idx].event.options = SPS_O_EOT;
+			ipa_ctx->sys[sys_idx].event.mode = SPS_TRIGGER_CALLBACK;
+			ipa_ctx->sys[sys_idx].event.xfer_done = NULL;
+			ipa_ctx->sys[sys_idx].event.callback = ipa_tx_notify;
+			ipa_ctx->sys[sys_idx].event.user =
+				&ipa_ctx->sys[sys_idx];
+			result =
+			   sps_register_event(ipa_ctx->ep[ipa_ep_idx].ep_hdl,
+					      &ipa_ctx->sys[sys_idx].event);
+			if (result < 0) {
+				IPAERR("tx register event error %d\n", result);
+				goto fail_register_event;
+			}
+		}
+	}
+
+	return 0;
+
+fail_register_event:
+	sps_disconnect(ipa_ctx->ep[ipa_ep_idx].ep_hdl);
+fail_sps_connect:
+	dma_free_coherent(NULL, ipa_ctx->ep[ipa_ep_idx].connect.desc.size,
+			  ipa_ctx->ep[ipa_ep_idx].connect.desc.base,
+			  ipa_ctx->ep[ipa_ep_idx].connect.desc.phys_base);
+fail_mem_alloc:
+	sps_free_endpoint(ipa_ctx->ep[ipa_ep_idx].ep_hdl);
+fail_sps_api:
+	memset(&ipa_ctx->ep[ipa_ep_idx], 0, sizeof(struct ipa_ep_context));
+fail_bad_param:
+	return result;
+}
+EXPORT_SYMBOL(ipa_setup_sys_pipe);
+
+/**
+ * ipa_teardown_sys_pipe() - Teardown the system-BAM pipe and cleanup IPA EP
+ * @clnt_hdl:	[in] the handle obtained from ipa_setup_sys_pipe
+ *
+ * Returns:	0 on success, negative on failure
+ */
+int ipa_teardown_sys_pipe(u32 clnt_hdl)
+{
+	if (clnt_hdl >= IPA_NUM_PIPES || ipa_ctx->ep[clnt_hdl].valid == 0) {
+		IPAERR("bad parm.\n");
+		return -EINVAL;
+	}
+
+	sps_disconnect(ipa_ctx->ep[clnt_hdl].ep_hdl);
+	dma_free_coherent(NULL, ipa_ctx->ep[clnt_hdl].connect.desc.size,
+			  ipa_ctx->ep[clnt_hdl].connect.desc.base,
+			  ipa_ctx->ep[clnt_hdl].connect.desc.phys_base);
+	sps_free_endpoint(ipa_ctx->ep[clnt_hdl].ep_hdl);
+	memset(&ipa_ctx->ep[clnt_hdl], 0, sizeof(struct ipa_ep_context));
+	return 0;
+}
+EXPORT_SYMBOL(ipa_teardown_sys_pipe);
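+
+/*
+ * Illustrative sketch of the system-pipe lifecycle, assuming a hypothetical
+ * A5 client; only fields read by ipa_setup_sys_pipe() above are set, and the
+ * descriptor FIFO size is an example value.
+ */
+#if 0
+static int example_sys_pipe(enum ipa_client_type client, u32 *hdl)
+{
+	struct ipa_sys_connect_params sys_in;
+	int ret;
+
+	memset(&sys_in, 0, sizeof(sys_in));
+	sys_in.client = client;
+	sys_in.desc_fifo_sz = 0x800;
+
+	ret = ipa_setup_sys_pipe(&sys_in, hdl);
+	if (ret)
+		return ret;
+
+	/* ... exchange traffic, e.g. via ipa_tx_dp() ... */
+
+	return ipa_teardown_sys_pipe(*hdl);
+}
+#endif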
+
+/**
+ * ipa_tx_comp() - Callback function which will call the user supplied callback
+ * function to release the skb, or release it on its own if no callback function
+ * was supplied.
+ * @user1:	the skb to release
+ * @user2:	the endpoint index, used to look up the EP notify callback
+ */
+static void ipa_tx_comp(void *user1, void *user2)
+{
+	struct sk_buff *skb = (struct sk_buff *)user1;
+	u32 ep_idx = (u32)user2;
+
+	IPADBG("skb=%p ep=%d\n", skb, ep_idx);
+
+	if (ipa_ctx->ep[ep_idx].notify)
+		ipa_ctx->ep[ep_idx].notify(ipa_ctx->ep[ep_idx].priv,
+				IPA_WRITE_DONE, (unsigned long)skb);
+	else
+		dev_kfree_skb_any(skb);
+}
+
+/**
+ * ipa_tx_dp() - Data-path tx handler
+ * @dst:	[in] which IPA destination to route tx packets to
+ * @skb:	[in] the packet to send
+ * @metadata:	[in] TX packet meta-data
+ *
+ * Data-path tx handler; this is used both for the SW data-path, which bypasses
+ * most IPA HW blocks, and for the regular HW data-path for WLAN AMPDU traffic
+ * only. If
+ * dst is a "valid" CONS type, then SW data-path is used. If dst is the
+ * WLAN_AMPDU PROD type, then HW data-path for WLAN AMPDU is used. Anything else
+ * is an error. For errors, client needs to free the skb as needed. For success,
+ * IPA driver will later invoke the client callback if one was supplied. That
+ * callback should free the skb. If no callback was supplied, the IPA driver
+ * will free the skb internally.
+ *
+ * Returns:	0 on success, negative on failure
+ */
+int ipa_tx_dp(enum ipa_client_type dst, struct sk_buff *skb,
+		struct ipa_tx_meta *meta)
+{
+	struct ipa_desc desc[2];
+	int ipa_ep_idx;
+	struct ipa_ip_packet_init *cmd;
+
+	memset(&desc, 0, 2 * sizeof(struct ipa_desc));
+
+	ipa_ep_idx = ipa_get_ep_mapping(ipa_ctx->mode, dst);
+	if (ipa_ep_idx == -1) {
+		IPAERR("dest EP does not exist.\n");
+		goto fail_gen;
+	}
+
+	if (ipa_ctx->ep[ipa_ep_idx].valid == 0) {
+		IPAERR("dest EP not valid.\n");
+		goto fail_gen;
+	}
+
+	if (IPA_CLIENT_IS_CONS(dst)) {
+		cmd = kzalloc(sizeof(struct ipa_ip_packet_init), GFP_KERNEL);
+		if (!cmd) {
+			IPAERR("failed to alloc immediate command object\n");
+			goto fail_mem_alloc;
+		}
+
+		cmd->destination_pipe_index = ipa_ep_idx;
+		if (meta && meta->mbim_stream_id_valid)
+			cmd->metadata = meta->mbim_stream_id;
+		desc[0].opcode = IPA_IP_PACKET_INIT;
+		desc[0].pyld = cmd;
+		desc[0].len = sizeof(struct ipa_ip_packet_init);
+		desc[0].type = IPA_IMM_CMD_DESC;
+		desc[1].pyld = skb->data;
+		desc[1].len = skb->len;
+		desc[1].type = IPA_DATA_DESC_SKB;
+		desc[1].callback = ipa_tx_comp;
+		desc[1].user1 = skb;
+		desc[1].user2 = (void *)ipa_ep_idx;
+
+		if (ipa_send(&ipa_ctx->sys[IPA_A5_LAN_WAN_OUT], 2, desc)) {
+			IPAERR("fail to send immediate command\n");
+			goto fail_send;
+		}
+	} else if (dst == IPA_CLIENT_A5_WLAN_AMPDU_PROD) {
+		desc[0].pyld = skb->data;
+		desc[0].len = skb->len;
+		desc[0].type = IPA_DATA_DESC_SKB;
+		desc[0].callback = ipa_tx_comp;
+		desc[0].user1 = skb;
+		desc[0].user2 = (void *)ipa_ep_idx;
+
+		if (ipa_send_one(&ipa_ctx->sys[IPA_A5_WLAN_AMPDU_OUT],
+					&desc[0])) {
+			IPAERR("fail to send skb\n");
+			goto fail_gen;
+		}
+	} else {
+		IPAERR("%d PROD is not supported.\n", dst);
+		goto fail_gen;
+	}
+
+	return 0;
+
+fail_send:
+	kfree(cmd);
+fail_mem_alloc:
+fail_gen:
+	return -EFAULT;
+}
+EXPORT_SYMBOL(ipa_tx_dp);
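+
+/*
+ * Illustrative tx sketch: on success the IPA driver (ipa_tx_comp() above)
+ * eventually frees or hands back the skb; on failure ownership stays with
+ * the caller. A NULL meta is allowed when no MBIM stream id is needed.
+ */
+#if 0
+static int example_tx(enum ipa_client_type dst, struct sk_buff *skb)
+{
+	int ret;
+
+	ret = ipa_tx_dp(dst, skb, NULL);
+	if (ret)
+		dev_kfree_skb_any(skb);
+
+	return ret;
+}
+#endif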
+
+/**
+ * ipa_handle_rx() - handle packet reception. This function is executed in the
+ * context of a work queue.
+ * @work: work struct needed by the work queue
+ *
+ * ipa_handle_rx_core() is run in polling mode. After all packets have been
+ * received, the driver switches back to interrupt mode.
+ */
+void ipa_handle_rx(struct work_struct *work)
+{
+	ipa_handle_rx_core();
+	ipa_rx_switch_to_intr_mode();
+}
+
+/**
+ * ipa_replenish_rx_cache() - Replenish the Rx packets cache.
+ *
+ * The function allocates buffers in the rx_pkt_wrapper_cache cache until there
+ * are IPA_RX_POOL_CEIL buffers in the cache.
+ *   - Allocate a buffer in the cache
+ *   - Initialize the packet's link
+ *   - Initialize the packet's work struct
+ *   - Allocate the packet's socket buffer (skb)
+ *   - Fill the packet's skb with data
+ *   - Make the packet DMAable
+ *   - Add the packet to the system pipe linked list
+ *   - Initiate a SPS transfer so that SPS driver will use this packet later.
+ */
+void ipa_replenish_rx_cache(void)
+{
+	void *ptr;
+	struct ipa_rx_pkt_wrapper *rx_pkt;
+	int ret;
+	int rx_len_cached;
+	unsigned long irq_flags;
+	struct ipa_sys_context *sys = &ipa_ctx->sys[IPA_A5_LAN_WAN_IN];
+
+	spin_lock_irqsave(&sys->spinlock, irq_flags);
+	rx_len_cached = sys->len;
+	spin_unlock_irqrestore(&sys->spinlock, irq_flags);
+
+	/* true RX data path is not currently exercised so drop the ceil */
+	while (rx_len_cached < (IPA_RX_POOL_CEIL >> 3)) {
+		rx_pkt = kmem_cache_zalloc(ipa_ctx->rx_pkt_wrapper_cache,
+					   GFP_KERNEL);
+		if (!rx_pkt) {
+			IPAERR("failed to alloc rx wrapper\n");
+			return;
+		}
+
+		INIT_LIST_HEAD(&rx_pkt->link);
+		INIT_WORK(&rx_pkt->work, ipa_handle_rx);
+
+		rx_pkt->skb = __dev_alloc_skb(IPA_RX_SKB_SIZE, GFP_KERNEL);
+		if (rx_pkt->skb == NULL) {
+			IPAERR("failed to alloc skb\n");
+			goto fail_skb_alloc;
+		}
+		ptr = skb_put(rx_pkt->skb, IPA_RX_SKB_SIZE);
+		rx_pkt->dma_address = dma_map_single(NULL, ptr,
+						     IPA_RX_SKB_SIZE,
+						     DMA_FROM_DEVICE);
+		if (rx_pkt->dma_address == 0 || rx_pkt->dma_address == ~0) {
+			IPAERR("dma_map_single failure %p for %p\n",
+			       (void *)rx_pkt->dma_address, ptr);
+			goto fail_dma_mapping;
+		}
+
+		spin_lock_irqsave(&sys->spinlock, irq_flags);
+		list_add_tail(&rx_pkt->link, &sys->head_desc_list);
+		rx_len_cached = ++sys->len;
+		spin_unlock_irqrestore(&sys->spinlock, irq_flags);
+
+		ret = sps_transfer_one(sys->ep->ep_hdl, rx_pkt->dma_address,
+				       IPA_RX_SKB_SIZE, rx_pkt,
+				       SPS_IOVEC_FLAG_INT);
+
+		if (ret) {
+			IPAERR("sps_transfer_one failed %d\n", ret);
+			goto fail_sps_transfer;
+		}
+
+		IPADBG("++curr_cnt=%d\n", sys->len);
+	}
+
+	return;
+
+fail_sps_transfer:
+	spin_lock_irqsave(&sys->spinlock, irq_flags);
+	list_del(&rx_pkt->link);
+	--sys->len;
+	spin_unlock_irqrestore(&sys->spinlock, irq_flags);
+	dma_unmap_single(NULL, rx_pkt->dma_address, IPA_RX_SKB_SIZE,
+			 DMA_FROM_DEVICE);
+fail_dma_mapping:
+	dev_kfree_skb_any(rx_pkt->skb);
+fail_skb_alloc:
+	kmem_cache_free(ipa_ctx->rx_pkt_wrapper_cache, rx_pkt);
+
+	return;
+}
+
+/**
+ * ipa_cleanup_rx() - release RX queue resources
+ *
+ */
+void ipa_cleanup_rx(void)
+{
+	struct ipa_rx_pkt_wrapper *rx_pkt;
+	struct ipa_rx_pkt_wrapper *r;
+	unsigned long irq_flags;
+	struct ipa_sys_context *sys = &ipa_ctx->sys[IPA_A5_LAN_WAN_IN];
+
+	spin_lock_irqsave(&sys->spinlock, irq_flags);
+	list_for_each_entry_safe(rx_pkt, r,
+				 &sys->head_desc_list, link) {
+		list_del(&rx_pkt->link);
+		dma_unmap_single(NULL, rx_pkt->dma_address, IPA_RX_SKB_SIZE,
+				 DMA_FROM_DEVICE);
+		dev_kfree_skb_any(rx_pkt->skb);
+		kmem_cache_free(ipa_ctx->rx_pkt_wrapper_cache, rx_pkt);
+	}
+	spin_unlock_irqrestore(&sys->spinlock, irq_flags);
+}
+
diff --git a/drivers/platform/msm/ipa/ipa_flt.c b/drivers/platform/msm/ipa/ipa_flt.c
new file mode 100644
index 0000000..81f3a80
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_flt.c
@@ -0,0 +1,811 @@
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "ipa_i.h"
+
+#define IPA_FLT_TABLE_WORD_SIZE			(4)
+#define IPA_FLT_ENTRY_MEMORY_ALLIGNMENT		(0x3)
+#define IPA_FLT_BIT_MASK			(0x1)
+#define IPA_FLT_TABLE_INDEX_NOT_FOUND		(-1)
+#define IPA_FLT_STATUS_OF_ADD_FAILED		(-1)
+#define IPA_FLT_STATUS_OF_DEL_FAILED		(-1)
+
+/**
+ * ipa_generate_flt_hw_rule() - generates the filtering hardware rule
+ * @ip: the ip address family type
+ * @entry: filtering entry
+ * @buf: output buffer. buf == NULL means the caller only wants to know the
+ *		size of the rule as seen by HW, so no valid buffer was passed
+ *		and a scratch buffer is used instead.
+ *		With this scheme the rule is generated twice: once into the
+ *		scratch buffer to learn its size, and a second time into the
+ *		caller-supplied buffer, which by then is of the required size.
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * caller needs to hold any needed locks to ensure integrity
+ *
+ */
+static int ipa_generate_flt_hw_rule(enum ipa_ip_type ip,
+		struct ipa_flt_entry *entry, u8 *buf)
+{
+	struct ipa_flt_rule_hw_hdr *hdr;
+	const struct ipa_flt_rule *rule =
+		(const struct ipa_flt_rule *)&entry->rule;
+	u16 en_rule = 0;
+	u8 tmp[IPA_RT_FLT_HW_RULE_BUF_SIZE];
+	u8 *start;
+
+	memset(tmp, 0, IPA_RT_FLT_HW_RULE_BUF_SIZE);
+	if (buf == NULL)
+		buf = tmp;
+
+	start = buf;
+	hdr = (struct ipa_flt_rule_hw_hdr *)buf;
+	hdr->u.hdr.action = entry->rule.action;
+	hdr->u.hdr.rt_tbl_idx = entry->rt_tbl->idx;
+	hdr->u.hdr.rsvd = 0;
+	buf += sizeof(struct ipa_flt_rule_hw_hdr);
+
+	if (ipa_generate_hw_rule(ip, &rule->attrib, &buf, &en_rule)) {
+		IPAERR("fail to generate hw rule\n");
+		return -EPERM;
+	}
+
+	IPADBG("en_rule %x\n", en_rule);
+
+	hdr->u.hdr.en_rule = en_rule;
+	ipa_write_32(hdr->u.word, (u8 *)hdr);
+
+	if (entry->hw_len == 0) {
+		entry->hw_len = buf - start;
+	} else if (entry->hw_len != (buf - start)) {
+		IPAERR("hw_len differs b/w passes passed=%x calc=%x\n",
+		       entry->hw_len, (buf - start));
+		return -EPERM;
+	}
+
+	return 0;
+}
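+
+/*
+ * Two-pass usage sketch (illustrative): the table builders below first call
+ * this routine with buf == NULL to learn entry->hw_len, then call it again
+ * with a real destination buffer once the table has been sized and allocated:
+ *
+ *	sizing pass, fills entry->hw_len:
+ *		if (ipa_generate_flt_hw_rule(ip, entry, NULL))
+ *			return -EPERM;
+ *	write pass into the caller-supplied buffer:
+ *		if (ipa_generate_flt_hw_rule(ip, entry, body))
+ *			return -EPERM;
+ */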
+
+/**
+ * ipa_get_flt_hw_tbl_size() - returns the size of HW filtering table
+ * @ip: the ip address family type
+ * @hdr_sz: header size
+ *
+ * Returns:	the total table size in bytes on success, negative on failure
+ *
+ * caller needs to hold any needed locks to ensure integrity
+ *
+ */
+static int ipa_get_flt_hw_tbl_size(enum ipa_ip_type ip, u32 *hdr_sz)
+{
+	struct ipa_flt_tbl *tbl;
+	struct ipa_flt_entry *entry;
+	u32 total_sz = 0;
+	u32 rule_set_sz;
+	int i;
+
+	*hdr_sz = 0;
+	tbl = &ipa_ctx->glob_flt_tbl[ip];
+	rule_set_sz = 0;
+	list_for_each_entry(entry, &tbl->head_flt_rule_list, link) {
+		if (ipa_generate_flt_hw_rule(ip, entry, NULL)) {
+			IPAERR("failed to find HW FLT rule size\n");
+			return -EPERM;
+		}
+		IPADBG("glob ip %d len %d\n", ip, entry->hw_len);
+		rule_set_sz += entry->hw_len;
+	}
+
+	if (rule_set_sz) {
+		tbl->sz = rule_set_sz + IPA_FLT_TABLE_WORD_SIZE;
+		/* this rule-set uses a word in header block */
+		*hdr_sz += IPA_FLT_TABLE_WORD_SIZE;
+		if (!tbl->in_sys) {
+			/* add the terminator */
+			total_sz += (rule_set_sz + IPA_FLT_TABLE_WORD_SIZE);
+			total_sz = (total_sz +
+					IPA_FLT_ENTRY_MEMORY_ALLIGNMENT) &
+					~IPA_FLT_ENTRY_MEMORY_ALLIGNMENT;
+		}
+	}
+
+	for (i = 0; i < IPA_NUM_PIPES; i++) {
+		tbl = &ipa_ctx->flt_tbl[i][ip];
+		rule_set_sz = 0;
+		list_for_each_entry(entry, &tbl->head_flt_rule_list, link) {
+			if (ipa_generate_flt_hw_rule(ip, entry, NULL)) {
+				IPAERR("failed to find HW FLT rule size\n");
+				return -EPERM;
+			}
+			IPADBG("pipe %d len %d\n", i, entry->hw_len);
+			rule_set_sz += entry->hw_len;
+		}
+
+		if (rule_set_sz) {
+			tbl->sz = rule_set_sz + IPA_FLT_TABLE_WORD_SIZE;
+			/* this rule-set uses a word in header block */
+			*hdr_sz += IPA_FLT_TABLE_WORD_SIZE;
+			if (!tbl->in_sys) {
+				/* add the terminator */
+				total_sz += (rule_set_sz +
+					    IPA_FLT_TABLE_WORD_SIZE);
+				total_sz = (total_sz +
+					IPA_FLT_ENTRY_MEMORY_ALLIGNMENT) &
+					~IPA_FLT_ENTRY_MEMORY_ALLIGNMENT;
+			}
+		}
+	}
+
+	*hdr_sz += IPA_FLT_TABLE_WORD_SIZE;
+	total_sz += *hdr_sz;
+	IPADBG("FLT HW TBL SZ %d HDR SZ %d IP %d\n", total_sz, *hdr_sz, ip);
+
+	return total_sz;
+}
+
+/**
+ * ipa_generate_flt_hw_tbl() - generates the filtering hardware table
+ * @ip:	[in] the ip address family type
+ * @mem:	[out] buffer to put the filtering table
+ *
+ * Returns:	0 on success, negative on failure
+ */
+int ipa_generate_flt_hw_tbl(enum ipa_ip_type ip, struct ipa_mem_buffer *mem)
+{
+	struct ipa_flt_tbl *tbl;
+	struct ipa_flt_entry *entry;
+	u32 hdr_top = 0;
+	int i;
+	u32 hdr_sz;
+	u32 offset;
+	u8 *hdr;
+	u8 *body;
+	u8 *base;
+	struct ipa_mem_buffer flt_tbl_mem;
+	u8 *ftbl_membody;
+
+	mem->size = ipa_get_flt_hw_tbl_size(ip, &hdr_sz);
+	mem->size = IPA_HW_TABLE_ALIGNMENT(mem->size);
+
+	if (mem->size == 0) {
+		IPAERR("flt tbl empty ip=%d\n", ip);
+		goto error;
+	}
+	mem->base = dma_alloc_coherent(NULL, mem->size, &mem->phys_base,
+			GFP_KERNEL);
+	if (!mem->base) {
+		IPAERR("fail to alloc DMA buff of size %d\n", mem->size);
+		goto error;
+	}
+
+	memset(mem->base, 0, mem->size);
+
+	/* build the flt tbl in the DMA buffer to submit to IPA HW */
+	base = hdr = (u8 *)mem->base;
+	body = base + hdr_sz;
+
+	/* write a dummy header to move cursor */
+	hdr = ipa_write_32(hdr_top, hdr);
+
+	tbl = &ipa_ctx->glob_flt_tbl[ip];
+
+	if (!list_empty(&tbl->head_flt_rule_list)) {
+		hdr_top |= IPA_FLT_BIT_MASK;
+		if (!tbl->in_sys) {
+			offset = body - base;
+			if (offset & IPA_FLT_ENTRY_MEMORY_ALLIGNMENT) {
+				IPAERR("offset is not word multiple %d\n",
+						offset);
+				goto proc_err;
+			}
+
+			offset &= ~IPA_FLT_ENTRY_MEMORY_ALLIGNMENT;
+			/* rule is at an offset from base */
+			offset |= IPA_FLT_BIT_MASK;
+			hdr = ipa_write_32(offset, hdr);
+
+			/* generate the rule-set */
+			list_for_each_entry(entry, &tbl->head_flt_rule_list,
+					link) {
+				if (ipa_generate_flt_hw_rule(ip, entry, body)) {
+					IPAERR("failed to gen HW FLT rule\n");
+					goto proc_err;
+				}
+				body += entry->hw_len;
+			}
+
+			/* write the rule-set terminator */
+			body = ipa_write_32(0, body);
+			if ((u32)body & IPA_FLT_ENTRY_MEMORY_ALLIGNMENT)
+				/* advance body to next word boundary */
+				body = body + (IPA_FLT_TABLE_WORD_SIZE -
+					((u32)body &
+					IPA_FLT_ENTRY_MEMORY_ALLIGNMENT));
+		} else {
+			WARN_ON(tbl->sz == 0);
+			/* allocate memory for the flt tbl */
+			flt_tbl_mem.size = tbl->sz;
+			flt_tbl_mem.base =
+			   dma_alloc_coherent(NULL, flt_tbl_mem.size,
+					   &flt_tbl_mem.phys_base, GFP_KERNEL);
+			if (!flt_tbl_mem.base) {
+				IPAERR("fail to alloc DMA buff of size %d\n",
+						flt_tbl_mem.size);
+				WARN_ON(1);
+				goto proc_err;
+			}
+
+			WARN_ON(flt_tbl_mem.phys_base &
+				IPA_FLT_ENTRY_MEMORY_ALLIGNMENT);
+			ftbl_membody = flt_tbl_mem.base;
+			memset(flt_tbl_mem.base, 0, flt_tbl_mem.size);
+			hdr = ipa_write_32(flt_tbl_mem.phys_base, hdr);
+
+			/* generate the rule-set */
+			list_for_each_entry(entry, &tbl->head_flt_rule_list,
+					link) {
+				if (ipa_generate_flt_hw_rule(ip, entry,
+							ftbl_membody)) {
+					IPAERR("failed to gen HW FLT rule\n");
+					WARN_ON(1);
+				}
+				ftbl_membody += entry->hw_len;
+			}
+
+			/* write the rule-set terminator */
+			ftbl_membody = ipa_write_32(0, ftbl_membody);
+			if (tbl->curr_mem.phys_base) {
+				WARN_ON(tbl->prev_mem.phys_base);
+				tbl->prev_mem = tbl->curr_mem;
+			}
+			tbl->curr_mem = flt_tbl_mem;
+		}
+	}
+
+	for (i = 0; i < IPA_NUM_PIPES; i++) {
+		tbl = &ipa_ctx->flt_tbl[i][ip];
+		if (!list_empty(&tbl->head_flt_rule_list)) {
+			/* pipe "i" is at bit "i+1" */
+			hdr_top |= (1 << (i + 1));
+			if (!tbl->in_sys) {
+				offset = body - base;
+				if (offset & IPA_FLT_ENTRY_MEMORY_ALLIGNMENT) {
+					IPAERR("ofst is not word multiple %d\n",
+					       offset);
+					goto proc_err;
+				}
+				offset &= ~IPA_FLT_ENTRY_MEMORY_ALLIGNMENT;
+				/* rule is at an offset from base */
+				offset |= IPA_FLT_BIT_MASK;
+				hdr = ipa_write_32(offset, hdr);
+
+				/* generate the rule-set */
+				list_for_each_entry(entry,
+						&tbl->head_flt_rule_list,
+						link) {
+					if (ipa_generate_flt_hw_rule(ip, entry,
+								body)) {
+						IPAERR("fail gen FLT rule\n");
+						goto proc_err;
+					}
+					body += entry->hw_len;
+				}
+
+				/* write the rule-set terminator */
+				body = ipa_write_32(0, body);
+				if ((u32)body & IPA_FLT_ENTRY_MEMORY_ALLIGNMENT)
+					/* advance body to next word boundary */
+					body = body + (IPA_FLT_TABLE_WORD_SIZE -
+						((u32)body &
+					IPA_FLT_ENTRY_MEMORY_ALLIGNMENT));
+			} else {
+				WARN_ON(tbl->sz == 0);
+				/* allocate memory for the flt tbl */
+				flt_tbl_mem.size = tbl->sz;
+				flt_tbl_mem.base =
+				   dma_alloc_coherent(NULL, flt_tbl_mem.size,
+						   &flt_tbl_mem.phys_base,
+						   GFP_KERNEL);
+				if (!flt_tbl_mem.base) {
+					IPAERR("fail alloc DMA buff size %d\n",
+							flt_tbl_mem.size);
+					WARN_ON(1);
+					goto proc_err;
+				}
+
+				WARN_ON(flt_tbl_mem.phys_base &
+				IPA_FLT_ENTRY_MEMORY_ALLIGNMENT);
+
+				ftbl_membody = flt_tbl_mem.base;
+				memset(flt_tbl_mem.base, 0, flt_tbl_mem.size);
+				hdr = ipa_write_32(flt_tbl_mem.phys_base, hdr);
+
+				/* generate the rule-set */
+				list_for_each_entry(entry,
+						&tbl->head_flt_rule_list,
+						link) {
+					if (ipa_generate_flt_hw_rule(ip, entry,
+							ftbl_membody)) {
+						IPAERR("fail gen FLT rule\n");
+						WARN_ON(1);
+					}
+					ftbl_membody += entry->hw_len;
+				}
+
+				/* write the rule-set terminator */
+				ftbl_membody =
+					ipa_write_32(0, ftbl_membody);
+				if (tbl->curr_mem.phys_base) {
+					WARN_ON(tbl->prev_mem.phys_base);
+					tbl->prev_mem = tbl->curr_mem;
+				}
+				tbl->curr_mem = flt_tbl_mem;
+			}
+		}
+	}
+
+	/* now write the hdr_top */
+	ipa_write_32(hdr_top, base);
+
+	return 0;
+proc_err:
+	dma_free_coherent(NULL, mem->size, mem->base, mem->phys_base);
+error:
+
+	return -EPERM;
+}
+
+static void __ipa_reap_sys_flt_tbls(enum ipa_ip_type ip)
+{
+	struct ipa_flt_tbl *tbl;
+	int i;
+
+	tbl = &ipa_ctx->glob_flt_tbl[ip];
+	if (tbl->prev_mem.phys_base) {
+		IPADBG("reaping glob flt tbl (prev) ip=%d\n", ip);
+		dma_free_coherent(NULL, tbl->prev_mem.size, tbl->prev_mem.base,
+				tbl->prev_mem.phys_base);
+		memset(&tbl->prev_mem, 0, sizeof(tbl->prev_mem));
+	}
+
+	if (list_empty(&tbl->head_flt_rule_list)) {
+		if (tbl->curr_mem.phys_base) {
+			IPADBG("reaping glob flt tbl (curr) ip=%d\n", ip);
+			dma_free_coherent(NULL, tbl->curr_mem.size,
+					tbl->curr_mem.base,
+					tbl->curr_mem.phys_base);
+			memset(&tbl->curr_mem, 0, sizeof(tbl->curr_mem));
+		}
+	}
+
+	for (i = 0; i < IPA_NUM_PIPES; i++) {
+		tbl = &ipa_ctx->flt_tbl[i][ip];
+		if (tbl->prev_mem.phys_base) {
+			IPADBG("reaping flt tbl (prev) pipe=%d ip=%d\n", i, ip);
+			dma_free_coherent(NULL, tbl->prev_mem.size,
+					tbl->prev_mem.base,
+					tbl->prev_mem.phys_base);
+			memset(&tbl->prev_mem, 0, sizeof(tbl->prev_mem));
+		}
+
+		if (list_empty(&tbl->head_flt_rule_list)) {
+			if (tbl->curr_mem.phys_base) {
+				IPADBG("reaping flt tbl (curr) pipe=%d ip=%d\n",
+						i, ip);
+				dma_free_coherent(NULL, tbl->curr_mem.size,
+						tbl->curr_mem.base,
+						tbl->curr_mem.phys_base);
+				memset(&tbl->curr_mem, 0,
+						sizeof(tbl->curr_mem));
+			}
+		}
+	}
+}
+
+static int __ipa_commit_flt(enum ipa_ip_type ip)
+{
+	struct ipa_desc desc = { 0 };
+	struct ipa_mem_buffer *mem;
+	void *cmd;
+	struct ipa_ip_v4_filter_init *v4;
+	struct ipa_ip_v6_filter_init *v6;
+	u16 avail;
+	u16 size;
+
+	mem = kmalloc(sizeof(struct ipa_mem_buffer), GFP_KERNEL);
+	if (!mem) {
+		IPAERR("failed to alloc memory object\n");
+		goto fail_alloc_mem;
+	}
+
+	if (ip == IPA_IP_v4) {
+		avail = IPA_RAM_V4_FLT_SIZE;
+		size = sizeof(struct ipa_ip_v4_filter_init);
+	} else {
+		avail = IPA_RAM_V6_FLT_SIZE;
+		size = sizeof(struct ipa_ip_v6_filter_init);
+	}
+	cmd = kmalloc(size, GFP_KERNEL);
+	if (!cmd) {
+		IPAERR("failed to alloc immediate command object\n");
+		goto fail_alloc_cmd;
+	}
+
+	if (ipa_generate_flt_hw_tbl(ip, mem)) {
+		IPAERR("fail to generate FLT HW TBL ip %d\n", ip);
+		goto fail_hw_tbl_gen;
+	}
+
+	if (mem->size > avail) {
+		IPAERR("tbl too big, needed %d avail %d\n", mem->size, avail);
+		goto fail_hw_tbl_gen;
+	}
+
+	if (ip == IPA_IP_v4) {
+		v4 = (struct ipa_ip_v4_filter_init *)cmd;
+		desc.opcode = IPA_IP_V4_FILTER_INIT;
+		v4->ipv4_rules_addr = mem->phys_base;
+		v4->size_ipv4_rules = mem->size;
+		v4->ipv4_addr = IPA_RAM_V4_FLT_OFST;
+	} else {
+		v6 = (struct ipa_ip_v6_filter_init *)cmd;
+		desc.opcode = IPA_IP_V6_FILTER_INIT;
+		v6->ipv6_rules_addr = mem->phys_base;
+		v6->size_ipv6_rules = mem->size;
+		v6->ipv6_addr = IPA_RAM_V6_FLT_OFST;
+	}
+
+	desc.pyld = cmd;
+	desc.len = size;
+	desc.type = IPA_IMM_CMD_DESC;
+	IPA_DUMP_BUFF(mem->base, mem->phys_base, mem->size);
+
+	if (ipa_send_cmd(1, &desc)) {
+		IPAERR("fail to send immediate command\n");
+		goto fail_send_cmd;
+	}
+
+	__ipa_reap_sys_flt_tbls(ip);
+	dma_free_coherent(NULL, mem->size, mem->base, mem->phys_base);
+	kfree(cmd);
+	kfree(mem);
+
+	return 0;
+
+fail_send_cmd:
+	if (mem->phys_base)
+		dma_free_coherent(NULL, mem->size, mem->base, mem->phys_base);
+fail_hw_tbl_gen:
+	kfree(cmd);
+fail_alloc_cmd:
+	kfree(mem);
+fail_alloc_mem:
+
+	return -EPERM;
+}
+
+static int __ipa_add_flt_rule(struct ipa_flt_tbl *tbl, enum ipa_ip_type ip,
+			      const struct ipa_flt_rule *rule, u8 add_rear,
+			      u32 *rule_hdl)
+{
+	struct ipa_flt_entry *entry;
+	struct ipa_tree_node *node;
+
+	if (!rule->rt_tbl_hdl) {
+		IPAERR("flt rule does not point to valid RT tbl\n");
+		goto error;
+	}
+
+	if (ipa_search(&ipa_ctx->rt_tbl_hdl_tree, rule->rt_tbl_hdl) == NULL) {
+		IPAERR("RT tbl not found\n");
+		goto error;
+	}
+
+	if (((struct ipa_rt_tbl *)rule->rt_tbl_hdl)->cookie != IPA_COOKIE) {
+		IPAERR("flt rule cookie is invalid\n");
+		goto error;
+	}
+
+	node = kmem_cache_zalloc(ipa_ctx->tree_node_cache, GFP_KERNEL);
+	if (!node) {
+		IPAERR("failed to alloc tree node object\n");
+		goto error;
+	}
+
+	entry = kmem_cache_zalloc(ipa_ctx->flt_rule_cache, GFP_KERNEL);
+	if (!entry) {
+		IPAERR("failed to alloc FLT rule object\n");
+		goto mem_alloc_fail;
+	}
+	INIT_LIST_HEAD(&entry->link);
+	entry->rule = *rule;
+	entry->cookie = IPA_COOKIE;
+	entry->rt_tbl = (struct ipa_rt_tbl *)rule->rt_tbl_hdl;
+	entry->tbl = tbl;
+	if (add_rear)
+		list_add_tail(&entry->link, &tbl->head_flt_rule_list);
+	else
+		list_add(&entry->link, &tbl->head_flt_rule_list);
+	tbl->rule_cnt++;
+	entry->rt_tbl->ref_cnt++;
+	*rule_hdl = (u32)entry;
+	IPADBG("add flt rule rule_cnt=%d\n", tbl->rule_cnt);
+
+	node->hdl = *rule_hdl;
+	if (ipa_insert(&ipa_ctx->flt_rule_hdl_tree, node)) {
+		IPAERR("failed to add to tree\n");
+		WARN_ON(1);
+	}
+
+	return 0;
+
+mem_alloc_fail:
+	kmem_cache_free(ipa_ctx->tree_node_cache, node);
+error:
+
+	return -EPERM;
+}
+
+static int __ipa_del_flt_rule(u32 rule_hdl)
+{
+	struct ipa_flt_entry *entry = (struct ipa_flt_entry *)rule_hdl;
+	struct ipa_tree_node *node;
+
+	if (entry == NULL || (entry->cookie != IPA_COOKIE)) {
+		IPAERR("bad params\n");
+
+		return -EINVAL;
+	}
+	node = ipa_search(&ipa_ctx->flt_rule_hdl_tree, rule_hdl);
+	if (node == NULL) {
+		IPAERR("lookup failed\n");
+
+		return -EPERM;
+	}
+	list_del(&entry->link);
+	entry->tbl->rule_cnt--;
+	entry->rt_tbl->ref_cnt--;
+	IPADBG("del flt rule rule_cnt=%d\n", entry->tbl->rule_cnt);
+	entry->cookie = 0;
+	kmem_cache_free(ipa_ctx->flt_rule_cache, entry);
+
+	/* remove the handle from the database */
+	rb_erase(&node->node, &ipa_ctx->flt_rule_hdl_tree);
+	kmem_cache_free(ipa_ctx->tree_node_cache, node);
+
+	return 0;
+}
+
+static int __ipa_add_global_flt_rule(enum ipa_ip_type ip,
+		const struct ipa_flt_rule *rule, u8 add_rear, u32 *rule_hdl)
+{
+	struct ipa_flt_tbl *tbl;
+
+	tbl = &ipa_ctx->glob_flt_tbl[ip];
+	IPADBG("add global flt rule ip=%d\n", ip);
+
+	return __ipa_add_flt_rule(tbl, ip, rule, add_rear, rule_hdl);
+}
+
+static int __ipa_add_ep_flt_rule(enum ipa_ip_type ip, enum ipa_client_type ep,
+				 const struct ipa_flt_rule *rule, u8 add_rear,
+				 u32 *rule_hdl)
+{
+	struct ipa_flt_tbl *tbl;
+	int ipa_ep_idx;
+
+	if (ip >= IPA_IP_MAX || rule == NULL || rule_hdl == NULL ||
+			ep >= IPA_CLIENT_MAX) {
+		IPAERR("bad parms\n");
+
+		return -EINVAL;
+	}
+	ipa_ep_idx = ipa_get_ep_mapping(ipa_ctx->mode, ep);
+	if (ipa_ep_idx == IPA_FLT_TABLE_INDEX_NOT_FOUND ||
+				ipa_ctx->ep[ipa_ep_idx].valid == 0) {
+		IPAERR("bad parms\n");
+
+		return -EINVAL;
+	}
+	tbl = &ipa_ctx->flt_tbl[ipa_ep_idx][ip];
+	IPADBG("add ep flt rule ip=%d ep=%d\n", ip, ep);
+
+	return __ipa_add_flt_rule(tbl, ip, rule, add_rear, rule_hdl);
+}
+
+/**
+ * ipa_add_flt_rule() - Add the specified filtering rules to SW and optionally
+ * commit to IPA HW
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa_add_flt_rule(struct ipa_ioc_add_flt_rule *rules)
+{
+	int i;
+	int result;
+
+	if (rules == NULL || rules->num_rules == 0 ||
+			rules->ip >= IPA_IP_MAX) {
+		IPAERR("bad parm\n");
+
+		return -EINVAL;
+	}
+
+	mutex_lock(&ipa_ctx->lock);
+	for (i = 0; i < rules->num_rules; i++) {
+		if (rules->global)
+			result = __ipa_add_global_flt_rule(rules->ip,
+					&rules->rules[i].rule,
+					rules->rules[i].at_rear,
+					&rules->rules[i].flt_rule_hdl);
+		else
+			result = __ipa_add_ep_flt_rule(rules->ip, rules->ep,
+					&rules->rules[i].rule,
+					rules->rules[i].at_rear,
+					&rules->rules[i].flt_rule_hdl);
+		if (result) {
+			IPAERR("failed to add flt rule %d\n", i);
+			rules->rules[i].status = IPA_FLT_STATUS_OF_ADD_FAILED;
+		} else {
+			rules->rules[i].status = 0;
+		}
+	}
+
+	if (rules->commit)
+		if (__ipa_commit_flt(rules->ip)) {
+			result = -EPERM;
+			goto bail;
+		}
+	result = 0;
+bail:
+	mutex_unlock(&ipa_ctx->lock);
+
+	return result;
+}
+EXPORT_SYMBOL(ipa_add_flt_rule);
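+
+/*
+ * Usage sketch (illustrative; assumes "ioc" was allocated by the caller with
+ * room for one element in its rules[] array and "rt_hdl" is a routing table
+ * handle obtained earlier from the routing API; match criteria in rule.attrib
+ * are omitted for brevity):
+ *
+ *	ioc->ip = IPA_IP_v4;
+ *	ioc->global = 1;
+ *	ioc->commit = 1;
+ *	ioc->num_rules = 1;
+ *	ioc->rules[0].at_rear = 1;
+ *	ioc->rules[0].rule.rt_tbl_hdl = rt_hdl;
+ *	if (ipa_add_flt_rule(ioc) || ioc->rules[0].status)
+ *		goto fail;
+ */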
+
+/**
+ * ipa_del_flt_rule() - Remove the specified filtering rules from SW and
+ * optionally commit to IPA HW
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa_del_flt_rule(struct ipa_ioc_del_flt_rule *hdls)
+{
+	int i;
+	int result;
+
+	if (hdls == NULL || hdls->num_hdls == 0 || hdls->ip >= IPA_IP_MAX) {
+		IPAERR("bad parm\n");
+
+		return -EINVAL;
+	}
+
+	mutex_lock(&ipa_ctx->lock);
+	for (i = 0; i < hdls->num_hdls; i++) {
+		if (__ipa_del_flt_rule(hdls->hdl[i].hdl)) {
+			IPAERR("failed to del rt rule %i\n", i);
+			hdls->hdl[i].status = IPA_FLT_STATUS_OF_DEL_FAILED;
+		} else {
+			hdls->hdl[i].status = 0;
+		}
+	}
+
+	if (hdls->commit)
+		if (__ipa_commit_flt(hdls->ip)) {
+			mutex_unlock(&ipa_ctx->lock);
+			result = -EPERM;
+			goto bail;
+		}
+	result = 0;
+bail:
+	mutex_unlock(&ipa_ctx->lock);
+
+	return result;
+}
+EXPORT_SYMBOL(ipa_del_flt_rule);
+
+/**
+ * ipa_commit_flt() - Commit the current SW filtering table of specified type to
+ * IPA HW
+ * @ip:	[in] the family of filtering tables
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa_commit_flt(enum ipa_ip_type ip)
+{
+	int result;
+
+	mutex_lock(&ipa_ctx->lock);
+
+	if (__ipa_commit_flt(ip)) {
+		result = -EPERM;
+		goto bail;
+	}
+	result = 0;
+
+bail:
+	mutex_unlock(&ipa_ctx->lock);
+
+	return result;
+}
+EXPORT_SYMBOL(ipa_commit_flt);
+
+/**
+ * ipa_reset_flt() - Reset the current SW filtering table of specified type
+ * (does not commit to HW)
+ * @ip:	[in] the family of filtering tables
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa_reset_flt(enum ipa_ip_type ip)
+{
+	struct ipa_flt_tbl *tbl;
+	struct ipa_flt_entry *entry;
+	struct ipa_flt_entry *next;
+	struct ipa_tree_node *node;
+	int i;
+
+	tbl = &ipa_ctx->glob_flt_tbl[ip];
+	mutex_lock(&ipa_ctx->lock);
+	IPADBG("reset flt ip=%d\n", ip);
+	list_for_each_entry_safe(entry, next, &tbl->head_flt_rule_list, link) {
+		node = ipa_search(&ipa_ctx->flt_rule_hdl_tree, (u32)entry);
+		if (node == NULL)
+			WARN_ON(1);
+		list_del(&entry->link);
+		entry->tbl->rule_cnt--;
+		entry->rt_tbl->ref_cnt--;
+		entry->cookie = 0;
+		kmem_cache_free(ipa_ctx->flt_rule_cache, entry);
+
+		/* remove the handle from the database */
+		rb_erase(&node->node, &ipa_ctx->flt_rule_hdl_tree);
+		kmem_cache_free(ipa_ctx->tree_node_cache, node);
+	}
+
+	for (i = 0; i < IPA_NUM_PIPES; i++) {
+		tbl = &ipa_ctx->flt_tbl[i][ip];
+		list_for_each_entry_safe(entry, next, &tbl->head_flt_rule_list,
+				link) {
+			node = ipa_search(&ipa_ctx->flt_rule_hdl_tree,
+					(u32)entry);
+			if (node == NULL)
+				WARN_ON(1);
+			list_del(&entry->link);
+			entry->tbl->rule_cnt--;
+			entry->rt_tbl->ref_cnt--;
+			entry->cookie = 0;
+			kmem_cache_free(ipa_ctx->flt_rule_cache, entry);
+
+			/* remove the handle from the database */
+			rb_erase(&node->node, &ipa_ctx->flt_rule_hdl_tree);
+			kmem_cache_free(ipa_ctx->tree_node_cache, node);
+		}
+	}
+	mutex_unlock(&ipa_ctx->lock);
+
+	return 0;
+}
+EXPORT_SYMBOL(ipa_reset_flt);
diff --git a/drivers/platform/msm/ipa/ipa_hdr.c b/drivers/platform/msm/ipa/ipa_hdr.c
new file mode 100644
index 0000000..4b9a500
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_hdr.c
@@ -0,0 +1,614 @@
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "ipa_i.h"
+
+static const u32 ipa_hdr_bin_sz[IPA_HDR_BIN_MAX] = { 8, 16, 32, 64 };
+
+/**
+ * ipa_generate_hdr_hw_tbl() - generates the headers table
+ * @mem:	[out] buffer to put the header table
+ *
+ * Returns:	0 on success, negative on failure
+ */
+int ipa_generate_hdr_hw_tbl(struct ipa_mem_buffer *mem)
+{
+	struct ipa_hdr_entry *entry;
+
+	mem->size = ipa_ctx->hdr_tbl.end;
+
+	if (mem->size == 0) {
+		IPAERR("hdr tbl empty\n");
+		return -EPERM;
+	}
+	IPADBG("tbl_sz=%d\n", ipa_ctx->hdr_tbl.end);
+
+	mem->base = dma_alloc_coherent(NULL, mem->size, &mem->phys_base,
+			GFP_KERNEL);
+	if (!mem->base) {
+		IPAERR("fail to alloc DMA buff of size %d\n", mem->size);
+		return -ENOMEM;
+	}
+
+	memset(mem->base, 0, mem->size);
+	list_for_each_entry(entry, &ipa_ctx->hdr_tbl.head_hdr_entry_list,
+			link) {
+		IPADBG("hdr of len %d ofst=%d\n", entry->hdr_len,
+				entry->offset_entry->offset);
+		memcpy(mem->base + entry->offset_entry->offset, entry->hdr,
+				entry->hdr_len);
+	}
+
+	return 0;
+}
+
+/*
+ * __ipa_commit_hdr() commits hdr to hardware
+ * This function needs to be called with a locked mutex.
+ */
+static int __ipa_commit_hdr(void)
+{
+	struct ipa_desc desc = { 0 };
+	struct ipa_mem_buffer *mem;
+	struct ipa_hdr_init_local *cmd;
+	u16 len;
+
+	mem = kmalloc(sizeof(struct ipa_mem_buffer), GFP_KERNEL);
+	if (!mem) {
+		IPAERR("failed to alloc memory object\n");
+		goto fail_alloc_mem;
+	}
+
+	/* the immediate command param size is the same for both local and system */
+	len = sizeof(struct ipa_hdr_init_local);
+
+	/*
+	 * we can use init_local ptr for init_system due to layout of the
+	 * struct
+	 */
+	cmd = kmalloc(len, GFP_KERNEL);
+	if (!cmd) {
+		IPAERR("failed to alloc immediate command object\n");
+		goto fail_alloc_cmd;
+	}
+
+	if (ipa_generate_hdr_hw_tbl(mem)) {
+		IPAERR("fail to generate HDR HW TBL\n");
+		goto fail_hw_tbl_gen;
+	}
+
+	if (ipa_ctx->hdr_tbl_lcl && mem->size > IPA_RAM_HDR_SIZE) {
+		IPAERR("tbl too big, needed %d avail %d\n", mem->size,
+				IPA_RAM_HDR_SIZE);
+		goto fail_hw_tbl_gen;
+	}
+
+	cmd->hdr_table_addr = mem->phys_base;
+	if (ipa_ctx->hdr_tbl_lcl) {
+		cmd->size_hdr_table = mem->size;
+		cmd->hdr_addr = IPA_RAM_HDR_OFST;
+		desc.opcode = IPA_HDR_INIT_LOCAL;
+	} else {
+		desc.opcode = IPA_HDR_INIT_SYSTEM;
+	}
+	desc.pyld = cmd;
+	desc.len = sizeof(struct ipa_hdr_init_local);
+	desc.type = IPA_IMM_CMD_DESC;
+	IPA_DUMP_BUFF(mem->base, mem->phys_base, mem->size);
+
+	if (ipa_send_cmd(1, &desc)) {
+		IPAERR("fail to send immediate command\n");
+		goto fail_send_cmd;
+	}
+
+	if (ipa_ctx->hdr_tbl_lcl) {
+		dma_free_coherent(NULL, mem->size, mem->base, mem->phys_base);
+	} else {
+		if (ipa_ctx->hdr_mem.phys_base) {
+			dma_free_coherent(NULL, ipa_ctx->hdr_mem.size,
+					  ipa_ctx->hdr_mem.base,
+					  ipa_ctx->hdr_mem.phys_base);
+		}
+		ipa_ctx->hdr_mem = *mem;
+	}
+	kfree(cmd);
+	kfree(mem);
+
+	return 0;
+
+fail_send_cmd:
+	if (mem->phys_base)
+		dma_free_coherent(NULL, mem->size, mem->base, mem->phys_base);
+fail_hw_tbl_gen:
+	kfree(cmd);
+fail_alloc_cmd:
+	kfree(mem);
+fail_alloc_mem:
+
+	return -EPERM;
+}
+
+static int __ipa_add_hdr(struct ipa_hdr_add *hdr)
+{
+	struct ipa_hdr_entry *entry;
+	struct ipa_hdr_offset_entry *offset;
+	struct ipa_tree_node *node;
+	u32 bin;
+	struct ipa_hdr_tbl *htbl = &ipa_ctx->hdr_tbl;
+
+	if (hdr->hdr_len == 0) {
+		IPAERR("bad parm\n");
+		goto error;
+	}
+
+	node = kmem_cache_zalloc(ipa_ctx->tree_node_cache, GFP_KERNEL);
+	if (!node) {
+		IPAERR("failed to alloc tree node object\n");
+		goto error;
+	}
+
+	entry = kmem_cache_zalloc(ipa_ctx->hdr_cache, GFP_KERNEL);
+	if (!entry) {
+		IPAERR("failed to alloc hdr object\n");
+		goto hdr_alloc_fail;
+	}
+
+	INIT_LIST_HEAD(&entry->link);
+
+	memcpy(entry->hdr, hdr->hdr, hdr->hdr_len);
+	entry->hdr_len = hdr->hdr_len;
+	strlcpy(entry->name, hdr->name, IPA_RESOURCE_NAME_MAX);
+	entry->is_partial = hdr->is_partial;
+	entry->cookie = IPA_COOKIE;
+
+	if (hdr->hdr_len <= ipa_hdr_bin_sz[IPA_HDR_BIN0])
+		bin = IPA_HDR_BIN0;
+	else if (hdr->hdr_len <= ipa_hdr_bin_sz[IPA_HDR_BIN1])
+		bin = IPA_HDR_BIN1;
+	else if (hdr->hdr_len <= ipa_hdr_bin_sz[IPA_HDR_BIN2])
+		bin = IPA_HDR_BIN2;
+	else if (hdr->hdr_len <= ipa_hdr_bin_sz[IPA_HDR_BIN3])
+		bin = IPA_HDR_BIN3;
+	else {
+		IPAERR("unexpected hdr len %d\n", hdr->hdr_len);
+		goto bad_hdr_len;
+	}
+
+	if (list_empty(&htbl->head_free_offset_list[bin])) {
+		offset = kmem_cache_zalloc(ipa_ctx->hdr_offset_cache,
+					   GFP_KERNEL);
+		if (!offset) {
+			IPAERR("failed to alloc hdr offset object\n");
+			goto ofst_alloc_fail;
+		}
+		INIT_LIST_HEAD(&offset->link);
+		/*
+		 * for a first item grow, set the bin and offset which are set
+		 * in stone
+		 */
+		offset->offset = htbl->end;
+		offset->bin = bin;
+		htbl->end += ipa_hdr_bin_sz[bin];
+		list_add(&offset->link,
+				&htbl->head_offset_list[bin]);
+	} else {
+		/* get the first free slot */
+		offset =
+		    list_first_entry(&htbl->head_free_offset_list[bin],
+				     struct ipa_hdr_offset_entry, link);
+		list_move(&offset->link, &htbl->head_offset_list[bin]);
+	}
+
+	entry->offset_entry = offset;
+	list_add(&entry->link, &htbl->head_hdr_entry_list);
+	htbl->hdr_cnt++;
+	IPADBG("add hdr of sz=%d hdr_cnt=%d ofst=%d\n", hdr->hdr_len,
+			htbl->hdr_cnt, offset->offset);
+
+	hdr->hdr_hdl = (u32) entry;
+	node->hdl = hdr->hdr_hdl;
+	if (ipa_insert(&ipa_ctx->hdr_hdl_tree, node)) {
+		IPAERR("failed to add to tree\n");
+		WARN_ON(1);
+	}
+
+	return 0;
+
+ofst_alloc_fail:
+	kmem_cache_free(ipa_ctx->hdr_offset_cache, offset);
+bad_hdr_len:
+	entry->cookie = 0;
+	kmem_cache_free(ipa_ctx->hdr_cache, entry);
+hdr_alloc_fail:
+	kmem_cache_free(ipa_ctx->tree_node_cache, node);
+error:
+	return -EPERM;
+}
+
+static int __ipa_del_hdr(u32 hdr_hdl)
+{
+	struct ipa_hdr_entry *entry = (struct ipa_hdr_entry *)hdr_hdl;
+	struct ipa_tree_node *node;
+	struct ipa_hdr_tbl *htbl = &ipa_ctx->hdr_tbl;
+
+	if (!entry || (entry->cookie != IPA_COOKIE) || (entry->ref_cnt != 0)) {
+		IPAERR("bad parm\n");
+		return -EINVAL;
+	}
+	node = ipa_search(&ipa_ctx->hdr_hdl_tree, hdr_hdl);
+	if (node == NULL) {
+		IPAERR("lookup failed\n");
+		return -EPERM;
+	}
+
+	IPADBG("del hdr of sz=%d hdr_cnt=%d ofst=%d\n", entry->hdr_len,
+			htbl->hdr_cnt, entry->offset_entry->offset);
+
+	/* move the offset entry to appropriate free list */
+	list_move(&entry->offset_entry->link,
+		  &htbl->head_free_offset_list[entry->offset_entry->bin]);
+	list_del(&entry->link);
+	htbl->hdr_cnt--;
+	entry->cookie = 0;
+	kmem_cache_free(ipa_ctx->hdr_cache, entry);
+
+	/* remove the handle from the database */
+	rb_erase(&node->node, &ipa_ctx->hdr_hdl_tree);
+	kmem_cache_free(ipa_ctx->tree_node_cache, node);
+
+	return 0;
+}
+
+/**
+ * ipa_add_hdr() - add the specified headers to SW and optionally commit them to
+ * IPA HW
+ * @hdrs:	[inout] set of headers to add
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa_add_hdr(struct ipa_ioc_add_hdr *hdrs)
+{
+	int i;
+	int result = -EFAULT;
+
+	if (hdrs == NULL || hdrs->num_hdrs == 0) {
+		IPAERR("bad parm\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&ipa_ctx->lock);
+	for (i = 0; i < hdrs->num_hdrs; i++) {
+		if (__ipa_add_hdr(&hdrs->hdr[i])) {
+			IPAERR("failed to add hdr %d\n", i);
+			hdrs->hdr[i].status = -1;
+		} else {
+			hdrs->hdr[i].status = 0;
+		}
+	}
+
+	if (hdrs->commit) {
+		if (__ipa_commit_hdr()) {
+			result = -EPERM;
+			goto bail;
+		}
+	}
+	result = 0;
+bail:
+	mutex_unlock(&ipa_ctx->lock);
+	return result;
+}
+EXPORT_SYMBOL(ipa_add_hdr);
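+
+/*
+ * Usage sketch (illustrative; assumes "ioc" was allocated with room for one
+ * element in its hdr[] array, "eth_hdr" holds a 14 byte Ethernet header and
+ * "cli_eth_hdr" is just an example name):
+ *
+ *	ioc->commit = 1;
+ *	ioc->num_hdrs = 1;
+ *	strlcpy(ioc->hdr[0].name, "cli_eth_hdr", IPA_RESOURCE_NAME_MAX);
+ *	memcpy(ioc->hdr[0].hdr, eth_hdr, 14);
+ *	ioc->hdr[0].hdr_len = 14;
+ *	ioc->hdr[0].is_partial = 0;
+ *	if (ipa_add_hdr(ioc) || ioc->hdr[0].status)
+ *		goto fail;
+ */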
+
+/**
+ * ipa_del_hdr() - Remove the specified headers from SW and optionally commit them
+ * to IPA HW
+ * @hdls:	[inout] set of headers to delete
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa_del_hdr(struct ipa_ioc_del_hdr *hdls)
+{
+	int i;
+	int result = -EFAULT;
+
+	if (hdls == NULL || hdls->num_hdls == 0) {
+		IPAERR("bad parm\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&ipa_ctx->lock);
+	for (i = 0; i < hdls->num_hdls; i++) {
+		if (__ipa_del_hdr(hdls->hdl[i].hdl)) {
+			IPAERR("failed to del hdr %i\n", i);
+			hdls->hdl[i].status = -1;
+		} else {
+			hdls->hdl[i].status = 0;
+		}
+	}
+
+	if (hdls->commit) {
+		if (__ipa_commit_hdr()) {
+			result = -EPERM;
+			goto bail;
+		}
+	}
+	result = 0;
+bail:
+	mutex_unlock(&ipa_ctx->lock);
+	return result;
+}
+EXPORT_SYMBOL(ipa_del_hdr);
+
+/**
+ * ipa_dump_hdr() - prints all the headers in the header table in SW
+ *
+ * Note:	Should not be called from atomic context
+ */
+void ipa_dump_hdr(void)
+{
+	struct ipa_hdr_entry *entry;
+
+	IPADBG("START\n");
+	mutex_lock(&ipa_ctx->lock);
+	list_for_each_entry(entry, &ipa_ctx->hdr_tbl.head_hdr_entry_list,
+			link) {
+		IPADBG("hdr_len=%4d off=%4d bin=%4d\n", entry->hdr_len,
+				entry->offset_entry->offset,
+				entry->offset_entry->bin);
+	}
+	mutex_unlock(&ipa_ctx->lock);
+	IPADBG("END\n");
+}
+
+/**
+ * ipa_commit_hdr() - commit to IPA HW the current header table in SW
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa_commit_hdr(void)
+{
+	int result = -EFAULT;
+
+	/*
+	 * issue a commit on the routing module since routing rules point to
+	 * header table entries
+	 */
+	if (ipa_commit_rt(IPA_IP_v4))
+		return -EPERM;
+	if (ipa_commit_rt(IPA_IP_v6))
+		return -EPERM;
+
+	mutex_lock(&ipa_ctx->lock);
+	if (__ipa_commit_hdr()) {
+		result = -EPERM;
+		goto bail;
+	}
+	result = 0;
+bail:
+	mutex_unlock(&ipa_ctx->lock);
+	return result;
+}
+EXPORT_SYMBOL(ipa_commit_hdr);
+
+/**
+ * ipa_reset_hdr() - reset the current header table in SW (does not commit to
+ * HW)
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa_reset_hdr(void)
+{
+	struct ipa_hdr_entry *entry;
+	struct ipa_hdr_entry *next;
+	struct ipa_hdr_offset_entry *off_entry;
+	struct ipa_hdr_offset_entry *off_next;
+	struct ipa_tree_node *node;
+	int i;
+
+	/*
+	 * issue a reset on the routing module since routing rules point to
+	 * header table entries
+	 */
+	if (ipa_reset_rt(IPA_IP_v4))
+		IPAERR("fail to reset v4 rt\n");
+	if (ipa_reset_rt(IPA_IP_v6))
+		IPAERR("fail to reset v4 rt\n");
+
+	mutex_lock(&ipa_ctx->lock);
+	IPADBG("reset hdr\n");
+	list_for_each_entry_safe(entry, next,
+			&ipa_ctx->hdr_tbl.head_hdr_entry_list, link) {
+
+		/* do not remove the default exception header */
+		if (!strncmp(entry->name, IPA_DFLT_HDR_NAME,
+					IPA_RESOURCE_NAME_MAX))
+			continue;
+
+		node = ipa_search(&ipa_ctx->hdr_hdl_tree, (u32) entry);
+		if (node == NULL)
+			WARN_ON(1);
+		list_del(&entry->link);
+		entry->cookie = 0;
+		kmem_cache_free(ipa_ctx->hdr_cache, entry);
+
+		/* remove the handle from the database */
+		rb_erase(&node->node, &ipa_ctx->hdr_hdl_tree);
+		kmem_cache_free(ipa_ctx->tree_node_cache, node);
+
+	}
+	for (i = 0; i < IPA_HDR_BIN_MAX; i++) {
+		list_for_each_entry_safe(off_entry, off_next,
+					 &ipa_ctx->hdr_tbl.head_offset_list[i],
+					 link) {
+
+			/*
+			 * do not remove the default exception header which is
+			 * at offset 0
+			 */
+			if (off_entry->offset == 0)
+				continue;
+
+			list_del(&off_entry->link);
+			kmem_cache_free(ipa_ctx->hdr_offset_cache, off_entry);
+		}
+		list_for_each_entry_safe(off_entry, off_next,
+				&ipa_ctx->hdr_tbl.head_free_offset_list[i],
+				link) {
+			list_del(&off_entry->link);
+			kmem_cache_free(ipa_ctx->hdr_offset_cache, off_entry);
+		}
+	}
+	/* there is one header of size 8 */
+	ipa_ctx->hdr_tbl.end = 8;
+	ipa_ctx->hdr_tbl.hdr_cnt = 1;
+	mutex_unlock(&ipa_ctx->lock);
+
+	return 0;
+}
+EXPORT_SYMBOL(ipa_reset_hdr);
+
+static struct ipa_hdr_entry *__ipa_find_hdr(const char *name)
+{
+	struct ipa_hdr_entry *entry;
+
+	list_for_each_entry(entry, &ipa_ctx->hdr_tbl.head_hdr_entry_list,
+			link) {
+		if (!strncmp(name, entry->name, IPA_RESOURCE_NAME_MAX))
+			return entry;
+	}
+
+	return NULL;
+}
+
+/**
+ * ipa_get_hdr() - Lookup the specified header resource
+ * @lookup:	[inout] header to lookup and its handle
+ *
+ * lookup the specified header resource and return its handle if it exists; if
+ * the lookup succeeds the header entry ref cnt is increased
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ *		Caller should call ipa_put_hdr later if this function succeeds
+ */
+int ipa_get_hdr(struct ipa_ioc_get_hdr *lookup)
+{
+	struct ipa_hdr_entry *entry;
+	int result = -1;
+
+	if (lookup == NULL) {
+		IPAERR("bad parm\n");
+		return -EINVAL;
+	}
+	mutex_lock(&ipa_ctx->lock);
+	entry = __ipa_find_hdr(lookup->name);
+	if (entry) {
+		entry->ref_cnt++;
+		lookup->hdl = (uint32_t) entry;
+		result = 0;
+	}
+	mutex_unlock(&ipa_ctx->lock);
+
+	return result;
+}
+EXPORT_SYMBOL(ipa_get_hdr);
+
+/**
+ * ipa_put_hdr() - Release the specified header handle
+ * @hdr_hdl:	[in] the header handle to release
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa_put_hdr(u32 hdr_hdl)
+{
+	struct ipa_hdr_entry *entry = (struct ipa_hdr_entry *)hdr_hdl;
+	struct ipa_tree_node *node;
+	int result = -EFAULT;
+
+	if (entry == NULL || entry->cookie != IPA_COOKIE ||
+			entry->ref_cnt == 0) {
+		IPAERR("bad params\n");
+		return -EINVAL;
+	}
+	node = ipa_search(&ipa_ctx->hdr_hdl_tree, hdr_hdl);
+	if (node == NULL) {
+		IPAERR("lookup failed\n");
+		return -EPERM;
+	}
+	mutex_lock(&ipa_ctx->lock);
+	entry->ref_cnt--;
+	if (entry->ref_cnt == 0) {
+		if (__ipa_del_hdr(hdr_hdl)) {
+			IPAERR("fail to del hdr\n");
+			result = -EFAULT;
+			goto bail;
+		}
+		/* commit for put */
+		if (__ipa_commit_hdr()) {
+			IPAERR("fail to commit hdr\n");
+			result = -EFAULT;
+			goto bail;
+		}
+	}
+	result = 0;
+bail:
+	mutex_unlock(&ipa_ctx->lock);
+	return result;
+}
+EXPORT_SYMBOL(ipa_put_hdr);
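+
+/*
+ * Get/put pairing sketch (illustrative; "lookup" is a caller-owned
+ * struct ipa_ioc_get_hdr and "cli_eth_hdr" is an example name):
+ *
+ *	strlcpy(lookup.name, "cli_eth_hdr", IPA_RESOURCE_NAME_MAX);
+ *	if (!ipa_get_hdr(&lookup)) {
+ *		use lookup.hdl while holding the reference, then
+ *		ipa_put_hdr(lookup.hdl);
+ *	}
+ */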
+
+/**
+ * ipa_copy_hdr() - Lookup the specified header resource and return a copy of it
+ * @copy:	[inout] header to lookup and its copy
+ *
+ * lookup the specified header resource and return a copy of it (along with its
+ * attributes) if it exists; this is typically called for partial headers
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa_copy_hdr(struct ipa_ioc_copy_hdr *copy)
+{
+	struct ipa_hdr_entry *entry;
+	int result = -EFAULT;
+
+	if (copy == NULL) {
+		IPAERR("bad parm\n");
+		return -EINVAL;
+	}
+	mutex_lock(&ipa_ctx->lock);
+	entry = __ipa_find_hdr(copy->name);
+	if (entry) {
+		memcpy(copy->hdr, entry->hdr, entry->hdr_len);
+		copy->hdr_len = entry->hdr_len;
+		copy->is_partial = entry->is_partial;
+		result = 0;
+	}
+	mutex_unlock(&ipa_ctx->lock);
+
+	return result;
+}
+EXPORT_SYMBOL(ipa_copy_hdr);
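+
+/*
+ * Usage sketch (illustrative; "copy" is a caller-owned struct ipa_ioc_copy_hdr
+ * whose hdr buffer is large enough for the stored header):
+ *
+ *	strlcpy(copy.name, "cli_partial_hdr", IPA_RESOURCE_NAME_MAX);
+ *	if (!ipa_copy_hdr(&copy) && copy.is_partial)
+ *		fill in the missing fields in copy.hdr before using it;
+ */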
+
+
diff --git a/drivers/platform/msm/ipa/ipa_hw_defs.h b/drivers/platform/msm/ipa/ipa_hw_defs.h
new file mode 100644
index 0000000..3131a84
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_hw_defs.h
@@ -0,0 +1,258 @@
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _IPA_HW_DEFS_H
+#define _IPA_HW_DEFS_H
+#include <linux/bitops.h>
+
+/* This header defines various HW related data types */
+
+/* immediate command op-codes */
+#define IPA_DECIPH_INIT        (1)
+#define IPA_PPP_FRM_INIT       (2)
+#define IPA_IP_V4_FILTER_INIT  (3)
+#define IPA_IP_V6_FILTER_INIT  (4)
+#define IPA_IP_V4_NAT_INIT     (5)
+#define IPA_IP_V6_NAT_INIT     (6)
+#define IPA_IP_V4_ROUTING_INIT (7)
+#define IPA_IP_V6_ROUTING_INIT (8)
+#define IPA_HDR_INIT_LOCAL     (9)
+#define IPA_HDR_INIT_SYSTEM   (10)
+#define IPA_DECIPH_SETUP      (11)
+#define IPA_INSERT_NAT_RULE   (12)
+#define IPA_DELETE_NAT_RULE   (13)
+#define IPA_NAT_DMA           (14)
+#define IPA_IP_PACKET_TAG     (15)
+#define IPA_IP_PACKET_INIT    (16)
+
+#define IPA_INTERFACE_ID_EXCEPTION         (0)
+#define IPA_INTERFACE_ID_A2_WWAN        (0x10)
+#define IPA_INTERFACE_ID_HSUSB_RMNET1   (0x21)
+#define IPA_INTERFACE_ID_HSUSB_RMNET2   (0x22)
+#define IPA_INTERFACE_ID_HSUSB_RMNET3   (0x23)
+#define IPA_INTERFACE_ID_HSIC_WLAN_WAN  (0x31)
+#define IPA_INTERFACE_ID_HSIC_WLAN_LAN1 (0x32)
+#define IPA_INTERFACE_ID_HSIC_WLAN_LAN2 (0x33)
+#define IPA_INTERFACE_ID_HSIC_RMNET1    (0x41)
+#define IPA_INTERFACE_ID_HSIC_RMNET2    (0x42)
+#define IPA_INTERFACE_ID_HSIC_RMNET3    (0x43)
+#define IPA_INTERFACE_ID_HSIC_RMNET4    (0x44)
+#define IPA_INTERFACE_ID_HSIC_RMNET5    (0x45)
+
+/**
+ * struct ipa_flt_rule_hw_hdr - HW header of IPA filter rule
+ * @word: filtering rule properties
+ * @en_rule: enable rule
+ * @action: post routing action
+ * @rt_tbl_idx: index in routing table
+ * @rsvd: reserved
+ */
+struct ipa_flt_rule_hw_hdr {
+	union {
+		u32 word;
+		struct {
+			u32 en_rule:16;
+			u32 action:5;
+			u32 rt_tbl_idx:5;
+			u32 rsvd:6;
+		} hdr;
+	} u;
+};
+
+/**
+ * struct ipa_rt_rule_hw_hdr - HW header of IPA routing rule
+ * @word: filtering rule properties
+ * @en_rule: enable rule
+ * @pipe_dest_idx: destination pipe index
+ * @system: changed from local to system due to HW change
+ * @hdr_offset: header offset
+ */
+struct ipa_rt_rule_hw_hdr {
+	union {
+		u32 word;
+		struct {
+			u32 en_rule:16;
+			u32 pipe_dest_idx:5;
+			u32 system:1;
+			u32 hdr_offset:10;
+		} hdr;
+	} u;
+};
+
+/**
+ * struct ipa_ip_v4_filter_init - IPA_IP_V4_FILTER_INIT command payload
+ * @ipv4_rules_addr: address of ipv4 rules
+ * @size_ipv4_rules: size of the above
+ * @ipv4_addr: ipv4 address
+ * @rsvd: reserved
+ */
+struct ipa_ip_v4_filter_init {
+	u64 ipv4_rules_addr:32;
+	u64 size_ipv4_rules:12;
+	u64 ipv4_addr:16;
+	u64 rsvd:4;
+};
+
+/**
+ * struct ipa_ip_v6_filter_init - IPA_IP_V6_FILTER_INIT command payload
+ * @ipv6_rules_addr: address of ipv6 rules
+ * @size_ipv6_rules: size of the above
+ * @ipv6_addr: ipv6 address
+ */
+struct ipa_ip_v6_filter_init {
+	u64 ipv6_rules_addr:32;
+	u64 size_ipv6_rules:16;
+	u64 ipv6_addr:16;
+};
+
+/**
+ * struct ipa_ip_v4_routing_init - IPA_IP_V4_ROUTING_INIT command payload
+ * @ipv4_rules_addr: address of ipv4 rules
+ * @size_ipv4_rules: size of the above
+ * @ipv4_addr: ipv4 address
+ * @rsvd: reserved
+ */
+struct ipa_ip_v4_routing_init {
+	u64 ipv4_rules_addr:32;
+	u64 size_ipv4_rules:12;
+	u64 ipv4_addr:16;
+	u64 rsvd:4;
+};
+
+/**
+ * struct ipa_ip_v6_routing_init - IPA_IP_V6_ROUTING_INIT command payload
+ * @ipv6_rules_addr: address of ipv6 rules
+ * @size_ipv6_rules: size of the above
+ * @ipv6_addr: ipv6 address
+ */
+struct ipa_ip_v6_routing_init {
+	u64 ipv6_rules_addr:32;
+	u64 size_ipv6_rules:16;
+	u64 ipv6_addr:16;
+};
+
+/**
+ * struct ipa_hdr_init_local - IPA_HDR_INIT_LOCAL command payload
+ * @hdr_table_addr: address of header table
+ * @size_hdr_table: size of the above
+ * @hdr_addr: header address
+ * @rsvd: reserved
+ */
+struct ipa_hdr_init_local {
+	u64 hdr_table_addr:32;
+	u64 size_hdr_table:12;
+	u64 hdr_addr:16;
+	u64 rsvd:4;
+};
+
+/**
+ * struct ipa_hdr_init_system - IPA_HDR_INIT_SYSTEM command payload
+ * @hdr_table_addr: address of header table
+ * @rsvd: reserved
+ */
+struct ipa_hdr_init_system {
+	u64 hdr_table_addr:32;
+	u64 rsvd:32;
+};
+
+#define IPA_A5_MUX_HDR_EXCP_FLAG_IP		BIT(0)
+#define IPA_A5_MUX_HDR_EXCP_FLAG_NAT		BIT(1)
+#define IPA_A5_MUX_HDR_EXCP_FLAG_SW_FLT	BIT(2)
+#define IPA_A5_MUX_HDR_EXCP_FLAG_TAG		BIT(3)
+#define IPA_A5_MUX_HDR_EXCP_FLAG_REPLICATED	BIT(4)
+#define IPA_A5_MUX_HDR_EXCP_FLAG_IHL		BIT(5)
+
+/**
+ * struct ipa_a5_mux_hdr - A5 MUX header definition
+ * @interface_id: interface ID
+ * @src_pipe_index: source pipe index
+ * @flags: flags
+ * @metadata: metadata
+ *
+ * A5 MUX header is in BE, A5 runs in LE. This struct definition
+ * allows A5 SW to correctly parse the header
+ */
+struct ipa_a5_mux_hdr {
+	u16 interface_id;
+	u8 src_pipe_index;
+	u8 flags;
+	u32 metadata;
+};
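+
+/*
+ * Parsing sketch (illustrative): the header arrives big-endian while the A5
+ * runs little-endian, so multi-byte fields need a byte swap when read, e.g.:
+ *
+ *	struct ipa_a5_mux_hdr *mux = (struct ipa_a5_mux_hdr *)skb->data;
+ *	u16 iface = ntohs(mux->interface_id);
+ *	u32 meta = ntohl(mux->metadata);
+ */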
+
+/**
+ * struct ipa_nat_dma - IPA_NAT_DMA command payload
+ * @table_index: NAT table index
+ * @rsvd1: reserved
+ * @base_addr: base address
+ * @rsvd2: reserved
+ * @offset: offset
+ * @data: metadata
+ * @rsvd3: reserved
+ */
+struct ipa_nat_dma {
+	u64 table_index:3;
+	u64 rsvd1:1;
+	u64 base_addr:2;
+	u64 rsvd2:2;
+	u64 offset:32;
+	u64 data:16;
+	u64 rsvd3:8;
+};
+
+/**
+ * struct ipa_ip_packet_init - IPA_IP_PACKET_INIT command payload
+ * @destination_pipe_index: destination pipe index
+ * @rsvd1: reserved
+ * @metadata: metadata
+ * @rsvd2: reserved
+ */
+struct ipa_ip_packet_init {
+	u64 destination_pipe_index:5;
+	u64 rsvd1:3;
+	u64 metadata:32;
+	u64 rsvd2:24;
+};
+
+/**
+ * struct ipa_ip_v4_nat_init - IPA_IP_V4_NAT_INIT command payload
+ * @ipv4_rules_addr: ipv4 rules address
+ * @ipv4_expansion_rules_addr: ipv4 expansion rules address
+ * @index_table_addr: index tables address
+ * @index_table_expansion_addr: index expansion table address
+ * @table_index: index in table
+ * @ipv4_rules_addr_type: ipv4 address type
+ * @ipv4_expansion_rules_addr_type: ipv4 expansion address type
+ * @index_table_addr_type: index table address type
+ * @index_table_expansion_addr_type: index expansion table type
+ * @size_base_tables: size of base tables
+ * @size_expansion_tables: size of expansion tables
+ * @rsvd2: reserved
+ * @public_ip_addr: public IP address
+ */
+struct ipa_ip_v4_nat_init {
+	u64 ipv4_rules_addr:32;
+	u64 ipv4_expansion_rules_addr:32;
+	u64 index_table_addr:32;
+	u64 index_table_expansion_addr:32;
+	u64 table_index:3;
+	u64 rsvd1:1;
+	u64 ipv4_rules_addr_type:1;
+	u64 ipv4_expansion_rules_addr_type:1;
+	u64 index_table_addr_type:1;
+	u64 index_table_expansion_addr_type:1;
+	u64 size_base_tables:12;
+	u64 size_expansion_tables:10;
+	u64 rsvd2:2;
+	u64 public_ip_addr:32;
+};
+
+#endif /* _IPA_HW_DEFS_H */
diff --git a/drivers/platform/msm/ipa/ipa_i.h b/drivers/platform/msm/ipa/ipa_i.h
new file mode 100644
index 0000000..63ef5fb
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_i.h
@@ -0,0 +1,727 @@
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _IPA_I_H_
+#define _IPA_I_H_
+
+#include <linux/bitops.h>
+#include <linux/cdev.h>
+#include <linux/export.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include <mach/ipa.h>
+#include <mach/sps.h>
+#include "ipa_hw_defs.h"
+#include "ipa_ram_mmap.h"
+#include "ipa_reg.h"
+
+#define DRV_NAME "ipa"
+#define IPA_COOKIE 0xfacefeed
+
+#define IPA_NUM_PIPES 0x14
+#define IPA_SYS_DESC_FIFO_SZ (0x800)
+
+#ifdef IPA_DEBUG
+#define IPADBG(fmt, args...) \
+	pr_debug(DRV_NAME " %s:%d " fmt, __func__, __LINE__, ## args)
+#else
+#define IPADBG(fmt, args...)
+#endif
+
+#define IPAERR(fmt, args...) \
+	pr_err(DRV_NAME " %s:%d " fmt, __func__, __LINE__, ## args)
+
+#define IPA_TOS_EQ			BIT(0)
+#define IPA_PROTOCOL_EQ		BIT(1)
+#define IPA_OFFSET_MEQ32_0		BIT(2)
+#define IPA_OFFSET_MEQ32_1		BIT(3)
+#define IPA_IHL_OFFSET_RANGE16_0	BIT(4)
+#define IPA_IHL_OFFSET_RANGE16_1	BIT(5)
+#define IPA_IHL_OFFSET_EQ_16		BIT(6)
+#define IPA_IHL_OFFSET_EQ_32		BIT(7)
+#define IPA_IHL_OFFSET_MEQ32_0		BIT(8)
+#define IPA_OFFSET_MEQ128_0		BIT(9)
+#define IPA_OFFSET_MEQ128_1		BIT(10)
+#define IPA_TC_EQ			BIT(11)
+#define IPA_FL_EQ			BIT(12)
+#define IPA_IHL_OFFSET_MEQ32_1		BIT(13)
+#define IPA_METADATA_COMPARE		BIT(14)
+#define IPA_IPV4_IS_FRAG		BIT(15)
+
+#define IPA_HDR_BIN0 0
+#define IPA_HDR_BIN1 1
+#define IPA_HDR_BIN2 2
+#define IPA_HDR_BIN3 3
+#define IPA_HDR_BIN_MAX 4
+
+#define IPA_EVENT_THRESHOLD 0x10
+
+#define IPA_RX_POOL_CEIL 24
+#define IPA_RX_SKB_SIZE 2048
+
+#define IPA_DFLT_HDR_NAME "ipa_excp_hdr"
+
+#define IPA_CLIENT_IS_PROD(x) (x >= IPA_CLIENT_PROD && x < IPA_CLIENT_CONS)
+#define IPA_CLIENT_IS_CONS(x) (x >= IPA_CLIENT_CONS && x < IPA_CLIENT_MAX)
+#define IPA_SETFIELD(val, shift, mask) (((val) << (shift)) & (mask))
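+/* e.g. IPA_SETFIELD(5, 4, 0xF0) == 0x50: value 5 placed into bits 7:4 */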
+
+#define IPA_HW_TABLE_ALIGNMENT(start_ofst) \
+	(((start_ofst) + 127) & ~127)
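+/* e.g. IPA_HW_TABLE_ALIGNMENT(1) == 128 and IPA_HW_TABLE_ALIGNMENT(256) == 256 */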
+#define IPA_RT_FLT_HW_RULE_BUF_SIZE	(128)
+
+/**
+ * enum ipa_sys_pipe - 5 A5-IPA pipes
+ *
+ * 5 A5-IPA pipes (all system mode)
+ */
+enum ipa_sys_pipe {
+	IPA_A5_UNUSED,
+	IPA_A5_CMD,
+	IPA_A5_LAN_WAN_OUT,
+	IPA_A5_LAN_WAN_IN,
+	IPA_A5_WLAN_AMPDU_OUT,
+	IPA_A5_SYS_MAX
+};
+
+/**
+ * enum ipa_operating_mode - IPA operating mode
+ *
+ * IPA operating mode
+ */
+enum ipa_operating_mode {
+	IPA_MODE_USB_DONGLE,
+	IPA_MODE_MSM,
+	IPA_MODE_EXT_APPS,
+	IPA_MODE_MOBILE_AP_WAN,
+	IPA_MODE_MOBILE_AP_WLAN,
+	IPA_MODE_MOBILE_AP_ETH,
+	IPA_MODE_MAX
+};
+
+/**
+ * enum ipa_bridge_dir - direction of the bridge from air interface perspective
+ *
+ * IPA bridge direction
+ */
+enum ipa_bridge_dir {
+	IPA_DL,
+	IPA_UL,
+	IPA_DIR_MAX
+};
+
+/**
+ * struct ipa_mem_buffer - IPA memory buffer
+ * @base: base
+ * @phys_base: physical base address
+ * @size: size of memory buffer
+ */
+struct ipa_mem_buffer {
+	void *base;
+	dma_addr_t phys_base;
+	u32 size;
+};
+
+/**
+ * struct ipa_flt_entry - IPA filtering table entry
+ * @link: entry's link in global filtering entries list
+ * @rule: filter rule
+ * @cookie: cookie used for validity check
+ * @tbl: filter table
+ * @rt_tbl: routing table
+ * @hw_len: entry's size
+ */
+struct ipa_flt_entry {
+	struct list_head link;
+	struct ipa_flt_rule rule;
+	u32 cookie;
+	struct ipa_flt_tbl *tbl;
+	struct ipa_rt_tbl *rt_tbl;
+	u32 hw_len;
+};
+
+/**
+ * struct ipa_rt_tbl - IPA routing table
+ * @link: table's link in global routing tables list
+ * @head_rt_rule_list: head of routing rules list
+ * @name: routing table name
+ * @idx: routing table index
+ * @rule_cnt: number of rules in routing table
+ * @ref_cnt: reference counter of routing table
+ * @set: collection of routing tables
+ * @cookie: cookie used for validity check
+ * @in_sys: flag indicating if the table is located in system memory
+ * @sz: the size of the routing table
+ * @curr_mem: current routing tables block in sys memory
+ * @prev_mem: previous routing table block in sys memory
+ */
+struct ipa_rt_tbl {
+	struct list_head link;
+	struct list_head head_rt_rule_list;
+	char name[IPA_RESOURCE_NAME_MAX];
+	u32 idx;
+	u32 rule_cnt;
+	u32 ref_cnt;
+	struct ipa_rt_tbl_set *set;
+	u32 cookie;
+	bool in_sys;
+	u32 sz;
+	struct ipa_mem_buffer curr_mem;
+	struct ipa_mem_buffer prev_mem;
+};
+
+/**
+ * struct ipa_hdr_entry - IPA header table entry
+ * @link: entry's link in global header table entries list
+ * @hdr: the header
+ * @hdr_len: header length
+ * @name: name of header table entry
+ * @is_partial: flag indicating if header table entry is partial
+ * @offset_entry: entry's offset
+ * @cookie: cookie used for validity check
+ * @ref_cnt: reference counter of header table entry
+ */
+struct ipa_hdr_entry {
+	struct list_head link;
+	u8 hdr[IPA_HDR_MAX_SIZE];
+	u32 hdr_len;
+	char name[IPA_RESOURCE_NAME_MAX];
+	u8 is_partial;
+	struct ipa_hdr_offset_entry *offset_entry;
+	u32 cookie;
+	u32 ref_cnt;
+};
+
+/**
+ * struct ipa_hdr_offset_entry - IPA header offset entry
+ * @link: entry's link in global header offset entries list
+ * @offset: the offset
+ * @bin: bin
+ */
+struct ipa_hdr_offset_entry {
+	struct list_head link;
+	u32 offset;
+	u32 bin;
+};
+
+/**
+ * struct ipa_hdr_tbl - IPA header table
+ * @head_hdr_entry_list: header entries list
+ * @head_offset_list: header offset list
+ * @head_free_offset_list: header free offset list
+ * @hdr_cnt: number of headers
+ * @end: the current end (total used size) of the table
+ */
+struct ipa_hdr_tbl {
+	struct list_head head_hdr_entry_list;
+	struct list_head head_offset_list[IPA_HDR_BIN_MAX];
+	struct list_head head_free_offset_list[IPA_HDR_BIN_MAX];
+	u32 hdr_cnt;
+	u32 end;
+};
+
+/**
+ * struct ipa_flt_tbl - IPA filter table
+ * @head_flt_rule_list: filter rules list
+ * @rule_cnt: number of filter rules
+ * @in_sys: flag indicating if filter table is located in system memory
+ * @sz: the size of the filter table
+ * @curr_mem: current filter tables block in sys memory
+ * @prev_mem: previous filter table block in sys memory
+ */
+struct ipa_flt_tbl {
+	struct list_head head_flt_rule_list;
+	u32 rule_cnt;
+	bool in_sys;
+	u32 sz;
+	struct ipa_mem_buffer curr_mem;
+	struct ipa_mem_buffer prev_mem;
+};
+
+/**
+ * struct ipa_rt_entry - IPA routing table entry
+ * @link: entry's link in global routing table entries list
+ * @rule: routing rule
+ * @cookie: cookie used for validity check
+ * @tbl: routing table
+ * @hdr: header table
+ * @hw_len: the length of the table
+ */
+struct ipa_rt_entry {
+	struct list_head link;
+	struct ipa_rt_rule rule;
+	u32 cookie;
+	struct ipa_rt_tbl *tbl;
+	struct ipa_hdr_entry *hdr;
+	u32 hw_len;
+};
+
+/**
+ * struct ipa_rt_tbl_set - collection of routing tables
+ * @head_rt_tbl_list: collection of routing tables
+ * @tbl_cnt: number of routing tables
+ */
+struct ipa_rt_tbl_set {
+	struct list_head head_rt_tbl_list;
+	u32 tbl_cnt;
+};
+
+/**
+ * struct ipa_tree_node - handle database entry
+ * @node: RB node
+ * @hdl: handle
+ */
+struct ipa_tree_node {
+	struct rb_node node;
+	u32 hdl;
+};
+
+/**
+ * struct ipa_ep_context - IPA end point context
+ * @valid: flag indicating if EP context is valid
+ * @client: EP client type
+ * @ep_hdl: EP's client SPS handle
+ * @cfg: EP configuration
+ * @dst_pipe_index: destination pipe index
+ * @rt_tbl_idx: routing table index
+ * @connect: SPS connect
+ * @priv: user provided information
+ * @notify: user provided CB for EP events notification
+ * @desc_fifo_in_pipe_mem: flag indicating if descriptors FIFO uses pipe memory
+ * @data_fifo_in_pipe_mem: flag indicating if data FIFO uses pipe memory
+ * @desc_fifo_pipe_mem_ofst: descriptors FIFO pipe memory offset
+ * @data_fifo_pipe_mem_ofst: data FIFO pipe memory offset
+ * @desc_fifo_client_allocated: if descriptors FIFO was allocated by a client
+ * @data_fifo_client_allocated: if data FIFO was allocated by a client
+ */
+struct ipa_ep_context {
+	int valid;
+	enum ipa_client_type client;
+	struct sps_pipe *ep_hdl;
+	struct ipa_ep_cfg cfg;
+	u32 dst_pipe_index;
+	u32 rt_tbl_idx;
+	struct sps_connect connect;
+	void *priv;
+	void (*notify)(void *priv, enum ipa_dp_evt_type evt,
+		       unsigned long data);
+	bool desc_fifo_in_pipe_mem;
+	bool data_fifo_in_pipe_mem;
+	u32 desc_fifo_pipe_mem_ofst;
+	u32 data_fifo_pipe_mem_ofst;
+	bool desc_fifo_client_allocated;
+	bool data_fifo_client_allocated;
+};
+
+/**
+ * struct ipa_sys_context - IPA endpoint context for system to BAM pipes
+ * @head_desc_list: header descriptors list
+ * @len: the size of the above list
+ * @spinlock: protects the list and its size
+ * @event: used to request CALLBACK mode from SPS driver
+ * @ep: IPA EP context
+ * @wait_desc_list: used to hold completed Tx packets
+ *
+ * IPA context specific to the system-bam pipes a.k.a LAN IN/OUT and WAN
+ */
+struct ipa_sys_context {
+	struct list_head head_desc_list;
+	u32 len;
+	spinlock_t spinlock;
+	struct sps_register_event event;
+	struct ipa_ep_context *ep;
+	struct list_head wait_desc_list;
+};
+
+/**
+ * enum ipa_desc_type - IPA descriptor types
+ *
+ * IPA descriptor types; IPA supports DD and ICD but not CD
+ */
+enum ipa_desc_type {
+	IPA_DATA_DESC,
+	IPA_DATA_DESC_SKB,
+	IPA_IMM_CMD_DESC
+};
+
+/**
+ * struct ipa_tx_pkt_wrapper - IPA Tx packet wrapper
+ * @type: info for the skb or immediate command param
+ * @mem: memory buffer used by this Tx packet
+ * @work: work struct for current Tx packet
+ * @link: linked to the wrappers on that pipe
+ * @callback: IPA client provided callback
+ * @user1: cookie1 for above callback
+ * @user2: cookie2 for above callback
+ * @sys: corresponding IPA sys context
+ * @mult: valid only for first of a "multiple" transfer,
+ * holds info for the "sps_transfer" buffer
+ * @cnt: 1 for single transfers,
+ * >1 and <0xFFFF for first of a "multiple" transfer,
+ * 0xFFFF for last desc, 0 for rest of a "multiple" transfer
+ * @bounce: va of bounce buffer
+ */
+struct ipa_tx_pkt_wrapper {
+	enum ipa_desc_type type;
+	struct ipa_mem_buffer mem;
+	struct work_struct work;
+	struct list_head link;
+	void (*callback)(void *user1, void *user2);
+	void *user1;
+	void *user2;
+	struct ipa_sys_context *sys;
+	struct ipa_mem_buffer mult;
+	u16 cnt;
+	void *bounce;
+};
+
+/**
+ * struct ipa_desc - IPA descriptor
+ * @type: skb or immediate command or plain old data
+ * @pyld: points to skb
+ * or kmalloc'ed immediate command parameters/plain old data
+ * @len: length of the pyld
+ * @opcode: for immediate commands
+ * @callback: IPA client provided completion callback
+ * @user1: cookie1 for above callback
+ * @user2: cookie2 for above callback
+ * @xfer_done: completion object for sync completion
+ */
+struct ipa_desc {
+	enum ipa_desc_type type;
+	void *pyld;
+	u16 len;
+	u16 opcode;
+	void (*callback)(void *user1, void *user2);
+	void *user1;
+	void *user2;
+	struct completion xfer_done;
+};
+
+/**
+ * struct ipa_rx_pkt_wrapper - IPA Rx packet wrapper
+ * @skb: skb
+ * @dma_address: DMA address of this Rx packet
+ * @work: work struct for current Rx packet
+ * @link: linked to the Rx packets on that pipe
+ * @len: how many bytes are copied into skb's flat buffer
+ */
+struct ipa_rx_pkt_wrapper {
+	struct sk_buff *skb;
+	dma_addr_t dma_address;
+	struct work_struct work;
+	struct list_head link;
+	u16 len;
+};
+
+/**
+ * struct ipa_nat_mem - IPA NAT memory description
+ * @class: pointer to the struct class
+ * @dev: the dev_t of the device
+ * @cdev: cdev of the device
+ * @dev_num: device number
+ * @vaddr: virtual address
+ * @dma_handle: DMA handle
+ * @size: NAT memory size
+ * @is_mapped: flag indicating if NAT memory is mapped
+ * @is_sys_mem: flag indicating if NAT memory is sys memory
+ * @is_dev_init: flag indicating if NAT device is initialized
+ * @lock: NAT memory mutex
+ */
+struct ipa_nat_mem {
+	struct class *class;
+	struct device *dev;
+	struct cdev cdev;
+	dev_t dev_num;
+	void *vaddr;
+	dma_addr_t dma_handle;
+	size_t size;
+	bool is_mapped;
+	bool is_sys_mem;
+	bool is_dev_init;
+	struct mutex lock;
+};
+
+/**
+ * struct ipa_context - IPA context
+ * @class: pointer to the struct class
+ * @dev_num: device number
+ * @dev: the dev_t of the device
+ * @cdev: cdev of the device
+ * @bam_handle: IPA driver's BAM handle
+ * @ep: list of all end points
+ * @flt_tbl: list of all IPA filter tables
+ * @mode: IPA operating mode
+ * @mmio: iomem
+ * @ipa_wrapper_base: IPA wrapper base address
+ * @glob_flt_tbl: global filter table
+ * @hdr_tbl: IPA header table
+ * @rt_tbl_set: list of routing tables each of which is a list of rules
+ * @reap_rt_tbl_set: list of sys mem routing tables waiting to be reaped
+ * @flt_rule_cache: filter rule cache
+ * @rt_rule_cache: routing rule cache
+ * @hdr_cache: header cache
+ * @hdr_offset_cache: header offset cache
+ * @rt_tbl_cache: routing table cache
+ * @tx_pkt_wrapper_cache: Tx packets cache
+ * @rx_pkt_wrapper_cache: Rx packets cache
+ * @tree_node_cache: tree nodes cache
+ * @rt_idx_bitmap: routing table index bitmap
+ * @lock: this does NOT protect the linked lists within ipa_sys_context
+ * @sys: IPA sys context for system-bam pipes
+ * @rx_wq: Rx packets work queue
+ * @tx_wq: Tx packets work queue
+ * @smem_sz: shared memory size
+ * @hdr_hdl_tree: header handles tree
+ * @rt_rule_hdl_tree: routing rule handles tree
+ * @rt_tbl_hdl_tree: routing table handles tree
+ * @flt_rule_hdl_tree: filtering rule handles tree
+ * @nat_mem: NAT memory
+ * @excp_hdr_hdl: exception header handle
+ * @dflt_v4_rt_rule_hdl: default v4 routing rule handle
+ * @dflt_v6_rt_rule_hdl: default v6 routing rule handle
+ * @polling_mode: 1 - pure polling mode; 0 - interrupt+polling mode
+ * @aggregation_type: aggregation type used on USB client endpoint
+ * @aggregation_byte_limit: aggregation byte limit used on USB client endpoint
+ * @aggregation_time_limit: aggregation time limit used on USB client endpoint
+ * @curr_polling_state: current polling state
+ * @poll_work: polling work
+ * @hdr_tbl_lcl: where hdr tbl resides 1-local, 0-system
+ * @hdr_mem: header memory
+ * @ip4_rt_tbl_lcl: where ip4 rt tables reside 1-local; 0-system
+ * @ip6_rt_tbl_lcl: where ip6 rt tables reside 1-local; 0-system
+ * @ip4_flt_tbl_lcl: where ip4 flt tables reside 1-local; 0-system
+ * @ip6_flt_tbl_lcl: where ip6 flt tables reside 1-local; 0-system
+ * @empty_rt_tbl_mem: empty routing tables memory
+ * @pipe_mem_pool: pipe memory pool
+ * @one_kb_no_straddle_pool: one kb no straddle pool
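+ * @ipa_active_clients: current number of active IPA clients
+ * @clnt_hdl_cmd: client handle of the command pipe
+ * @clnt_hdl_data_in: client handle of the data-in pipe
+ * @clnt_hdl_data_out: client handle of the data-out pipe
+ * @a5_pipe_index: A5 (apps) pipe index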
+ *
+ * IPA context - holds all relevant info about IPA driver and its state
+ */
+struct ipa_context {
+	struct class *class;
+	dev_t dev_num;
+	struct device *dev;
+	struct cdev cdev;
+	u32 bam_handle;
+	struct ipa_ep_context ep[IPA_NUM_PIPES];
+	struct ipa_flt_tbl flt_tbl[IPA_NUM_PIPES][IPA_IP_MAX];
+	enum ipa_operating_mode mode;
+	void __iomem *mmio;
+	u32 ipa_wrapper_base;
+	struct ipa_flt_tbl glob_flt_tbl[IPA_IP_MAX];
+	struct ipa_hdr_tbl hdr_tbl;
+	struct ipa_rt_tbl_set rt_tbl_set[IPA_IP_MAX];
+	struct ipa_rt_tbl_set reap_rt_tbl_set[IPA_IP_MAX];
+	struct kmem_cache *flt_rule_cache;
+	struct kmem_cache *rt_rule_cache;
+	struct kmem_cache *hdr_cache;
+	struct kmem_cache *hdr_offset_cache;
+	struct kmem_cache *rt_tbl_cache;
+	struct kmem_cache *tx_pkt_wrapper_cache;
+	struct kmem_cache *rx_pkt_wrapper_cache;
+	struct kmem_cache *tree_node_cache;
+	unsigned long rt_idx_bitmap[IPA_IP_MAX];
+	struct mutex lock;
+	struct ipa_sys_context sys[IPA_A5_SYS_MAX];
+	struct workqueue_struct *rx_wq;
+	struct workqueue_struct *tx_wq;
+	u16 smem_sz;
+	struct rb_root hdr_hdl_tree;
+	struct rb_root rt_rule_hdl_tree;
+	struct rb_root rt_tbl_hdl_tree;
+	struct rb_root flt_rule_hdl_tree;
+	struct ipa_nat_mem nat_mem;
+	u32 excp_hdr_hdl;
+	u32 dflt_v4_rt_rule_hdl;
+	u32 dflt_v6_rt_rule_hdl;
+	bool polling_mode;
+	uint aggregation_type;
+	uint aggregation_byte_limit;
+	uint aggregation_time_limit;
+	uint curr_polling_state;
+	struct delayed_work poll_work;
+	bool hdr_tbl_lcl;
+	struct ipa_mem_buffer hdr_mem;
+	bool ip4_rt_tbl_lcl;
+	bool ip6_rt_tbl_lcl;
+	bool ip4_flt_tbl_lcl;
+	bool ip6_flt_tbl_lcl;
+	struct ipa_mem_buffer empty_rt_tbl_mem;
+	struct gen_pool *pipe_mem_pool;
+	struct dma_pool *one_kb_no_straddle_pool;
+	atomic_t ipa_active_clients;
+	u32 clnt_hdl_cmd;
+	u32 clnt_hdl_data_in;
+	u32 clnt_hdl_data_out;
+	u8 a5_pipe_index;
+};
+
+/**
+ * struct ipa_route - IPA route
+ * @route_dis: route disable
+ * @route_def_pipe: route default pipe
+ * @route_def_hdr_table: route default header table
+ * @route_def_hdr_ofst: route default header offset table
+ */
+struct ipa_route {
+	u32 route_dis;
+	u32 route_def_pipe;
+	u32 route_def_hdr_table;
+	u32 route_def_hdr_ofst;
+};
+
+/**
+ * enum ipa_pipe_mem_type - IPA pipe memory type
+ * @IPA_SPS_PIPE_MEM: Default, SPS dedicated pipe memory
+ * @IPA_PRIVATE_MEM: IPA's private memory
+ * @IPA_SYSTEM_MEM: System RAM, requires allocation
+ */
+enum ipa_pipe_mem_type {
+	IPA_SPS_PIPE_MEM = 0,
+	IPA_PRIVATE_MEM  = 1,
+	IPA_SYSTEM_MEM   = 2,
+};
+
+/**
+ * enum a2_mux_pipe_direction - IPA-A2 pipe direction
+ */
+enum a2_mux_pipe_direction {
+	A2_TO_IPA = 0,
+	IPA_TO_A2 = 1
+};
+
+/**
+ * struct a2_mux_pipe_connection - A2 MUX pipe connection
+ * @src_phy_addr: source physical address
+ * @src_pipe_index: source pipe index
+ * @dst_phy_addr: destination physical address
+ * @dst_pipe_index: destination pipe index
+ * @mem_type: pipe memory type
+ * @data_fifo_base_offset: data FIFO base offset
+ * @data_fifo_size: data FIFO size
+ * @desc_fifo_base_offset: descriptors FIFO base offset
+ * @desc_fifo_size: descriptors FIFO size
+ */
+struct a2_mux_pipe_connection {
+	int			src_phy_addr;
+	int			src_pipe_index;
+	int			dst_phy_addr;
+	int			dst_pipe_index;
+	enum ipa_pipe_mem_type	mem_type;
+	int			data_fifo_base_offset;
+	int			data_fifo_size;
+	int			desc_fifo_base_offset;
+	int			desc_fifo_size;
+};
+
+extern struct ipa_context *ipa_ctx;
+
+int ipa_get_a2_mux_pipe_info(enum a2_mux_pipe_direction pipe_dir,
+				struct a2_mux_pipe_connection *pipe_connect);
+void rmnet_bridge_get_client_handles(u32 *producer_handle,
+		u32 *consumer_handle);
+int ipa_send_one(struct ipa_sys_context *sys, struct ipa_desc *desc);
+int ipa_send(struct ipa_sys_context *sys, u16 num_desc, struct ipa_desc *desc);
+int ipa_get_ep_mapping(enum ipa_operating_mode mode,
+		       enum ipa_client_type client);
+int ipa_generate_hw_rule(enum ipa_ip_type ip,
+			 const struct ipa_rule_attrib *attrib,
+			 u8 **buf,
+			 u16 *en_rule);
+u8 *ipa_write_32(u32 w, u8 *dest);
+u8 *ipa_write_16(u16 hw, u8 *dest);
+u8 *ipa_write_8(u8 b, u8 *dest);
+u8 *ipa_pad_to_32(u8 *dest);
+int ipa_init_hw(void);
+struct ipa_rt_tbl *__ipa_find_rt_tbl(enum ipa_ip_type ip, const char *name);
+void ipa_dump(void);
+int ipa_generate_hdr_hw_tbl(struct ipa_mem_buffer *mem);
+int ipa_generate_rt_hw_tbl(enum ipa_ip_type ip, struct ipa_mem_buffer *mem);
+int ipa_generate_flt_hw_tbl(enum ipa_ip_type ip, struct ipa_mem_buffer *mem);
+void ipa_debugfs_init(void);
+void ipa_debugfs_remove(void);
+
+/*
+ * The functions below read from/write to IPA local memory a.k.a. device
+ * memory. The order of the arguments is deliberately different from the
+ * ipa_write* functions, which operate on system memory; see the illustrative
+ * example after these declarations.
+ */
+void ipa_write_dev_8(u8 val, u16 ofst_ipa_sram);
+void ipa_write_dev_16(u16 val, u16 ofst_ipa_sram);
+void ipa_write_dev_32(u32 val, u16 ofst_ipa_sram);
+unsigned int ipa_read_dev_8(u16 ofst_ipa_sram);
+unsigned int ipa_read_dev_16(u16 ofst_ipa_sram);
+unsigned int ipa_read_dev_32(u16 ofst_ipa_sram);
+void ipa_write_dev_8rep(u16 ofst_ipa_sram, const void *buf,
+		unsigned long count);
+void ipa_write_dev_16rep(u16 ofst_ipa_sram, const void *buf,
+		unsigned long count);
+void ipa_write_dev_32rep(u16 ofst_ipa_sram, const void *buf,
+		unsigned long count);
+void ipa_read_dev_8rep(u16 ofst_ipa_sram, void *buf, unsigned long count);
+void ipa_read_dev_16rep(u16 ofst_ipa_sram, void *buf, unsigned long count);
+void ipa_read_dev_32rep(u16 ofst_ipa_sram, void *buf, unsigned long count);
+void ipa_memset_dev(u16 ofst_ipa_sram, u8 value, unsigned int count);
+void ipa_memcpy_from_dev(void *dest, u16 ofst_ipa_sram, unsigned int count);
+void ipa_memcpy_to_dev(u16 ofst_ipa_sram, void *source, unsigned int count);
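+
+/*
+ * Illustrative example of the argument-order difference noted above (a
+ * sketch, not part of the driver API; "buf" and "ofst" are hypothetical):
+ *
+ *	next = ipa_write_32(0x12345678, buf);	writes to system memory at
+ *						buf, returns advanced pointer
+ *	ipa_write_dev_32(0x12345678, ofst);	writes to IPA local memory at
+ *						byte offset ofst
+ */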
+
+int ipa_insert(struct rb_root *root, struct ipa_tree_node *data);
+struct ipa_tree_node *ipa_search(struct rb_root *root, u32 hdl);
+void ipa_dump_buff_internal(void *base, dma_addr_t phy_base, u32 size);
+
+#ifdef IPA_DEBUG
+#define IPA_DUMP_BUFF(base, phy_base, size) \
+	ipa_dump_buff_internal(base, phy_base, size)
+#else
+#define IPA_DUMP_BUFF(base, phy_base, size)
+#endif
+
+int ipa_cfg_route(struct ipa_route *route);
+int ipa_send_cmd(u16 num_desc, struct ipa_desc *descr);
+void ipa_replenish_rx_cache(void);
+void ipa_cleanup_rx(void);
+int ipa_cfg_filter(u32 disable);
+void ipa_write_done(struct work_struct *work);
+void ipa_handle_rx(struct work_struct *work);
+void ipa_handle_rx_core(void);
+int ipa_pipe_mem_init(u32 start_ofst, u32 size);
+int ipa_pipe_mem_alloc(u32 *ofst, u32 size);
+int ipa_pipe_mem_free(u32 ofst, u32 size);
+int ipa_straddle_boundary(u32 start, u32 end, u32 boundary);
+struct ipa_context *ipa_get_ctx(void);
+void ipa_enable_clks(void);
+void ipa_disable_clks(void);
+
+static inline u32 ipa_read_reg(void *base, u32 offset)
+{
+	u32 val = ioread32(base + offset);
+	IPADBG("0x%x(va) read reg 0x%x r_val 0x%x.\n",
+		(u32)base, offset, val);
+	return val;
+}
+
+static inline void ipa_write_reg(void *base, u32 offset, u32 val)
+{
+	iowrite32(val, base + offset);
+	IPADBG("0x%x(va) write reg 0x%x w_val 0x%x.\n",
+		(u32)base, offset, val);
+}
+
+int ipa_bridge_init(void);
+void ipa_bridge_cleanup(void);
+int ipa_bridge_setup(enum ipa_bridge_dir dir);
+int ipa_bridge_teardown(enum ipa_bridge_dir dir);
+
+#endif /* _IPA_I_H_ */
diff --git a/drivers/platform/msm/ipa/ipa_nat.c b/drivers/platform/msm/ipa/ipa_nat.c
new file mode 100644
index 0000000..c13c53a
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_nat.c
@@ -0,0 +1,466 @@
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/device.h>
+#include <linux/fs.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/uaccess.h>
+#include "ipa_i.h"
+
+#define IPA_NAT_PHYS_MEM_OFFSET  0
+#define IPA_NAT_PHYS_MEM_SIZE  IPA_RAM_NAT_SIZE
+
+#define IPA_NAT_SYSTEM_MEMORY  0
+#define IPA_NAT_SHARED_MEMORY  1
+
+static int ipa_nat_vma_fault_remap(
+	 struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+	IPADBG("\n");
+	vmf->page = NULL;
+
+	return VM_FAULT_SIGBUS;
+}
+
+/* VMA related file operations functions */
+static struct vm_operations_struct ipa_nat_remap_vm_ops = {
+	.fault = ipa_nat_vma_fault_remap,
+};
+
+static int ipa_nat_open(struct inode *inode, struct file *filp)
+{
+	struct ipa_nat_mem *nat_ctx;
+	IPADBG("\n");
+	nat_ctx = container_of(inode->i_cdev, struct ipa_nat_mem, cdev);
+	filp->private_data = nat_ctx;
+	IPADBG("return\n");
+	return 0;
+}
+
+static int ipa_nat_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+	unsigned long vsize = vma->vm_end - vma->vm_start;
+	struct ipa_nat_mem *nat_ctx = (struct ipa_nat_mem *)filp->private_data;
+	unsigned long phys_addr;
+	int result;
+
+	mutex_lock(&nat_ctx->lock);
+	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+	if (nat_ctx->is_sys_mem) {
+		IPADBG("Mapping system memory\n");
+		if (nat_ctx->is_mapped) {
+			IPAERR("mapping already exists, only 1 supported\n");
+			result = -EINVAL;
+			goto bail;
+		}
+		IPADBG("map sz=0x%x\n", nat_ctx->size);
+		result =
+			dma_mmap_coherent(
+				 NULL, vma,
+				 nat_ctx->vaddr, nat_ctx->dma_handle,
+				 nat_ctx->size);
+
+		if (result) {
+			IPAERR("unable to map memory. Err:%d\n", result);
+			goto bail;
+		}
+	} else {
+		IPADBG("Mapping shared(local) memory\n");
+		IPADBG("map sz=0x%lx\n", vsize);
+		phys_addr = ipa_ctx->ipa_wrapper_base + IPA_REG_BASE_OFST +
+			IPA_SRAM_DIRECT_ACCESS_n_OFST(IPA_NAT_PHYS_MEM_OFFSET);
+
+		if (remap_pfn_range(
+			 vma, vma->vm_start,
+			 phys_addr >> PAGE_SHIFT, vsize, vma->vm_page_prot)) {
+			IPAERR("remap failed\n");
+			result = -EAGAIN;
+			goto bail;
+		}
+
+	}
+	nat_ctx->is_mapped = true;
+	vma->vm_ops = &ipa_nat_remap_vm_ops;
+	IPADBG("return\n");
+	result = 0;
+bail:
+	mutex_unlock(&nat_ctx->lock);
+	return result;
+}
+
+static const struct file_operations ipa_nat_fops = {
+	.owner = THIS_MODULE,
+	.open = ipa_nat_open,
+	.mmap = ipa_nat_mmap
+};
+
+/**
+ * allocate_nat_device() - Allocates memory for the NAT device
+ * @mem:	[in/out] memory parameters
+ *
+ * Called by NAT client driver to allocate memory for the NAT entries. Based on
+ * the request size either shared or system memory will be used.
+ *
+ * Returns:	0 on success, negative on failure
+ */
+int allocate_nat_device(struct ipa_ioc_nat_alloc_mem *mem)
+{
+	struct ipa_nat_mem *nat_ctx = &(ipa_ctx->nat_mem);
+	gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO;
+	int result;
+
+	IPADBG("passed memory size %d\n", mem->size);
+
+	mutex_lock(&nat_ctx->lock);
+	if (mem->size <= 0 || !strlen(mem->dev_name)
+			|| nat_ctx->is_dev_init == true) {
+		IPADBG("Invalid Parameters or device is already init\n");
+		result = -EPERM;
+		goto bail;
+	}
+
+	if (mem->size > IPA_NAT_PHYS_MEM_SIZE) {
+		IPADBG("Allocating system memory\n");
+		nat_ctx->is_sys_mem = true;
+		nat_ctx->vaddr =
+		   dma_alloc_coherent(NULL, mem->size, &nat_ctx->dma_handle,
+				       gfp_flags);
+		if (nat_ctx->vaddr == NULL) {
+			IPAERR("memory alloc failed\n");
+			result = -ENOMEM;
+			goto bail;
+		}
+		nat_ctx->size = mem->size;
+	} else {
+		IPADBG("using shared(local) memory\n");
+		nat_ctx->is_sys_mem = false;
+	}
+
+	nat_ctx->class = class_create(THIS_MODULE, mem->dev_name);
+	if (IS_ERR(nat_ctx->class)) {
+		IPAERR("unable to create the class\n");
+		result = -ENODEV;
+		goto vaddr_alloc_fail;
+	}
+	result = alloc_chrdev_region(&nat_ctx->dev_num,
+					0,
+					1,
+					mem->dev_name);
+	if (result) {
+		IPAERR("alloc_chrdev_region err.\n");
+		result = -ENODEV;
+		goto alloc_chrdev_region_fail;
+	}
+
+	nat_ctx->dev =
+	   device_create(nat_ctx->class, NULL, nat_ctx->dev_num, nat_ctx,
+			 mem->dev_name);
+
+	if (IS_ERR(nat_ctx->dev)) {
+		IPAERR("device_create err:%ld\n", PTR_ERR(nat_ctx->dev));
+		result = -ENODEV;
+		goto device_create_fail;
+	}
+
+	cdev_init(&nat_ctx->cdev, &ipa_nat_fops);
+	nat_ctx->cdev.owner = THIS_MODULE;
+	nat_ctx->cdev.ops = &ipa_nat_fops;
+
+	result = cdev_add(&nat_ctx->cdev, nat_ctx->dev_num, 1);
+	if (result) {
+		IPAERR("cdev_add err=%d\n", -result);
+		goto cdev_add_fail;
+	}
+	nat_ctx->is_dev_init = true;
+	IPADBG("IPA NAT driver init successfully\n");
+	result = 0;
+	goto bail;
+
+cdev_add_fail:
+	device_destroy(nat_ctx->class, nat_ctx->dev_num);
+device_create_fail:
+	unregister_chrdev_region(nat_ctx->dev_num, 1);
+alloc_chrdev_region_fail:
+	class_destroy(nat_ctx->class);
+vaddr_alloc_fail:
+	if (nat_ctx->vaddr) {
+		IPADBG("Releasing system memory\n");
+		dma_free_coherent(
+			 NULL, nat_ctx->size,
+			 nat_ctx->vaddr, nat_ctx->dma_handle);
+		nat_ctx->vaddr = NULL;
+		nat_ctx->dma_handle = 0;
+		nat_ctx->size = 0;
+	}
+bail:
+	mutex_unlock(&nat_ctx->lock);
+
+	return result;
+}
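+
+/*
+ * Illustrative usage sketch (a minimal example, not taken from the driver;
+ * the device name and size below are hypothetical):
+ *
+ *	struct ipa_ioc_nat_alloc_mem mem;
+ *
+ *	memset(&mem, 0, sizeof(mem));
+ *	strlcpy(mem.dev_name, "ipaNatTbl", sizeof(mem.dev_name));
+ *	mem.size = 4096;	larger than IPA_NAT_PHYS_MEM_SIZE (2K), so
+ *				system memory will be allocated
+ *	if (allocate_nat_device(&mem))
+ *		handle the failure
+ */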
+
+/* IOCTL function handlers */
+/**
+ * ipa_nat_init_cmd() - Post IP_V4_NAT_INIT command to IPA HW
+ * @init:	[in] initialization command attributes
+ *
+ * Called by NAT client driver to post IP_V4_NAT_INIT command to IPA HW
+ *
+ * Returns:	0 on success, negative on failure
+ */
+int ipa_nat_init_cmd(struct ipa_ioc_v4_nat_init *init)
+{
+	struct ipa_desc desc = { 0 };
+	struct ipa_ip_v4_nat_init *cmd;
+	u16 size = sizeof(struct ipa_ip_v4_nat_init);
+	int result;
+
+	IPADBG("\n");
+	if (init->tbl_index < 0 || init->table_entries <= 0) {
+		IPADBG("Table index or entries is zero\n");
+		result = -EPERM;
+		goto bail;
+	}
+	cmd = kmalloc(size, GFP_KERNEL);
+	if (!cmd) {
+		IPAERR("Failed to alloc immediate command object\n");
+		result = -ENOMEM;
+		goto bail;
+	}
+	if (ipa_ctx->nat_mem.vaddr) {
+		IPADBG("using system memory for nat table\n");
+		cmd->ipv4_rules_addr_type = IPA_NAT_SYSTEM_MEMORY;
+		cmd->ipv4_expansion_rules_addr_type = IPA_NAT_SYSTEM_MEMORY;
+		cmd->index_table_addr_type = IPA_NAT_SYSTEM_MEMORY;
+		cmd->index_table_expansion_addr_type = IPA_NAT_SYSTEM_MEMORY;
+
+		cmd->ipv4_rules_addr =
+			ipa_ctx->nat_mem.dma_handle + init->ipv4_rules_offset;
+		IPADBG("ipv4_rules_offset:0x%x\n", init->ipv4_rules_offset);
+
+		cmd->ipv4_expansion_rules_addr =
+		   ipa_ctx->nat_mem.dma_handle + init->expn_rules_offset;
+		IPADBG("expn_rules_offset:0x%x\n", init->expn_rules_offset);
+
+		cmd->index_table_addr =
+			ipa_ctx->nat_mem.dma_handle + init->index_offset;
+		IPADBG("index_offset:0x%x\n", init->index_offset);
+
+		cmd->index_table_expansion_addr =
+		   ipa_ctx->nat_mem.dma_handle + init->index_expn_offset;
+		IPADBG("index_expn_offset:0x%x\n", init->index_expn_offset);
+	} else {
+		IPADBG("using shared(local) memory for nat table\n");
+		cmd->ipv4_rules_addr_type = IPA_NAT_SHARED_MEMORY;
+		cmd->ipv4_expansion_rules_addr_type = IPA_NAT_SHARED_MEMORY;
+		cmd->index_table_addr_type = IPA_NAT_SHARED_MEMORY;
+		cmd->index_table_expansion_addr_type = IPA_NAT_SHARED_MEMORY;
+
+		cmd->ipv4_rules_addr =
+			init->ipv4_rules_offset + IPA_RAM_NAT_OFST;
+
+		cmd->ipv4_expansion_rules_addr =
+			init->expn_rules_offset + IPA_RAM_NAT_OFST;
+
+		cmd->index_table_addr = init->index_offset + IPA_RAM_NAT_OFST;
+
+		cmd->index_table_expansion_addr =
+			init->index_expn_offset + IPA_RAM_NAT_OFST;
+	}
+	cmd->table_index = init->tbl_index;
+	IPADBG("Table index:0x%x\n", cmd->table_index);
+	cmd->size_base_tables = init->table_entries;
+	IPADBG("Base Table size:0x%x\n", cmd->size_base_tables);
+	cmd->size_expansion_tables = init->expn_table_entries;
+	IPADBG("Expansion Table size:0x%x\n", cmd->size_expansion_tables);
+	cmd->public_ip_addr = init->ip_addr;
+	IPADBG("Public ip address:0x%x\n", cmd->public_ip_addr);
+	desc.opcode = IPA_IP_V4_NAT_INIT;
+	desc.type = IPA_IMM_CMD_DESC;
+	desc.callback = NULL;
+	desc.user1 = NULL;
+	desc.user2 = NULL;
+	desc.pyld = (void *)cmd;
+	desc.len = size;
+	IPADBG("posting v4 init command\n");
+	if (ipa_send_cmd(1, &desc)) {
+		IPAERR("Fail to send immediate command\n");
+		result = -EPERM;
+		goto free_cmd;
+	}
+
+	IPADBG("return\n");
+	result = 0;
+free_cmd:
+	kfree(cmd);
+bail:
+	return result;
+}
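+
+/*
+ * Note on the address handling above: for system memory the offsets supplied
+ * in struct ipa_ioc_v4_nat_init are added to the DMA handle of the buffer
+ * allocated by allocate_nat_device(), while for shared (local) memory they
+ * are added to IPA_RAM_NAT_OFST, i.e. presumably treated by HW as offsets
+ * into IPA SRAM.
+ */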
+
+/**
+ * ipa_nat_dma_cmd() - Post NAT_DMA command to IPA HW
+ * @dma:	[in] NAT table DMA command attributes
+ *
+ * Called by NAT client driver to post NAT_DMA command to IPA HW
+ *
+ * Returns:	0 on success, negative on failure
+ */
+int ipa_nat_dma_cmd(struct ipa_ioc_nat_dma_cmd *dma)
+{
+	struct ipa_nat_dma *cmd = NULL;
+	struct ipa_desc *desc = NULL;
+	u16 size = 0, cnt = 0;
+	int ret = 0;
+
+	IPADBG("\n");
+	if (dma->entries <= 0) {
+		IPADBG("Invalid number of commands\n");
+		ret = -EPERM;
+		goto bail;
+	}
+	size = sizeof(struct ipa_desc) * dma->entries;
+	desc = kmalloc(size, GFP_KERNEL);
+	if (desc == NULL) {
+		IPAERR("Failed to alloc memory\n");
+		ret = -ENOMEM;
+		goto bail;
+	}
+	size = sizeof(struct ipa_nat_dma) * dma->entries;
+	cmd = kmalloc(size, GFP_KERNEL);
+	if (cmd == NULL) {
+		IPAERR("Failed to alloc memory\n");
+		ret = -ENOMEM;
+		goto bail;
+	}
+	for (cnt = 0; cnt < dma->entries; cnt++) {
+		cmd[cnt].table_index = dma->dma[cnt].table_index;
+		cmd[cnt].base_addr = dma->dma[cnt].base_addr;
+		cmd[cnt].offset = dma->dma[cnt].offset;
+		cmd[cnt].data = dma->dma[cnt].data;
+		desc[cnt].type = IPA_IMM_CMD_DESC;
+		desc[cnt].opcode = IPA_NAT_DMA;
+		desc[cnt].callback = NULL;
+		desc[cnt].user1 = NULL;
+		desc[cnt].user2 = NULL;
+		desc[cnt].len = sizeof(struct ipa_nat_dma);
+		desc[cnt].pyld = (void *)&cmd[cnt];
+	}
+	IPADBG("posting dma command with entries %d\n", dma->entries);
+	ret = ipa_send_cmd(dma->entries, desc);
+	if (ret == -EPERM)
+		IPAERR("Fail to send immediate command\n");
+
+bail:
+	kfree(cmd);
+	kfree(desc);
+
+	return ret;
+}
+
+/**
+ * ipa_nat_free_mem_and_device() - free the NAT memory and remove the device
+ * @nat_ctx:	[in] the IPA NAT memory to free
+ *
+ * Called by NAT client driver to free the NAT memory and remove the device
+ */
+void ipa_nat_free_mem_and_device(struct ipa_nat_mem *nat_ctx)
+{
+	IPADBG("\n");
+	mutex_lock(&nat_ctx->lock);
+
+	if (nat_ctx->is_sys_mem) {
+		IPADBG("freeing the dma memory\n");
+		dma_free_coherent(
+			 NULL, nat_ctx->size,
+			 nat_ctx->vaddr, nat_ctx->dma_handle);
+		nat_ctx->size = 0;
+		nat_ctx->vaddr = NULL;
+	}
+	nat_ctx->is_mapped = false;
+	nat_ctx->is_sys_mem = false;
+	cdev_del(&nat_ctx->cdev);
+	device_destroy(nat_ctx->class, nat_ctx->dev_num);
+	unregister_chrdev_region(nat_ctx->dev_num, 1);
+	class_destroy(nat_ctx->class);
+	nat_ctx->is_dev_init = false;
+
+	mutex_unlock(&nat_ctx->lock);
+	IPADBG("return\n");
+	return;
+}
+
+/**
+ * ipa_nat_del_cmd() - Delete a NAT table
+ * @del:	[in] delete table parameters
+ *
+ * Called by NAT client driver to delete the nat table
+ *
+ * Returns:	0 on success, negative on failure
+ */
+int ipa_nat_del_cmd(struct ipa_ioc_v4_nat_del *del)
+{
+	struct ipa_desc desc = { 0 };
+	struct ipa_ip_v4_nat_init *cmd;
+	u16 size = sizeof(struct ipa_ip_v4_nat_init);
+	u8 mem_type = IPA_NAT_SHARED_MEMORY;
+	u32 base_addr = IPA_NAT_PHYS_MEM_OFFSET;
+	int result;
+
+	IPADBG("\n");
+	if (del->table_index < 0 || del->public_ip_addr == 0) {
+		IPADBG("Bad Parameter\n");
+		result = -EPERM;
+		goto bail;
+	}
+	cmd = kmalloc(size, GFP_KERNEL);
+	if (cmd == NULL) {
+		IPAERR("Failed to alloc immediate command object\n");
+		result = -ENOMEM;
+		goto bail;
+	}
+	cmd->table_index = del->table_index;
+	cmd->ipv4_rules_addr = base_addr;
+	cmd->ipv4_rules_addr_type = mem_type;
+	cmd->ipv4_expansion_rules_addr = base_addr;
+	cmd->ipv4_expansion_rules_addr_type = mem_type;
+	cmd->index_table_addr = base_addr;
+	cmd->index_table_addr_type = mem_type;
+	cmd->index_table_expansion_addr = base_addr;
+	cmd->index_table_expansion_addr_type = mem_type;
+	cmd->size_base_tables = 0;
+	cmd->size_expansion_tables = 0;
+	cmd->public_ip_addr = del->public_ip_addr;
+
+	desc.opcode = IPA_IP_V4_NAT_INIT;
+	desc.type = IPA_IMM_CMD_DESC;
+	desc.callback = NULL;
+	desc.user1 = NULL;
+	desc.user2 = NULL;
+	desc.pyld = (void *)cmd;
+	desc.len = size;
+	if (ipa_send_cmd(1, &desc)) {
+		IPAERR("Fail to send immediate command\n");
+		result = -EPERM;
+		goto free_mem;
+	}
+
+	ipa_nat_free_mem_and_device(&ipa_ctx->nat_mem);
+	IPADBG("return\n");
+	result = 0;
+free_mem:
+	kfree(cmd);
+bail:
+	return result;
+}
diff --git a/drivers/platform/msm/ipa/ipa_ram_mmap.h b/drivers/platform/msm/ipa/ipa_ram_mmap.h
new file mode 100644
index 0000000..000718b
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_ram_mmap.h
@@ -0,0 +1,35 @@
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _IPA_RAM_MMAP_H_
+#define _IPA_RAM_MMAP_H_
+
+/*
+ * This header defines the memory map of the IPA RAM (not all 8K is available
+ * for SW use); the first 2K are set aside for NAT.
+ */
+
+#define IPA_RAM_NAT_OFST    0
+#define IPA_RAM_NAT_SIZE    2048
+#define IPA_RAM_HDR_OFST    2048
+#define IPA_RAM_HDR_SIZE    256
+#define IPA_RAM_V4_FLT_OFST (IPA_RAM_HDR_OFST + IPA_RAM_HDR_SIZE)
+#define IPA_RAM_V4_FLT_SIZE 1024
+#define IPA_RAM_V4_RT_OFST  (IPA_RAM_V4_FLT_OFST + IPA_RAM_V4_FLT_SIZE)
+#define IPA_RAM_V4_RT_SIZE  1024
+#define IPA_RAM_V6_FLT_OFST (IPA_RAM_V4_RT_OFST + IPA_RAM_V4_RT_SIZE)
+#define IPA_RAM_V6_FLT_SIZE 1024
+#define IPA_RAM_V6_RT_OFST  (IPA_RAM_V6_FLT_OFST + IPA_RAM_V6_FLT_SIZE)
+#define IPA_RAM_V6_RT_SIZE  1024
+#define IPA_RAM_END_OFST    (IPA_RAM_V6_RT_OFST + IPA_RAM_V6_RT_SIZE)
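+
+/*
+ * Resulting layout in bytes (derived from the offsets/sizes above):
+ *	0    - 2047 : NAT
+ *	2048 - 2303 : header table
+ *	2304 - 3327 : IPv4 filter table
+ *	3328 - 4351 : IPv4 routing table
+ *	4352 - 5375 : IPv6 filter table
+ *	5376 - 6399 : IPv6 routing table
+ *	6400        : IPA_RAM_END_OFST
+ */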
+
+#endif /* _IPA_RAM_MMAP_H_ */
diff --git a/drivers/platform/msm/ipa/ipa_reg.h b/drivers/platform/msm/ipa/ipa_reg.h
new file mode 100644
index 0000000..61913b6
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_reg.h
@@ -0,0 +1,223 @@
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __IPA_REG_H__
+#define __IPA_REG_H__
+
+/*
+ * IPA's BAM specific registers
+ */
+
+#define IPA_BAM_REG_BASE_OFST 0x00004000
+
+#define IPA_BAM_CNFG_BITS_OFST 0x7c
+#define IPA_BAM_REMAP_SIZE (0x1000)
+
+/*
+ * IPA's core specific registers
+ */
+
+#define IPA_REG_BASE_OFST 0x00020000
+
+#define IPA_COMP_HW_VERSION_OFST 0x00000030
+#define IPA_COMP_HW_VERSION_RMSK 0xffffffff
+#define IPA_COMP_HW_VERSION_MAJOR_BMSK 0xff000000
+#define IPA_COMP_HW_VERSION_MAJOR_SHFT 0x18
+#define IPA_COMP_HW_VERSION_MINOR_BMSK 0xff0000
+#define IPA_COMP_HW_VERSION_MINOR_SHFT 0x10
+#define IPA_COMP_HW_VERSION_STEP_BMSK 0xffff
+#define IPA_COMP_HW_VERSION_STEP_SHFT 0x0
+
+#define IPA_VERSION_OFST 0x00000034
+#define IPA_VERSION_RMSK 0xffffffff
+#define IPA_VERSION_IPA_R_REV_BMSK 0xff000000
+#define IPA_VERSION_IPA_R_REV_SHFT 0x18
+#define IPA_VERSION_IPA_Q_REV_BMSK 0xff0000
+#define IPA_VERSION_IPA_Q_REV_SHFT 0x10
+#define IPA_VERSION_IPA_P_REV_BMSK 0xff00
+#define IPA_VERSION_IPA_P_REV_SHFT 0x8
+#define IPA_VERSION_IPA_ECO_REV_BMSK 0xff
+#define IPA_VERSION_IPA_ECO_REV_SHFT 0x0
+
+#define IPA_COMP_CFG_OFST 0x00000038
+#define IPA_COMP_CFG_RMSK 0x1
+#define IPA_COMP_CFG_ENABLE_BMSK 0x1
+#define IPA_COMP_CFG_ENABLE_SHFT 0x0
+
+#define IPA_COMP_SW_RESET_OFST 0x0000003c
+#define IPA_COMP_SW_RESET_RMSK 0x1
+#define IPA_COMP_SW_RESET_SW_RESET_BMSK 0x1
+#define IPA_COMP_SW_RESET_SW_RESET_SHFT 0x0
+
+#define IPA_CLKON_CFG_OFST 0x00000040
+#define IPA_CLKON_CFG_RMSK 0xf
+#define IPA_CLKON_CFG_CGC_OPEN_MISC_BMSK 0x8
+#define IPA_CLKON_CFG_CGC_OPEN_MISC_SHFT 0x3
+#define IPA_CLKON_CFG_CGC_OPEN_TX_BMSK 0x4
+#define IPA_CLKON_CFG_CGC_OPEN_TX_SHFT 0x2
+#define IPA_CLKON_CFG_CGC_OPEN_PROC_BMSK 0x2
+#define IPA_CLKON_CFG_CGC_OPEN_PROC_SHFT 0x1
+#define IPA_CLKON_CFG_CGC_OPEN_RX_BMSK 0x1
+#define IPA_CLKON_CFG_CGC_OPEN_RX_SHFT 0x0
+
+#define IPA_HEAD_OF_LINE_BLOCK_EN_OFST 0x00000044
+#define IPA_HEAD_OF_LINE_BLOCK_EN_RMSK 0x1
+#define IPA_HEAD_OF_LINE_BLOCK_EN_EN_BMSK 0x1
+#define IPA_HEAD_OF_LINE_BLOCK_EN_EN_SHFT 0x0
+
+#define IPA_HEAD_OF_LINE_BLOCK_TIMER_OFST 0x00000048
+#define IPA_HEAD_OF_LINE_BLOCK_TIMER_RMSK 0x1ff
+#define IPA_HEAD_OF_LINE_BLOCK_TIMER_TIMER_BMSK 0x1ff
+#define IPA_HEAD_OF_LINE_BLOCK_TIMER_TIMER_SHFT 0x0
+
+#define IPA_ROUTE_OFST 0x0000004c
+#define IPA_ROUTE_RMSK 0x1ffff
+#define IPA_ROUTE_ROUTE_DEF_HDR_OFST_BMSK 0x1ff80
+#define IPA_ROUTE_ROUTE_DEF_HDR_OFST_SHFT 0x7
+#define IPA_ROUTE_ROUTE_DEF_HDR_TABLE_BMSK 0x40
+#define IPA_ROUTE_ROUTE_DEF_HDR_TABLE_SHFT 0x6
+#define IPA_ROUTE_ROUTE_DEF_PIPE_BMSK 0x3e
+#define IPA_ROUTE_ROUTE_DEF_PIPE_SHFT 0x1
+#define IPA_ROUTE_ROUTE_DIS_BMSK 0x1
+#define IPA_ROUTE_ROUTE_DIS_SHFT 0x0
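+
+/*
+ * Example of the _BMSK/_SHFT access pattern used throughout this file
+ * (illustrative only; "val" is assumed to hold the contents of the IPA_ROUTE
+ * register, e.g. as returned by ipa_read_reg()):
+ *
+ *	def_pipe = (val & IPA_ROUTE_ROUTE_DEF_PIPE_BMSK) >>
+ *			IPA_ROUTE_ROUTE_DEF_PIPE_SHFT;
+ */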
+
+#define IPA_FILTER_OFST 0x00000050
+#define IPA_FILTER_RMSK 0x1
+#define IPA_FILTER_FILTER_EN_BMSK 0x1
+#define IPA_FILTER_FILTER_EN_SHFT 0x0
+
+#define IPA_MASTER_PRIORITY_OFST 0x00000054
+#define IPA_MASTER_PRIORITY_RMSK 0xffffffff
+#define IPA_MASTER_PRIORITY_MASTER_7_WR_BMSK 0xc0000000
+#define IPA_MASTER_PRIORITY_MASTER_7_WR_SHFT 0x1e
+#define IPA_MASTER_PRIORITY_MASTER_7_RD_BMSK 0x30000000
+#define IPA_MASTER_PRIORITY_MASTER_7_RD_SHFT 0x1c
+#define IPA_MASTER_PRIORITY_MASTER_6_WR_BMSK 0xc000000
+#define IPA_MASTER_PRIORITY_MASTER_6_WR_SHFT 0x1a
+#define IPA_MASTER_PRIORITY_MASTER_6_RD_BMSK 0x3000000
+#define IPA_MASTER_PRIORITY_MASTER_6_RD_SHFT 0x18
+#define IPA_MASTER_PRIORITY_MASTER_5_WR_BMSK 0xc00000
+#define IPA_MASTER_PRIORITY_MASTER_5_WR_SHFT 0x16
+#define IPA_MASTER_PRIORITY_MASTER_5_RD_BMSK 0x300000
+#define IPA_MASTER_PRIORITY_MASTER_5_RD_SHFT 0x14
+#define IPA_MASTER_PRIORITY_MASTER_4_WR_BMSK 0xc0000
+#define IPA_MASTER_PRIORITY_MASTER_4_WR_SHFT 0x12
+#define IPA_MASTER_PRIORITY_MASTER_4_RD_BMSK 0x30000
+#define IPA_MASTER_PRIORITY_MASTER_4_RD_SHFT 0x10
+#define IPA_MASTER_PRIORITY_MASTER_3_WR_BMSK 0xc000
+#define IPA_MASTER_PRIORITY_MASTER_3_WR_SHFT 0xe
+#define IPA_MASTER_PRIORITY_MASTER_3_RD_BMSK 0x3000
+#define IPA_MASTER_PRIORITY_MASTER_3_RD_SHFT 0xc
+#define IPA_MASTER_PRIORITY_MASTER_2_WR_BMSK 0xc00
+#define IPA_MASTER_PRIORITY_MASTER_2_WR_SHFT 0xa
+#define IPA_MASTER_PRIORITY_MASTER_2_RD_BMSK 0x300
+#define IPA_MASTER_PRIORITY_MASTER_2_RD_SHFT 0x8
+#define IPA_MASTER_PRIORITY_MASTER_1_WR_BMSK 0xc0
+#define IPA_MASTER_PRIORITY_MASTER_1_WR_SHFT 0x6
+#define IPA_MASTER_PRIORITY_MASTER_1_RD_BMSK 0x30
+#define IPA_MASTER_PRIORITY_MASTER_1_RD_SHFT 0x4
+#define IPA_MASTER_PRIORITY_MASTER_0_WR_BMSK 0xc
+#define IPA_MASTER_PRIORITY_MASTER_0_WR_SHFT 0x2
+#define IPA_MASTER_PRIORITY_MASTER_0_RD_BMSK 0x3
+#define IPA_MASTER_PRIORITY_MASTER_0_RD_SHFT 0x0
+
+#define IPA_SHARED_MEM_SIZE_OFST 0x00000058
+#define IPA_SHARED_MEM_SIZE_RMSK 0x1fff
+#define IPA_SHARED_MEM_SIZE_SHARED_MEM_SIZE_BMSK 0x1fff
+#define IPA_SHARED_MEM_SIZE_SHARED_MEM_SIZE_SHFT 0x0
+
+#define IPA_NAT_TIMER_OFST 0x0000005c
+#define IPA_NAT_TIMER_RMSK 0xffffff
+#define IPA_NAT_TIMER_NAT_TIMER_BMSK 0xffffff
+#define IPA_NAT_TIMER_NAT_TIMER_SHFT 0x0
+
+#define IPA_NAT_TIMER_RESET_OFST 0x00000060
+#define IPA_NAT_TIMER_RESET_RMSK 0x1
+#define IPA_NAT_TIMER_RESET_NAT_TIMER_RESET_BMSK 0x1
+#define IPA_NAT_TIMER_RESET_NAT_TIMER_RESET_SHFT 0x0
+
+#define IPA_ENDP_INIT_NAT_n_OFST(n) (0x00000080 + 0x4 * (n))
+#define IPA_ENDP_INIT_NAT_n_RMSK 0x3
+#define IPA_ENDP_INIT_NAT_n_MAXn 19
+#define IPA_ENDP_INIT_NAT_n_NAT_EN_BMSK 0x3
+#define IPA_ENDP_INIT_NAT_n_NAT_EN_SHFT 0x0
+
+#define IPA_ENDP_INIT_HDR_n_OFST(n) (0x000000e0 + 0x4 * (n))
+#define IPA_ENDP_INIT_HDR_n_RMSK 0x7ffffff
+#define IPA_ENDP_INIT_HDR_n_MAXn 19
+#define IPA_ENDP_INIT_HDR_n_HDR_A5_MUX_BMSK 0x4000000
+#define IPA_ENDP_INIT_HDR_n_HDR_A5_MUX_SHFT 0x1a
+#define IPA_ENDP_INIT_HDR_n_HDR_OFST_PKT_SIZE_BMSK 0x3f00000
+#define IPA_ENDP_INIT_HDR_n_HDR_OFST_PKT_SIZE_SHFT 0x14
+#define IPA_ENDP_INIT_HDR_n_HDR_OFST_PKT_SIZE_VALID_BMSK 0x80000
+#define IPA_ENDP_INIT_HDR_n_HDR_OFST_PKT_SIZE_VALID_SHFT 0x13
+#define IPA_ENDP_INIT_HDR_n_HDR_ADDITIONAL_CONST_LEN_BMSK 0x7e000
+#define IPA_ENDP_INIT_HDR_n_HDR_ADDITIONAL_CONST_LEN_SHFT 0xd
+#define IPA_ENDP_INIT_HDR_n_HDR_OFST_METADATA_BMSK 0x1f80
+#define IPA_ENDP_INIT_HDR_n_HDR_OFST_METADATA_SHFT 0x7
+#define IPA_ENDP_INIT_HDR_n_HDR_OFST_METADATA_VALID_BMSK 0x40
+#define IPA_ENDP_INIT_HDR_n_HDR_OFST_METADATA_VALID_SHFT 0x6
+#define IPA_ENDP_INIT_HDR_n_HDR_LEN_BMSK 0x3f
+#define IPA_ENDP_INIT_HDR_n_HDR_LEN_SHFT 0x0
+
+#define IPA_ENDP_INIT_MODE_n_OFST(n) (0x00000140 + 0x4 * (n))
+#define IPA_ENDP_INIT_MODE_n_RMSK 0x7f
+#define IPA_ENDP_INIT_MODE_n_MAXn 19
+#define IPA_ENDP_INIT_MODE_n_DEST_PIPE_INDEX_BMSK 0x7c
+#define IPA_ENDP_INIT_MODE_n_DEST_PIPE_INDEX_SHFT 0x2
+#define IPA_ENDP_INIT_MODE_n_MODE_BMSK 0x3
+#define IPA_ENDP_INIT_MODE_n_MODE_SHFT 0x0
+
+#define IPA_ENDP_INIT_AGGR_n_OFST(n) (0x000001a0 + 0x4 * (n))
+#define IPA_ENDP_INIT_AGGR_n_RMSK 0x7fff
+#define IPA_ENDP_INIT_AGGR_n_MAXn 19
+#define IPA_ENDP_INIT_AGGR_n_AGGR_TIME_LIMIT_BMSK 0x7c00
+#define IPA_ENDP_INIT_AGGR_n_AGGR_TIME_LIMIT_SHFT 0xa
+#define IPA_ENDP_INIT_AGGR_n_AGGR_BYTE_LIMIT_BMSK 0x3e0
+#define IPA_ENDP_INIT_AGGR_n_AGGR_BYTE_LIMIT_SHFT 0x5
+#define IPA_ENDP_INIT_AGGR_n_AGGR_TYPE_BMSK 0x1c
+#define IPA_ENDP_INIT_AGGR_n_AGGR_TYPE_SHFT 0x2
+#define IPA_ENDP_INIT_AGGR_n_AGGR_EN_BMSK 0x3
+#define IPA_ENDP_INIT_AGGR_n_AGGR_EN_SHFT 0x0
+
+#define IPA_ENDP_INIT_ROUTE_n_OFST(n) (0x00000200 + 0x4 * (n))
+#define IPA_ENDP_INIT_ROUTE_n_RMSK 0x1f
+#define IPA_ENDP_INIT_ROUTE_n_MAXn 19
+#define IPA_ENDP_INIT_ROUTE_n_ROUTE_TABLE_INDEX_BMSK 0x1f
+#define IPA_ENDP_INIT_ROUTE_n_ROUTE_TABLE_INDEX_SHFT 0x0
+
+#define IPA_AGGREGATION_SPARE_REG_1_OFST 0x00002090
+#define IPA_AGGREGATION_SPARE_REG_1_RMSK 0xffffffff
+#define IPA_AGGREGATION_SPARE_REG_1_GENERAL_CONFIG_BMSK 0xffffffff
+#define IPA_AGGREGATION_SPARE_REG_1_GENERAL_CONFIG_SHFT 0x0
+
+#define IPA_AGGREGATION_SPARE_REG_2_OFST 0x00002094
+#define IPA_AGGREGATION_SPARE_REG_2_RMSK 0xffffffff
+#define IPA_AGGREGATION_SPARE_REG_2_GENERAL_CONFIG_BMSK 0xffffffff
+#define IPA_AGGREGATION_SPARE_REG_2_GENERAL_CONFIG_SHFT 0x0
+
+#define IPA_AGGREGATION_MODE_MSK 0x1
+#define IPA_AGGREGATION_MODE_SHFT 31
+#define IPA_AGGREGATION_MODE_BMSK 0x7fffffff
+#define IPA_AGGREGATION_QCNCM_SIG0_SHFT 16
+#define IPA_AGGREGATION_QCNCM_SIG1_SHFT 8
+#define IPA_AGGREGATION_QCNCM_SIG_BMSK 0xff000000
+#define IPA_AGGREGATION_SINGLE_NDP_MSK 0x1
+#define IPA_AGGREGATION_SINGLE_NDP_BMSK 0xfffffffe
+
+#define IPA_SRAM_DIRECT_ACCESS_n_OFST(n) (0x00004000 + 0x4 * (n))
+#define IPA_SRAM_DIRECT_ACCESS_n_RMSK 0xffffffff
+#define IPA_SRAM_DIRECT_ACCESS_n_MAXn 2047
+#define IPA_SRAM_DIRECT_ACCESS_n_DATA_WORD_BMSK 0xffffffff
+#define IPA_SRAM_DIRECT_ACCESS_n_DATA_WORD_SHFT 0x0
+
+#endif /* __IPA_REG_H__ */
diff --git a/drivers/platform/msm/ipa/ipa_rt.c b/drivers/platform/msm/ipa/ipa_rt.c
new file mode 100644
index 0000000..c69e1fb
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_rt.c
@@ -0,0 +1,964 @@
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/bitops.h>
+#include "ipa_i.h"
+
+#define IPA_RT_TABLE_INDEX_NOT_FOUND	(-1)
+#define IPA_RT_TABLE_WORD_SIZE		(4)
+#define IPA_RT_INDEX_BITMAP_SIZE	(32)
+#define IPA_RT_TABLE_MEMORY_ALLIGNMENT	(127)
+#define IPA_RT_ENTRY_MEMORY_ALLIGNMENT	(3)
+#define IPA_RT_BIT_MASK			(0x1)
+#define IPA_RT_STATUS_OF_ADD_FAILED	(-1)
+#define IPA_RT_STATUS_OF_DEL_FAILED	(-1)
+
+/**
+ * ipa_generate_rt_hw_rule() - generates the routing hardware rule
+ * @ip: the ip address family type
+ * @entry: routing entry
+ * @buf: output buffer; buf == NULL means the caller only wants to know the
+ *		size of the rule as seen by HW, so no valid buffer was passed
+ *		and a scratch buffer is used instead. With this scheme the
+ *		rule is generated twice: once to learn its size using the
+ *		scratch buffer, and a second time to write the rule into the
+ *		caller-supplied buffer of the required size
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * caller needs to hold any needed locks to ensure integrity
+ *
+ */
+static int ipa_generate_rt_hw_rule(enum ipa_ip_type ip,
+		struct ipa_rt_entry *entry, u8 *buf)
+{
+	struct ipa_rt_rule_hw_hdr *rule_hdr;
+	const struct ipa_rt_rule *rule =
+		(const struct ipa_rt_rule *)&entry->rule;
+	u16 en_rule = 0;
+	u8 tmp[IPA_RT_FLT_HW_RULE_BUF_SIZE];
+	u8 *start;
+	int pipe_idx;
+
+	memset(tmp, 0, IPA_RT_FLT_HW_RULE_BUF_SIZE);
+	if (buf == NULL)
+		buf = tmp;
+
+	start = buf;
+	rule_hdr = (struct ipa_rt_rule_hw_hdr *)buf;
+	pipe_idx = ipa_get_ep_mapping(ipa_ctx->mode,
+			entry->rule.dst);
+	if (pipe_idx == -1) {
+		IPAERR("Wrong destination pipe specified in RT rule\n");
+		WARN_ON(1);
+		return -EPERM;
+	}
+	rule_hdr->u.hdr.pipe_dest_idx = pipe_idx;
+	rule_hdr->u.hdr.system = !ipa_ctx->hdr_tbl_lcl;
+	if (entry->hdr)
+		rule_hdr->u.hdr.hdr_offset =
+			entry->hdr->offset_entry->offset >> 2;
+	else
+		rule_hdr->u.hdr.hdr_offset = 0;
+
+	buf += sizeof(struct ipa_rt_rule_hw_hdr);
+	if (ipa_generate_hw_rule(ip, &rule->attrib, &buf, &en_rule)) {
+		IPAERR("fail to generate hw rule\n");
+		return -EPERM;
+	}
+
+	IPADBG("en_rule 0x%x\n", en_rule);
+
+	rule_hdr->u.hdr.en_rule = en_rule;
+	ipa_write_32(rule_hdr->u.word, (u8 *)rule_hdr);
+
+	if (entry->hw_len == 0) {
+		entry->hw_len = buf - start;
+	} else if (entry->hw_len != (buf - start)) {
+		IPAERR("hw_len differs b/w passes passed=0x%x calc=0x%x\n",
+		       entry->hw_len,
+		       (buf - start));
+		return -EPERM;
+	}
+
+	return 0;
+}
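+
+/*
+ * The NULL-buffer scheme above is used in two passes: ipa_get_rt_hw_tbl_size()
+ * calls ipa_generate_rt_hw_rule(ip, entry, NULL) only to populate
+ * entry->hw_len, and ipa_generate_rt_hw_tbl() later calls it again with a
+ * real buffer sized from the accumulated hw_len values.
+ */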
+
+/**
+ * ipa_get_rt_hw_tbl_size() - returns the size of HW routing table
+ * @ip: the ip address family type
+ * @hdr_sz: header size
+ * @max_rt_idx: maximal index
+ *
+ * Returns:	size of the HW routing table on success, negative on failure
+ *
+ * caller needs to hold any needed locks to ensure integrity
+ *
+ * the highest bit set in rt_idx_bitmap determines the routing tbl hdr size
+ */
+static int ipa_get_rt_hw_tbl_size(enum ipa_ip_type ip, u32 *hdr_sz,
+		int *max_rt_idx)
+{
+	struct ipa_rt_tbl_set *set;
+	struct ipa_rt_tbl *tbl;
+	struct ipa_rt_entry *entry;
+	u32 total_sz = 0;
+	u32 tbl_sz;
+	u32 bitmap = ipa_ctx->rt_idx_bitmap[ip];
+	int highest_bit_set = IPA_RT_TABLE_INDEX_NOT_FOUND;
+	int i;
+
+	*hdr_sz = 0;
+	set = &ipa_ctx->rt_tbl_set[ip];
+
+	for (i = 0; i < IPA_RT_INDEX_BITMAP_SIZE; i++) {
+		if (bitmap & IPA_RT_BIT_MASK)
+			highest_bit_set = i;
+		bitmap >>= 1;
+	}
+
+	*max_rt_idx = highest_bit_set;
+	if (highest_bit_set == IPA_RT_TABLE_INDEX_NOT_FOUND) {
+		IPAERR("no rt tbls present\n");
+		total_sz = IPA_RT_TABLE_WORD_SIZE;
+		*hdr_sz = IPA_RT_TABLE_WORD_SIZE;
+		return total_sz;
+	}
+
+	*hdr_sz = (highest_bit_set + 1) * IPA_RT_TABLE_WORD_SIZE;
+	total_sz += *hdr_sz;
+	list_for_each_entry(tbl, &set->head_rt_tbl_list, link) {
+		tbl_sz = 0;
+		list_for_each_entry(entry, &tbl->head_rt_rule_list, link) {
+			if (ipa_generate_rt_hw_rule(ip, entry, NULL)) {
+				IPAERR("failed to find HW RT rule size\n");
+				return -EPERM;
+			}
+			tbl_sz += entry->hw_len;
+		}
+
+		if (tbl_sz)
+			tbl->sz = tbl_sz + IPA_RT_TABLE_WORD_SIZE;
+
+		if (tbl->in_sys)
+			continue;
+
+		if (tbl_sz) {
+			/* add the terminator */
+			total_sz += (tbl_sz + IPA_RT_TABLE_WORD_SIZE);
+			/* every rule-set should start at word boundary */
+			total_sz = (total_sz + IPA_RT_ENTRY_MEMORY_ALLIGNMENT) &
+						~IPA_RT_ENTRY_MEMORY_ALLIGNMENT;
+		}
+	}
+
+	IPADBG("RT HW TBL SZ %d HDR SZ %d IP %d\n", total_sz, *hdr_sz, ip);
+
+	return total_sz;
+}
+
+/**
+ * ipa_generate_rt_hw_tbl() - generates the routing hardware table
+ * @ip:	[in] the ip address family type
+ * @mem:	[out] buffer to put the routing table
+ *
+ * Returns:	0 on success, negative on failure
+ */
+int ipa_generate_rt_hw_tbl(enum ipa_ip_type ip, struct ipa_mem_buffer *mem)
+{
+	struct ipa_rt_tbl *tbl;
+	struct ipa_rt_entry *entry;
+	struct ipa_rt_tbl_set *set;
+	u32 hdr_sz;
+	u32 offset;
+	u8 *hdr;
+	u8 *body;
+	u8 *base;
+	struct ipa_mem_buffer rt_tbl_mem;
+	u8 *rt_tbl_mem_body;
+	int max_rt_idx;
+	int i;
+
+	mem->size = ipa_get_rt_hw_tbl_size(ip, &hdr_sz, &max_rt_idx);
+	mem->size = (mem->size + IPA_RT_TABLE_MEMORY_ALLIGNMENT) &
+				~IPA_RT_TABLE_MEMORY_ALLIGNMENT;
+
+	if (mem->size == 0) {
+		IPAERR("rt tbl empty ip=%d\n", ip);
+		goto error;
+	}
+	mem->base = dma_alloc_coherent(NULL, mem->size, &mem->phys_base,
+			GFP_KERNEL);
+	if (!mem->base) {
+		IPAERR("fail to alloc DMA buff of size %d\n", mem->size);
+		goto error;
+	}
+
+	memset(mem->base, 0, mem->size);
+
+	/* build the rt tbl in the DMA buffer to submit to IPA HW */
+	base = hdr = (u8 *)mem->base;
+	body = base + hdr_sz;
+
+	/* setup all indices to point to the empty sys rt tbl */
+	for (i = 0; i <= max_rt_idx; i++)
+		ipa_write_32(ipa_ctx->empty_rt_tbl_mem.phys_base,
+				hdr + (i * IPA_RT_TABLE_WORD_SIZE));
+
+	set = &ipa_ctx->rt_tbl_set[ip];
+	list_for_each_entry(tbl, &set->head_rt_tbl_list, link) {
+		offset = body - base;
+		if (offset & IPA_RT_ENTRY_MEMORY_ALLIGNMENT) {
+			IPAERR("offset is not word multiple %d\n", offset);
+			goto proc_err;
+		}
+
+		if (!tbl->in_sys) {
+			/* convert offset to words from bytes */
+			offset &= ~IPA_RT_ENTRY_MEMORY_ALLIGNMENT;
+			/* rule is at an offset from base */
+			offset |= IPA_RT_BIT_MASK;
+
+			/* update the hdr at the right index */
+			ipa_write_32(offset, hdr +
+					(tbl->idx * IPA_RT_TABLE_WORD_SIZE));
+
+			/* generate the rule-set */
+			list_for_each_entry(entry, &tbl->head_rt_rule_list,
+					link) {
+				if (ipa_generate_rt_hw_rule(ip, entry, body)) {
+					IPAERR("failed to gen HW RT rule\n");
+					goto proc_err;
+				}
+				body += entry->hw_len;
+			}
+
+			/* write the rule-set terminator */
+			body = ipa_write_32(0, body);
+			if ((u32)body & IPA_RT_ENTRY_MEMORY_ALLIGNMENT)
+				/* advance body to next word boundary */
+				body = body + (IPA_RT_TABLE_WORD_SIZE -
+					      ((u32)body &
+					      IPA_RT_ENTRY_MEMORY_ALLIGNMENT));
+		} else {
+			WARN_ON(tbl->sz == 0);
+			/* allocate memory for the RT tbl */
+			rt_tbl_mem.size = tbl->sz;
+			rt_tbl_mem.base =
+			   dma_alloc_coherent(NULL, rt_tbl_mem.size,
+					   &rt_tbl_mem.phys_base, GFP_KERNEL);
+			if (!rt_tbl_mem.base) {
+				IPAERR("fail to alloc DMA buff of size %d\n",
+						rt_tbl_mem.size);
+				WARN_ON(1);
+				goto proc_err;
+			}
+
+			WARN_ON(rt_tbl_mem.phys_base &
+					IPA_RT_ENTRY_MEMORY_ALLIGNMENT);
+			rt_tbl_mem_body = rt_tbl_mem.base;
+			memset(rt_tbl_mem.base, 0, rt_tbl_mem.size);
+			/* update the hdr at the right index */
+			ipa_write_32(rt_tbl_mem.phys_base,
+					hdr + (tbl->idx *
+					IPA_RT_TABLE_WORD_SIZE));
+			/* generate the rule-set */
+			list_for_each_entry(entry, &tbl->head_rt_rule_list,
+					link) {
+				if (ipa_generate_rt_hw_rule(ip, entry,
+							rt_tbl_mem_body)) {
+					IPAERR("failed to gen HW RT rule\n");
+					WARN_ON(1);
+					goto rt_table_mem_alloc_failed;
+				}
+				rt_tbl_mem_body += entry->hw_len;
+			}
+
+			/* write the rule-set terminator */
+			rt_tbl_mem_body = ipa_write_32(0, rt_tbl_mem_body);
+
+			if (tbl->curr_mem.phys_base) {
+				WARN_ON(tbl->prev_mem.phys_base);
+				tbl->prev_mem = tbl->curr_mem;
+			}
+			tbl->curr_mem = rt_tbl_mem;
+		}
+	}
+
+	return 0;
+
+rt_table_mem_alloc_failed:
+	dma_free_coherent(NULL, rt_tbl_mem.size,
+			  rt_tbl_mem.base, rt_tbl_mem.phys_base);
+proc_err:
+	dma_free_coherent(NULL, mem->size, mem->base, mem->phys_base);
+error:
+	return -EPERM;
+}
+
+static void __ipa_reap_sys_rt_tbls(enum ipa_ip_type ip)
+{
+	struct ipa_rt_tbl *tbl;
+	struct ipa_rt_tbl *next;
+	struct ipa_rt_tbl_set *set;
+
+	set = &ipa_ctx->rt_tbl_set[ip];
+	list_for_each_entry(tbl, &set->head_rt_tbl_list, link) {
+		if (tbl->prev_mem.phys_base) {
+			IPADBG("reaping rt tbl name=%s ip=%d\n", tbl->name, ip);
+			dma_free_coherent(NULL, tbl->prev_mem.size,
+					tbl->prev_mem.base,
+					tbl->prev_mem.phys_base);
+			memset(&tbl->prev_mem, 0, sizeof(tbl->prev_mem));
+		}
+	}
+
+	set = &ipa_ctx->reap_rt_tbl_set[ip];
+	list_for_each_entry_safe(tbl, next, &set->head_rt_tbl_list, link) {
+		list_del(&tbl->link);
+		WARN_ON(tbl->prev_mem.phys_base != 0);
+		if (tbl->curr_mem.phys_base) {
+			IPADBG("reaping sys rt tbl name=%s ip=%d\n", tbl->name,
+					ip);
+			dma_free_coherent(NULL, tbl->curr_mem.size,
+					tbl->curr_mem.base,
+					tbl->curr_mem.phys_base);
+			kmem_cache_free(ipa_ctx->rt_tbl_cache, tbl);
+		}
+	}
+}
+
+static int __ipa_commit_rt(enum ipa_ip_type ip)
+{
+	struct ipa_desc desc = { 0 };
+	struct ipa_mem_buffer *mem;
+	void *cmd;
+	struct ipa_ip_v4_routing_init *v4;
+	struct ipa_ip_v6_routing_init *v6;
+	u16 avail;
+	u16 size;
+
+	mem = kmalloc(sizeof(struct ipa_mem_buffer), GFP_KERNEL);
+	if (!mem) {
+		IPAERR("failed to alloc memory object\n");
+		goto fail_alloc_mem;
+	}
+
+	if (ip == IPA_IP_v4) {
+		avail = IPA_RAM_V4_RT_SIZE;
+		size = sizeof(struct ipa_ip_v4_routing_init);
+	} else {
+		avail = IPA_RAM_V6_RT_SIZE;
+		size = sizeof(struct ipa_ip_v6_routing_init);
+	}
+	cmd = kmalloc(size, GFP_KERNEL);
+	if (!cmd) {
+		IPAERR("failed to alloc immediate command object\n");
+		goto fail_alloc_cmd;
+	}
+
+	if (ipa_generate_rt_hw_tbl(ip, mem)) {
+		IPAERR("fail to generate RT HW TBL ip %d\n", ip);
+		goto fail_hw_tbl_gen;
+	}
+
+	if (mem->size > avail) {
+		IPAERR("tbl too big, needed %d avail %d\n", mem->size, avail);
+		goto fail_hw_tbl_gen;
+	}
+
+	if (ip == IPA_IP_v4) {
+		v4 = (struct ipa_ip_v4_routing_init *)cmd;
+		desc.opcode = IPA_IP_V4_ROUTING_INIT;
+		v4->ipv4_rules_addr = mem->phys_base;
+		v4->size_ipv4_rules = mem->size;
+		v4->ipv4_addr = IPA_RAM_V4_RT_OFST;
+	} else {
+		v6 = (struct ipa_ip_v6_routing_init *)cmd;
+		desc.opcode = IPA_IP_V6_ROUTING_INIT;
+		v6->ipv6_rules_addr = mem->phys_base;
+		v6->size_ipv6_rules = mem->size;
+		v6->ipv6_addr = IPA_RAM_V6_RT_OFST;
+	}
+
+	desc.pyld = cmd;
+	desc.len = size;
+	desc.type = IPA_IMM_CMD_DESC;
+	IPA_DUMP_BUFF(mem->base, mem->phys_base, mem->size);
+
+	if (ipa_send_cmd(1, &desc)) {
+		IPAERR("fail to send immediate command\n");
+		goto fail_send_cmd;
+	}
+
+	__ipa_reap_sys_rt_tbls(ip);
+	dma_free_coherent(NULL, mem->size, mem->base, mem->phys_base);
+	kfree(cmd);
+	kfree(mem);
+
+	return 0;
+
+fail_send_cmd:
+	if (mem->phys_base)
+		dma_free_coherent(NULL, mem->size, mem->base, mem->phys_base);
+fail_hw_tbl_gen:
+	kfree(cmd);
+fail_alloc_cmd:
+	kfree(mem);
+fail_alloc_mem:
+	return -EPERM;
+}
+
+/**
+ * __ipa_find_rt_tbl() - find the routing table
+ *			whose name is given as a parameter
+ * @ip:	[in] the ip address family type of the wanted routing table
+ * @name:	[in] the name of the wanted routing table
+ *
+ * Returns: the routing table whose name is given as a parameter, or NULL if it
+ * doesn't exist
+ */
+struct ipa_rt_tbl *__ipa_find_rt_tbl(enum ipa_ip_type ip, const char *name)
+{
+	struct ipa_rt_tbl *entry;
+	struct ipa_rt_tbl_set *set;
+
+	set = &ipa_ctx->rt_tbl_set[ip];
+	list_for_each_entry(entry, &set->head_rt_tbl_list, link) {
+		if (!strncmp(name, entry->name, IPA_RESOURCE_NAME_MAX))
+			return entry;
+	}
+
+	return NULL;
+}
+
+static struct ipa_rt_tbl *__ipa_add_rt_tbl(enum ipa_ip_type ip,
+		const char *name)
+{
+	struct ipa_rt_tbl *entry;
+	struct ipa_rt_tbl_set *set;
+	struct ipa_tree_node *node;
+	int i;
+
+	node = kmem_cache_zalloc(ipa_ctx->tree_node_cache, GFP_KERNEL);
+	if (!node) {
+		IPAERR("failed to alloc tree node object\n");
+		goto node_alloc_fail;
+	}
+
+	if (ip >= IPA_IP_MAX || name == NULL) {
+		IPAERR("bad parm\n");
+		goto error;
+	}
+
+	set = &ipa_ctx->rt_tbl_set[ip];
+	/* check if this table exists */
+	entry = __ipa_find_rt_tbl(ip, name);
+	if (!entry) {
+		entry = kmem_cache_zalloc(ipa_ctx->rt_tbl_cache, GFP_KERNEL);
+		if (!entry) {
+			IPAERR("failed to alloc RT tbl object\n");
+			goto error;
+		}
+		/* find a routing tbl index */
+		for (i = 0; i < IPA_RT_INDEX_BITMAP_SIZE; i++) {
+			if (!test_bit(i, &ipa_ctx->rt_idx_bitmap[ip])) {
+				entry->idx = i;
+				set_bit(i, &ipa_ctx->rt_idx_bitmap[ip]);
+				break;
+			}
+		}
+		if (i == IPA_RT_INDEX_BITMAP_SIZE) {
+			IPAERR("no free RT tbl indices left\n");
+			goto fail_rt_idx_alloc;
+		}
+
+		INIT_LIST_HEAD(&entry->head_rt_rule_list);
+		INIT_LIST_HEAD(&entry->link);
+		strlcpy(entry->name, name, IPA_RESOURCE_NAME_MAX);
+		entry->set = set;
+		entry->cookie = IPA_COOKIE;
+		entry->in_sys = (ip == IPA_IP_v4) ?
+			!ipa_ctx->ip4_rt_tbl_lcl : !ipa_ctx->ip6_rt_tbl_lcl;
+		set->tbl_cnt++;
+		list_add(&entry->link, &set->head_rt_tbl_list);
+
+		IPADBG("add rt tbl idx=%d tbl_cnt=%d ip=%d\n", entry->idx,
+				set->tbl_cnt, ip);
+
+		node->hdl = (u32)entry;
+		if (ipa_insert(&ipa_ctx->rt_tbl_hdl_tree, node)) {
+			IPAERR("failed to add to tree\n");
+			WARN_ON(1);
+		}
+	}
+
+	return entry;
+
+fail_rt_idx_alloc:
+	entry->cookie = 0;
+	kmem_cache_free(ipa_ctx->rt_tbl_cache, entry);
+error:
+	kmem_cache_free(ipa_ctx->tree_node_cache, node);
+node_alloc_fail:
+	return NULL;
+}
+
+static int __ipa_del_rt_tbl(struct ipa_rt_tbl *entry)
+{
+	struct ipa_tree_node *node;
+	enum ipa_ip_type ip = IPA_IP_MAX;
+
+	if (entry == NULL || (entry->cookie != IPA_COOKIE)) {
+		IPAERR("bad parms\n");
+		return -EINVAL;
+	}
+	node = ipa_search(&ipa_ctx->rt_tbl_hdl_tree, (u32)entry);
+	if (node == NULL) {
+		IPAERR("lookup failed\n");
+		return -EPERM;
+	}
+
+	if (entry->set == &ipa_ctx->rt_tbl_set[IPA_IP_v4])
+		ip = IPA_IP_v4;
+	else if (entry->set == &ipa_ctx->rt_tbl_set[IPA_IP_v6])
+		ip = IPA_IP_v6;
+	else
+		WARN_ON(1);
+
+	if (!entry->in_sys) {
+		list_del(&entry->link);
+		clear_bit(entry->idx, &ipa_ctx->rt_idx_bitmap[ip]);
+		entry->set->tbl_cnt--;
+		IPADBG("del rt tbl_idx=%d tbl_cnt=%d\n", entry->idx,
+				entry->set->tbl_cnt);
+		kmem_cache_free(ipa_ctx->rt_tbl_cache, entry);
+	} else {
+		list_move(&entry->link,
+				&ipa_ctx->reap_rt_tbl_set[ip].head_rt_tbl_list);
+		clear_bit(entry->idx, &ipa_ctx->rt_idx_bitmap[ip]);
+		entry->set->tbl_cnt--;
+		IPADBG("del sys rt tbl_idx=%d tbl_cnt=%d\n", entry->idx,
+				entry->set->tbl_cnt);
+	}
+
+	/* remove the handle from the database */
+	rb_erase(&node->node, &ipa_ctx->rt_tbl_hdl_tree);
+	kmem_cache_free(ipa_ctx->tree_node_cache, node);
+
+	return 0;
+}
+
+static int __ipa_add_rt_rule(enum ipa_ip_type ip, const char *name,
+		const struct ipa_rt_rule *rule, u8 at_rear, u32 *rule_hdl)
+{
+	struct ipa_rt_tbl *tbl;
+	struct ipa_rt_entry *entry;
+	struct ipa_tree_node *node;
+
+	if (rule->hdr_hdl &&
+	    ((ipa_search(&ipa_ctx->hdr_hdl_tree, rule->hdr_hdl) == NULL) ||
+	     ((struct ipa_hdr_entry *)rule->hdr_hdl)->cookie != IPA_COOKIE)) {
+		IPAERR("rt rule does not point to valid hdr\n");
+		goto error;
+	}
+
+	node = kmem_cache_zalloc(ipa_ctx->tree_node_cache, GFP_KERNEL);
+	if (!node) {
+		IPAERR("failed to alloc tree node object\n");
+		goto error;
+	}
+
+	tbl = __ipa_add_rt_tbl(ip, name);
+	if (tbl == NULL || (tbl->cookie != IPA_COOKIE)) {
+		IPAERR("bad params\n");
+		goto fail_rt_tbl_sanity;
+	}
+	/*
+	 * do not allow any rules to be added at end of the "default" routing
+	 * tables
+	 */
+	if (!strncmp(tbl->name, IPA_DFLT_RT_TBL_NAME, IPA_RESOURCE_NAME_MAX) &&
+	    (tbl->rule_cnt > 0) && (at_rear != 0)) {
+		IPAERR("cannot add rule at end of tbl rule_cnt=%d at_rear=%d\n",
+		       tbl->rule_cnt, at_rear);
+		goto fail_rt_tbl_sanity;
+	}
+
+	entry = kmem_cache_zalloc(ipa_ctx->rt_rule_cache, GFP_KERNEL);
+	if (!entry) {
+		IPAERR("failed to alloc RT rule object\n");
+		goto fail_rt_tbl_sanity;
+	}
+	INIT_LIST_HEAD(&entry->link);
+	entry->cookie = IPA_COOKIE;
+	entry->rule = *rule;
+	entry->tbl = tbl;
+	entry->hdr = (struct ipa_hdr_entry *)rule->hdr_hdl;
+	if (at_rear)
+		list_add_tail(&entry->link, &tbl->head_rt_rule_list);
+	else
+		list_add(&entry->link, &tbl->head_rt_rule_list);
+	tbl->rule_cnt++;
+	if (entry->hdr)
+		entry->hdr->ref_cnt++;
+	IPADBG("add rt rule tbl_idx=%d rule_cnt=%d\n", tbl->idx, tbl->rule_cnt);
+	*rule_hdl = (u32)entry;
+
+	node->hdl = *rule_hdl;
+	if (ipa_insert(&ipa_ctx->rt_rule_hdl_tree, node)) {
+		IPAERR("failed to add to tree\n");
+		WARN_ON(1);
+		goto ipa_insert_failed;
+	}
+
+	return 0;
+
+ipa_insert_failed:
+	list_del(&entry->link);
+	kmem_cache_free(ipa_ctx->rt_rule_cache, entry);
+fail_rt_tbl_sanity:
+	kmem_cache_free(ipa_ctx->tree_node_cache, node);
+error:
+	return -EPERM;
+}
+
+/**
+ * ipa_add_rt_rule() - Add the specified routing rules to SW and optionally
+ * commit to IPA HW
+ * @rules:	[inout] set of routing rules to add
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa_add_rt_rule(struct ipa_ioc_add_rt_rule *rules)
+{
+	int i;
+	int ret;
+
+	if (rules == NULL || rules->num_rules == 0 || rules->ip >= IPA_IP_MAX) {
+		IPAERR("bad parm\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&ipa_ctx->lock);
+	for (i = 0; i < rules->num_rules; i++) {
+		if (__ipa_add_rt_rule(rules->ip, rules->rt_tbl_name,
+					&rules->rules[i].rule,
+					rules->rules[i].at_rear,
+					&rules->rules[i].rt_rule_hdl)) {
+			IPAERR("failed to add rt rule %d\n", i);
+			rules->rules[i].status = IPA_RT_STATUS_OF_ADD_FAILED;
+		} else {
+			rules->rules[i].status = 0;
+		}
+	}
+
+	if (rules->commit)
+		if (__ipa_commit_rt(rules->ip)) {
+			ret = -EPERM;
+			goto bail;
+		}
+
+	ret = 0;
+bail:
+	mutex_unlock(&ipa_ctx->lock);
+	return ret;
+}
+EXPORT_SYMBOL(ipa_add_rt_rule);
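+
+/*
+ * Illustrative usage sketch (assumes ipa_ioc_add_rt_rule ends with a flexible
+ * array of rule entries; the table name and variable names are hypothetical):
+ *
+ *	struct ipa_ioc_add_rt_rule *req;
+ *
+ *	req = kzalloc(sizeof(*req) + sizeof(req->rules[0]), GFP_KERNEL);
+ *	req->ip = IPA_IP_v4;
+ *	req->num_rules = 1;
+ *	req->commit = 1;
+ *	strlcpy(req->rt_tbl_name, "my_v4_tbl", IPA_RESOURCE_NAME_MAX);
+ *	req->rules[0].rule.dst = <destination client>;
+ *	req->rules[0].at_rear = 1;
+ *	if (ipa_add_rt_rule(req) || req->rules[0].status)
+ *		handle the failure
+ *	kfree(req);
+ */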
+
+static int __ipa_del_rt_rule(u32 rule_hdl)
+{
+	struct ipa_rt_entry *entry = (struct ipa_rt_entry *)rule_hdl;
+	struct ipa_tree_node *node;
+
+	if (entry == NULL || (entry->cookie != IPA_COOKIE)) {
+		IPAERR("bad params\n");
+		return -EINVAL;
+	}
+	node = ipa_search(&ipa_ctx->rt_rule_hdl_tree, rule_hdl);
+	if (node == NULL) {
+		IPAERR("lookup failed\n");
+		return -EPERM;
+	}
+
+	if (entry->hdr)
+		entry->hdr->ref_cnt--;
+	list_del(&entry->link);
+	entry->tbl->rule_cnt--;
+	IPADBG("del rt rule tbl_idx=%d rule_cnt=%d\n", entry->tbl->idx,
+			entry->tbl->rule_cnt);
+	if (entry->tbl->rule_cnt == 0 && entry->tbl->ref_cnt == 0) {
+		if (__ipa_del_rt_tbl(entry->tbl))
+			IPAERR("fail to del RT tbl\n");
+	}
+	entry->cookie = 0;
+	kmem_cache_free(ipa_ctx->rt_rule_cache, entry);
+
+	/* remove the handle from the database */
+	rb_erase(&node->node, &ipa_ctx->rt_rule_hdl_tree);
+	kmem_cache_free(ipa_ctx->tree_node_cache, node);
+
+	return 0;
+}
+
+/**
+ * ipa_del_rt_rule() - Remove the specified routing rules to SW and optionally
+ * commit to IPA HW
+ * @hdls:	[inout] set of routing rules to delete
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa_del_rt_rule(struct ipa_ioc_del_rt_rule *hdls)
+{
+	int i;
+	int ret;
+
+	if (hdls == NULL || hdls->num_hdls == 0 || hdls->ip >= IPA_IP_MAX) {
+		IPAERR("bad parm\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&ipa_ctx->lock);
+	for (i = 0; i < hdls->num_hdls; i++) {
+		if (__ipa_del_rt_rule(hdls->hdl[i].hdl)) {
+			IPAERR("failed to del rt rule %i\n", i);
+			hdls->hdl[i].status = IPA_RT_STATUS_OF_DEL_FAILED;
+		} else {
+			hdls->hdl[i].status = 0;
+		}
+	}
+
+	if (hdls->commit)
+		if (__ipa_commit_rt(hdls->ip)) {
+			ret = -EPERM;
+			goto bail;
+		}
+
+	ret = 0;
+bail:
+	mutex_unlock(&ipa_ctx->lock);
+	return ret;
+}
+EXPORT_SYMBOL(ipa_del_rt_rule);
+
+/**
+ * ipa_commit_rt() - Commit the current SW routing table of the specified type
+ * to IPA HW
+ * @ip:	The family of routing tables
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa_commit_rt(enum ipa_ip_type ip)
+{
+	int ret;
+	/*
+	 * issue a commit on the filtering module of same IP type since
+	 * filtering rules point to routing tables
+	 */
+	if (ipa_commit_flt(ip))
+		return -EPERM;
+
+	mutex_lock(&ipa_ctx->lock);
+	if (__ipa_commit_rt(ip)) {
+		ret = -EPERM;
+		goto bail;
+	}
+
+	ret = 0;
+bail:
+	mutex_unlock(&ipa_ctx->lock);
+	return ret;
+}
+EXPORT_SYMBOL(ipa_commit_rt);
+
+/**
+ * ipa_reset_rt() - reset the current SW routing table of specified type
+ * (does not commit to HW)
+ * @ip:	The family of routing tables
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa_reset_rt(enum ipa_ip_type ip)
+{
+	struct ipa_rt_tbl *tbl;
+	struct ipa_rt_tbl *tbl_next;
+	struct ipa_rt_tbl_set *set;
+	struct ipa_rt_entry *rule;
+	struct ipa_rt_entry *rule_next;
+	struct ipa_tree_node *node;
+	struct ipa_rt_tbl_set *rset;
+
+	/*
+	 * issue a reset on the filtering module of same IP type since
+	 * filtering rules point to routing tables
+	 */
+	if (ipa_reset_flt(ip))
+		IPAERR("fail to reset flt ip=%d\n", ip);
+
+	set = &ipa_ctx->rt_tbl_set[ip];
+	rset = &ipa_ctx->reap_rt_tbl_set[ip];
+	mutex_lock(&ipa_ctx->lock);
+	IPADBG("reset rt ip=%d\n", ip);
+	list_for_each_entry_safe(tbl, tbl_next, &set->head_rt_tbl_list, link) {
+		list_for_each_entry_safe(rule, rule_next,
+					 &tbl->head_rt_rule_list, link) {
+			node = ipa_search(&ipa_ctx->rt_rule_hdl_tree,
+					  (u32)rule);
+			if (node == NULL)
+				WARN_ON(1);
+
+			/*
+			 * for the "default" routing tbl, remove all but the
+			 *  last rule
+			 */
+			if (tbl->idx == 0 && tbl->rule_cnt == 1)
+				continue;
+
+			list_del(&rule->link);
+			tbl->rule_cnt--;
+			if (rule->hdr)
+				rule->hdr->ref_cnt--;
+			rule->cookie = 0;
+			kmem_cache_free(ipa_ctx->rt_rule_cache, rule);
+
+			/* remove the handle from the database */
+			rb_erase(&node->node, &ipa_ctx->rt_rule_hdl_tree);
+			kmem_cache_free(ipa_ctx->tree_node_cache, node);
+		}
+
+		node = ipa_search(&ipa_ctx->rt_tbl_hdl_tree, (u32)tbl);
+		if (node  == NULL)
+			WARN_ON(1);
+
+		/* do not remove the "default" routing tbl which has index 0 */
+		if (tbl->idx != 0) {
+			if (!tbl->in_sys) {
+				list_del(&tbl->link);
+				set->tbl_cnt--;
+				clear_bit(tbl->idx,
+					  &ipa_ctx->rt_idx_bitmap[ip]);
+				IPADBG("rst rt tbl_idx=%d tbl_cnt=%d\n",
+						tbl->idx, set->tbl_cnt);
+				kmem_cache_free(ipa_ctx->rt_tbl_cache, tbl);
+			} else {
+				list_move(&tbl->link, &rset->head_rt_tbl_list);
+				clear_bit(tbl->idx,
+					  &ipa_ctx->rt_idx_bitmap[ip]);
+				set->tbl_cnt--;
+				IPADBG("rst sys rt tbl_idx=%d tbl_cnt=%d\n",
+						tbl->idx, set->tbl_cnt);
+			}
+			/* remove the handle from the database */
+			rb_erase(&node->node, &ipa_ctx->rt_tbl_hdl_tree);
+			kmem_cache_free(ipa_ctx->tree_node_cache, node);
+		}
+	}
+	mutex_unlock(&ipa_ctx->lock);
+
+	return 0;
+}
+EXPORT_SYMBOL(ipa_reset_rt);
+
+/**
+ * ipa_get_rt_tbl() - look up the specified routing table and return its handle
+ * if it exists; on success the routing table ref cnt is increased
+ * @lookup:	[inout] routing table to lookup and its handle
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ *	Caller should call ipa_put_rt_tbl later if this function succeeds
+ */
+int ipa_get_rt_tbl(struct ipa_ioc_get_rt_tbl *lookup)
+{
+	struct ipa_rt_tbl *entry;
+	int result = -EFAULT;
+
+	if (lookup == NULL || lookup->ip >= IPA_IP_MAX) {
+		IPAERR("bad parm\n");
+		return -EINVAL;
+	}
+	mutex_lock(&ipa_ctx->lock);
+	entry = __ipa_add_rt_tbl(lookup->ip, lookup->name);
+	if (entry && entry->cookie == IPA_COOKIE) {
+		entry->ref_cnt++;
+		lookup->hdl = (uint32_t)entry;
+
+		/* commit for get */
+		if (__ipa_commit_rt(lookup->ip))
+			IPAERR("fail to commit RT tbl\n");
+
+		result = 0;
+	}
+	mutex_unlock(&ipa_ctx->lock);
+
+	return result;
+}
+EXPORT_SYMBOL(ipa_get_rt_tbl);
+
+/**
+ * ipa_put_rt_tbl() - Release the specified routing table handle
+ * @rt_tbl_hdl:	[in] the routing table handle to release
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa_put_rt_tbl(u32 rt_tbl_hdl)
+{
+	struct ipa_rt_tbl *entry = (struct ipa_rt_tbl *)rt_tbl_hdl;
+	struct ipa_tree_node *node;
+	enum ipa_ip_type ip = IPA_IP_MAX;
+
+	if (entry == NULL || (entry->cookie != IPA_COOKIE) ||
+			entry->ref_cnt == 0) {
+		IPAERR("bad parms\n");
+		return -EINVAL;
+	}
+	node = ipa_search(&ipa_ctx->rt_tbl_hdl_tree, rt_tbl_hdl);
+	if (node == NULL) {
+		IPAERR("lookup failed\n");
+		return -EPERM;
+	}
+
+	if (entry->set == &ipa_ctx->rt_tbl_set[IPA_IP_v4])
+		ip = IPA_IP_v4;
+	else if (entry->set == &ipa_ctx->rt_tbl_set[IPA_IP_v6])
+		ip = IPA_IP_v6;
+	else
+		WARN_ON(1);
+
+	mutex_lock(&ipa_ctx->lock);
+	entry->ref_cnt--;
+	if (entry->ref_cnt == 0 && entry->rule_cnt == 0) {
+		if (__ipa_del_rt_tbl(entry))
+			IPAERR("fail to del RT tbl\n");
+		/* commit for put */
+		if (__ipa_commit_rt(ip))
+			IPAERR("fail to commit RT tbl\n");
+	}
+	mutex_unlock(&ipa_ctx->lock);
+
+	return 0;
+}
+EXPORT_SYMBOL(ipa_put_rt_tbl);
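+
+/*
+ * Example (illustrative sketch): the expected get/put pairing.  A client looks
+ * up a table by name, uses the returned handle while it holds the reference,
+ * and releases it with ipa_put_rt_tbl() when done.  The table name below is a
+ * placeholder.
+ *
+ *	struct ipa_ioc_get_rt_tbl lk;
+ *
+ *	memset(&lk, 0, sizeof(lk));
+ *	lk.ip = IPA_IP_v4;
+ *	strlcpy(lk.name, "example_tbl", IPA_RESOURCE_NAME_MAX);
+ *	if (ipa_get_rt_tbl(&lk))
+ *		return -EPERM;
+ *	... use lk.hdl, e.g. as the routing destination of a filter rule ...
+ *	ipa_put_rt_tbl(lk.hdl);
+ */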
diff --git a/drivers/platform/msm/ipa/ipa_utils.c b/drivers/platform/msm/ipa/ipa_utils.c
new file mode 100644
index 0000000..d5d5566
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_utils.c
@@ -0,0 +1,1353 @@
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <net/ip.h>
+#include <linux/genalloc.h>	/* gen_pool_alloc() */
+#include <linux/io.h>
+#include "ipa_i.h"
+
+static const int ipa_ofst_meq32[] = { IPA_OFFSET_MEQ32_0,
+					IPA_OFFSET_MEQ32_1, -1 };
+static const int ipa_ofst_meq128[] = { IPA_OFFSET_MEQ128_0,
+					IPA_OFFSET_MEQ128_1, -1 };
+static const int ipa_ihl_ofst_rng16[] = { IPA_IHL_OFFSET_RANGE16_0,
+					IPA_IHL_OFFSET_RANGE16_1, -1 };
+static const int ipa_ihl_ofst_meq32[] = { IPA_IHL_OFFSET_MEQ32_0,
+					IPA_IHL_OFFSET_MEQ32_1, -1 };
+
+static const int ep_mapping[IPA_MODE_MAX][IPA_CLIENT_MAX] = {
+	{ -1, -1, -1, -1, -1, 11, -1, 8, 6, 2, 1, 5, -1, -1, -1, -1, -1, 10, 9, 7, 3, 4 },
+	{ -1, -1, -1, -1, -1, 11, -1, 8, 6, 2, 1, 5, -1, -1, -1, -1, -1, 10, 9, 7, 3, 4 },
+	{ 11, 13, 15, 17, 19, -1, -1, 8, 6, 2, 1, 5, 10, 12, 14, 16, 18, -1, 9, 7, 3, 4 },
+	{ 19, -1, -1, -1, -1, 11, 15, 8, 6, 2, 1, 5, 14, 16, 17, 18, -1, 10, 9, 7, 3, 4 },
+	{ 19, -1, -1, -1, -1, 11, 15, 8, 6, 2, 1, 5, 14, 16, 17, 18, -1, 10, 9, 7, 3, 4 },
+	{ 19, -1, -1, -1, -1, 11, 15, 8, 6, 2, 1, 5, 14, 16, 17, 18, -1, 10, 9, 7, 3, 4 },
+};
+
+/**
+ * ipa_cfg_route() - configure IPA route
+ * @route: IPA route
+ *
+ * Return codes:
+ * 0: success
+ */
+int ipa_cfg_route(struct ipa_route *route)
+{
+	ipa_write_reg(ipa_ctx->mmio, IPA_ROUTE_OFST,
+		     IPA_SETFIELD(route->route_dis,
+				  IPA_ROUTE_ROUTE_DIS_SHFT,
+				  IPA_ROUTE_ROUTE_DIS_BMSK) |
+			IPA_SETFIELD(route->route_def_pipe,
+				     IPA_ROUTE_ROUTE_DEF_PIPE_SHFT,
+				     IPA_ROUTE_ROUTE_DEF_PIPE_BMSK) |
+			IPA_SETFIELD(route->route_def_hdr_table,
+				     IPA_ROUTE_ROUTE_DEF_HDR_TABLE_SHFT,
+				     IPA_ROUTE_ROUTE_DEF_HDR_TABLE_BMSK) |
+			IPA_SETFIELD(route->route_def_hdr_ofst,
+				     IPA_ROUTE_ROUTE_DEF_HDR_OFST_SHFT,
+				     IPA_ROUTE_ROUTE_DEF_HDR_OFST_BMSK));
+
+	return 0;
+}
+/**
+ * ipa_cfg_filter() - configure filter
+ * @disable: disable value
+ *
+ * Return codes:
+ * 0: success
+ */
+int ipa_cfg_filter(u32 disable)
+{
+	ipa_write_reg(ipa_ctx->mmio, IPA_FILTER_OFST,
+		     IPA_SETFIELD(!disable,
+				  IPA_FILTER_FILTER_EN_SHFT,
+				  IPA_FILTER_FILTER_EN_BMSK));
+	return 0;
+}
+
+/**
+ * ipa_init_hw() - initialize HW
+ *
+ * Return codes:
+ * 0: success
+ */
+int ipa_init_hw(void)
+{
+	u32 ipa_version = 0;
+
+	/* do soft reset of IPA */
+	ipa_write_reg(ipa_ctx->mmio, IPA_COMP_SW_RESET_OFST, 1);
+	ipa_write_reg(ipa_ctx->mmio, IPA_COMP_SW_RESET_OFST, 0);
+
+	/* enable IPA */
+	ipa_write_reg(ipa_ctx->mmio, IPA_COMP_CFG_OFST, 1);
+
+	/* Read IPA version and make sure we have access to the registers */
+	ipa_version = ipa_read_reg(ipa_ctx->mmio, IPA_VERSION_OFST);
+	if (ipa_version == 0)
+		return -EFAULT;
+
+	return 0;
+}
+
+/**
+ * ipa_get_ep_mapping() - provide endpoint mapping
+ * @mode: IPA operating mode
+ * @client: client type
+ *
+ * Return value: endpoint mapping
+ */
+int ipa_get_ep_mapping(enum ipa_operating_mode mode,
+		enum ipa_client_type client)
+{
+	return ep_mapping[mode][client];
+}
+
+/**
+ * ipa_write_32() - convert 32 bit value to byte array
+ * @w: 32 bit integer
+ * @dest: byte array
+ *
+ * Return value: pointer to the next free byte in @dest
+ */
+u8 *ipa_write_32(u32 w, u8 *dest)
+{
+	*dest++ = (u8)((w) & 0xFF);
+	*dest++ = (u8)((w >> 8) & 0xFF);
+	*dest++ = (u8)((w >> 16) & 0xFF);
+	*dest++ = (u8)((w >> 24) & 0xFF);
+
+	return dest;
+}
+
+/**
+ * ipa_write_16() - convert 16 bit value to byte array
+ * @hw: 16 bit integer
+ * @dest: byte array
+ *
+ * Return value: pointer to the next free byte in @dest
+ */
+u8 *ipa_write_16(u16 hw, u8 *dest)
+{
+	*dest++ = (u8)((hw) & 0xFF);
+	*dest++ = (u8)((hw >> 8) & 0xFF);
+
+	return dest;
+}
+
+/**
+ * ipa_write_8() - convert 8 bit value to byte array
+ * @b: 8 bit integer
+ * @dest: byte array
+ *
+ * Return value: pointer to the next free byte in @dest
+ */
+u8 *ipa_write_8(u8 b, u8 *dest)
+{
+	*dest++ = (b) & 0xFF;
+
+	return dest;
+}
+
+/**
+ * ipa_pad_to_32() - pad byte array to 32 bit value
+ * @dest: byte array
+ *
+ * Return value: pointer to the next 32 bit aligned byte in @dest
+ */
+u8 *ipa_pad_to_32(u8 *dest)
+{
+	int i = (u32)dest & 0x3;
+	int j;
+
+	if (i)
+		for (j = 0; j < (4 - i); j++)
+			*dest++ = 0;
+
+	return dest;
+}
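+
+/*
+ * Worked example of how the helpers above compose when serializing one
+ * equation: starting from a 32 bit aligned dest, ipa_write_8() emits the one
+ * byte offset, two ipa_write_32() calls emit mask and value (LSB first) for a
+ * running total of 9 bytes, and ipa_pad_to_32() then appends 3 zero bytes so
+ * the next equation starts on a 32 bit boundary at offset 12.
+ */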
+
+/**
+ * ipa_generate_hw_rule() - generate HW rule
+ * @ip: IP address type
+ * @attrib: IPA rule attribute
+ * @buf: output buffer
+ * @en_rule: rule
+ *
+ * Return codes:
+ * 0: success
+ * -EPERM: wrong input
+ */
+int ipa_generate_hw_rule(enum ipa_ip_type ip,
+		const struct ipa_rule_attrib *attrib, u8 **buf, u16 *en_rule)
+{
+	u8 ofst_meq32 = 0;
+	u8 ihl_ofst_rng16 = 0;
+	u8 ihl_ofst_meq32 = 0;
+	u8 ofst_meq128 = 0;
+
+	if (ip == IPA_IP_v4) {
+
+		/* error check */
+		if (attrib->attrib_mask & IPA_FLT_NEXT_HDR ||
+		    attrib->attrib_mask & IPA_FLT_TC || attrib->attrib_mask &
+		    IPA_FLT_FLOW_LABEL) {
+			IPAERR("v6 attrib's specified for v4 rule\n");
+			return -EPERM;
+		}
+
+		if (attrib->attrib_mask & IPA_FLT_TOS) {
+			*en_rule |= IPA_TOS_EQ;
+			*buf = ipa_write_8(attrib->u.v4.tos, *buf);
+			*buf = ipa_pad_to_32(*buf);
+		}
+
+		if (attrib->attrib_mask & IPA_FLT_PROTOCOL) {
+			*en_rule |= IPA_PROTOCOL_EQ;
+			*buf = ipa_write_8(attrib->u.v4.protocol, *buf);
+			*buf = ipa_pad_to_32(*buf);
+		}
+
+		if (attrib->attrib_mask & IPA_FLT_SRC_ADDR) {
+			if (ipa_ofst_meq32[ofst_meq32] == -1) {
+				IPAERR("ran out of meq32 eq\n");
+				return -EPERM;
+			}
+			*en_rule |= ipa_ofst_meq32[ofst_meq32];
+			/* 12 => offset of src ip in v4 header */
+			*buf = ipa_write_8(12, *buf);
+			*buf = ipa_write_32(attrib->u.v4.src_addr_mask, *buf);
+			*buf = ipa_write_32(attrib->u.v4.src_addr, *buf);
+			*buf = ipa_pad_to_32(*buf);
+			ofst_meq32++;
+		}
+
+		if (attrib->attrib_mask & IPA_FLT_DST_ADDR) {
+			if (ipa_ofst_meq32[ofst_meq32] == -1) {
+				IPAERR("ran out of meq32 eq\n");
+				return -EPERM;
+			}
+			*en_rule |= ipa_ofst_meq32[ofst_meq32];
+			/* 16 => offset of dst ip in v4 header */
+			*buf = ipa_write_8(16, *buf);
+			*buf = ipa_write_32(attrib->u.v4.dst_addr_mask, *buf);
+			*buf = ipa_write_32(attrib->u.v4.dst_addr, *buf);
+			*buf = ipa_pad_to_32(*buf);
+			ofst_meq32++;
+		}
+
+		if (attrib->attrib_mask & IPA_FLT_SRC_PORT_RANGE) {
+			if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) {
+				IPAERR("ran out of ihl_rng16 eq\n");
+				return -EPERM;
+			}
+			if (attrib->src_port_hi < attrib->src_port_lo) {
+				IPAERR("bad src port range param\n");
+				return -EPERM;
+			}
+			*en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16];
+			/* 0  => offset of src port after v4 header */
+			*buf = ipa_write_8(0, *buf);
+			*buf = ipa_write_16(attrib->src_port_hi, *buf);
+			*buf = ipa_write_16(attrib->src_port_lo, *buf);
+			*buf = ipa_pad_to_32(*buf);
+			ihl_ofst_rng16++;
+		}
+
+		if (attrib->attrib_mask & IPA_FLT_DST_PORT_RANGE) {
+			if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) {
+				IPAERR("ran out of ihl_rng16 eq\n");
+				return -EPERM;
+			}
+			if (attrib->dst_port_hi < attrib->dst_port_lo) {
+				IPAERR("bad dst port range param\n");
+				return -EPERM;
+			}
+			*en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16];
+			/* 2  => offset of dst port after v4 header */
+			*buf = ipa_write_8(2, *buf);
+			*buf = ipa_write_16(attrib->dst_port_hi, *buf);
+			*buf = ipa_write_16(attrib->dst_port_lo, *buf);
+			*buf = ipa_pad_to_32(*buf);
+			ihl_ofst_rng16++;
+		}
+
+		if (attrib->attrib_mask & IPA_FLT_TYPE) {
+			if (ipa_ihl_ofst_meq32[ihl_ofst_meq32] == -1) {
+				IPAERR("ran out of ihl_meq32 eq\n");
+				return -EPERM;
+			}
+			*en_rule |= ipa_ihl_ofst_meq32[ihl_ofst_meq32];
+			/* 0  => offset of type after v4 header */
+			*buf = ipa_write_8(0, *buf);
+			*buf = ipa_write_32(0xFF, *buf);
+			*buf = ipa_write_32(attrib->type, *buf);
+			*buf = ipa_pad_to_32(*buf);
+			ihl_ofst_meq32++;
+		}
+
+		if (attrib->attrib_mask & IPA_FLT_CODE) {
+			if (ipa_ihl_ofst_meq32[ihl_ofst_meq32] == -1) {
+				IPAERR("ran out of ihl_meq32 eq\n");
+				return -EPERM;
+			}
+			*en_rule |= ipa_ihl_ofst_meq32[ihl_ofst_meq32];
+			/* 1  => offset of code after v4 header */
+			*buf = ipa_write_8(1, *buf);
+			*buf = ipa_write_32(0xFF, *buf);
+			*buf = ipa_write_32(attrib->code, *buf);
+			*buf = ipa_pad_to_32(*buf);
+			ihl_ofst_meq32++;
+		}
+
+		if (attrib->attrib_mask & IPA_FLT_SPI) {
+			if (ipa_ihl_ofst_meq32[ihl_ofst_meq32] == -1) {
+				IPAERR("ran out of ihl_meq32 eq\n");
+				return -EPERM;
+			}
+			*en_rule |= ipa_ihl_ofst_meq32[ihl_ofst_meq32];
+			/* 0  => offset of SPI after v4 header FIXME */
+			*buf = ipa_write_8(0, *buf);
+			*buf = ipa_write_32(0xFFFFFFFF, *buf);
+			*buf = ipa_write_32(attrib->spi, *buf);
+			*buf = ipa_pad_to_32(*buf);
+			ihl_ofst_meq32++;
+		}
+
+		if (attrib->attrib_mask & IPA_FLT_SRC_PORT) {
+			if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) {
+				IPAERR("ran out of ihl_rng16 eq\n");
+				return -EPERM;
+			}
+			*en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16];
+			/* 0  => offset of src port after v4 header */
+			*buf = ipa_write_8(0, *buf);
+			*buf = ipa_write_16(attrib->src_port, *buf);
+			*buf = ipa_write_16(attrib->src_port, *buf);
+			*buf = ipa_pad_to_32(*buf);
+			ihl_ofst_rng16++;
+		}
+
+		if (attrib->attrib_mask & IPA_FLT_DST_PORT) {
+			if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) {
+				IPAERR("ran out of ihl_rng16 eq\n");
+				return -EPERM;
+			}
+			*en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16];
+			/* 2  => offset of dst port after v4 header */
+			*buf = ipa_write_8(2, *buf);
+			*buf = ipa_write_16(attrib->dst_port, *buf);
+			*buf = ipa_write_16(attrib->dst_port, *buf);
+			*buf = ipa_pad_to_32(*buf);
+			ihl_ofst_rng16++;
+		}
+
+		if (attrib->attrib_mask & IPA_FLT_META_DATA) {
+			*en_rule |= IPA_METADATA_COMPARE;
+			*buf = ipa_write_8(0, *buf);    /* offset, reserved */
+			*buf = ipa_write_32(attrib->meta_data_mask, *buf);
+			*buf = ipa_write_32(attrib->meta_data, *buf);
+			*buf = ipa_pad_to_32(*buf);
+		}
+
+		if (attrib->attrib_mask & IPA_FLT_FRAGMENT) {
+			*en_rule |= IPA_IPV4_IS_FRAG;
+			*buf = ipa_pad_to_32(*buf);
+		}
+
+	} else if (ip == IPA_IP_v6) {
+
+		/* v6 code below assumes no extension headers TODO: fix this */
+
+		/* error check */
+		if (attrib->attrib_mask & IPA_FLT_TOS ||
+		    attrib->attrib_mask & IPA_FLT_PROTOCOL ||
+		    attrib->attrib_mask & IPA_FLT_FRAGMENT) {
+			IPAERR("v4 attrib's specified for v6 rule\n");
+			return -EPERM;
+		}
+
+		if (attrib->attrib_mask & IPA_FLT_NEXT_HDR) {
+			*en_rule |= IPA_PROTOCOL_EQ;
+			*buf = ipa_write_8(attrib->u.v6.next_hdr, *buf);
+			*buf = ipa_pad_to_32(*buf);
+		}
+
+		if (attrib->attrib_mask & IPA_FLT_TYPE) {
+			if (ipa_ihl_ofst_meq32[ihl_ofst_meq32] == -1) {
+				IPAERR("ran out of ihl_meq32 eq\n");
+				return -EPERM;
+			}
+			*en_rule |= ipa_ihl_ofst_meq32[ihl_ofst_meq32];
+			/* 0  => offset of type after v6 header */
+			*buf = ipa_write_8(0, *buf);
+			*buf = ipa_write_32(0xFF, *buf);
+			*buf = ipa_write_32(attrib->type, *buf);
+			*buf = ipa_pad_to_32(*buf);
+			ihl_ofst_meq32++;
+		}
+
+		if (attrib->attrib_mask & IPA_FLT_CODE) {
+			if (ipa_ihl_ofst_meq32[ihl_ofst_meq32] == -1) {
+				IPAERR("ran out of ihl_meq32 eq\n");
+				return -EPERM;
+			}
+			*en_rule |= ipa_ihl_ofst_meq32[ihl_ofst_meq32];
+			/* 1  => offset of code after v6 header */
+			*buf = ipa_write_8(1, *buf);
+			*buf = ipa_write_32(0xFF, *buf);
+			*buf = ipa_write_32(attrib->code, *buf);
+			*buf = ipa_pad_to_32(*buf);
+			ihl_ofst_meq32++;
+		}
+
+		if (attrib->attrib_mask & IPA_FLT_SPI) {
+			if (ipa_ihl_ofst_meq32[ihl_ofst_meq32] == -1) {
+				IPAERR("ran out of ihl_meq32 eq\n");
+				return -EPERM;
+			}
+			*en_rule |= ipa_ihl_ofst_meq32[ihl_ofst_meq32];
+			/* 0  => offset of SPI after v6 header FIXME */
+			*buf = ipa_write_8(0, *buf);
+			*buf = ipa_write_32(0xFFFFFFFF, *buf);
+			*buf = ipa_write_32(attrib->spi, *buf);
+			*buf = ipa_pad_to_32(*buf);
+			ihl_ofst_meq32++;
+		}
+
+		if (attrib->attrib_mask & IPA_FLT_SRC_PORT) {
+			if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) {
+				IPAERR("ran out of ihl_rng16 eq\n");
+				return -EPERM;
+			}
+			*en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16];
+			/* 0  => offset of src port after v6 header */
+			*buf = ipa_write_8(0, *buf);
+			*buf = ipa_write_16(attrib->src_port, *buf);
+			*buf = ipa_write_16(attrib->src_port, *buf);
+			*buf = ipa_pad_to_32(*buf);
+			ihl_ofst_rng16++;
+		}
+
+		if (attrib->attrib_mask & IPA_FLT_DST_PORT) {
+			if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) {
+				IPAERR("ran out of ihl_rng16 eq\n");
+				return -EPERM;
+			}
+			*en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16];
+			/* 2  => offset of dst port after v6 header */
+			*buf = ipa_write_8(2, *buf);
+			*buf = ipa_write_16(attrib->dst_port, *buf);
+			*buf = ipa_write_16(attrib->dst_port, *buf);
+			*buf = ipa_pad_to_32(*buf);
+			ihl_ofst_rng16++;
+		}
+
+		if (attrib->attrib_mask & IPA_FLT_SRC_PORT_RANGE) {
+			if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) {
+				IPAERR("ran out of ihl_rng16 eq\n");
+				return -EPERM;
+			}
+			if (attrib->src_port_hi < attrib->src_port_lo) {
+				IPAERR("bad src port range param\n");
+				return -EPERM;
+			}
+			*en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16];
+			/* 0  => offset of src port after v6 header */
+			*buf = ipa_write_8(0, *buf);
+			*buf = ipa_write_16(attrib->src_port_hi, *buf);
+			*buf = ipa_write_16(attrib->src_port_lo, *buf);
+			*buf = ipa_pad_to_32(*buf);
+			ihl_ofst_rng16++;
+		}
+
+		if (attrib->attrib_mask & IPA_FLT_DST_PORT_RANGE) {
+			if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) {
+				IPAERR("ran out of ihl_rng16 eq\n");
+				return -EPERM;
+			}
+			if (attrib->dst_port_hi < attrib->dst_port_lo) {
+				IPAERR("bad dst port range param\n");
+				return -EPERM;
+			}
+			*en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16];
+			/* 2  => offset of dst port after v6 header */
+			*buf = ipa_write_8(2, *buf);
+			*buf = ipa_write_16(attrib->dst_port_hi, *buf);
+			*buf = ipa_write_16(attrib->dst_port_lo, *buf);
+			*buf = ipa_pad_to_32(*buf);
+			ihl_ofst_rng16++;
+		}
+
+		if (attrib->attrib_mask & IPA_FLT_SRC_ADDR) {
+			if (ipa_ofst_meq128[ofst_meq128] == -1) {
+				IPAERR("ran out of meq128 eq\n");
+				return -EPERM;
+			}
+			*en_rule |= ipa_ofst_meq128[ofst_meq128];
+			/* 8 => offset of src ip in v6 header */
+			*buf = ipa_write_8(8, *buf);
+			*buf = ipa_write_32(attrib->u.v6.src_addr_mask[0],
+					*buf);
+			*buf = ipa_write_32(attrib->u.v6.src_addr_mask[1],
+					*buf);
+			*buf = ipa_write_32(attrib->u.v6.src_addr_mask[2],
+					*buf);
+			*buf = ipa_write_32(attrib->u.v6.src_addr_mask[3],
+					*buf);
+			*buf = ipa_write_32(attrib->u.v6.src_addr[0], *buf);
+			*buf = ipa_write_32(attrib->u.v6.src_addr[1], *buf);
+			*buf = ipa_write_32(attrib->u.v6.src_addr[2], *buf);
+			*buf = ipa_write_32(attrib->u.v6.src_addr[3], *buf);
+			*buf = ipa_pad_to_32(*buf);
+			ofst_meq128++;
+		}
+
+		if (attrib->attrib_mask & IPA_FLT_DST_ADDR) {
+			if (ipa_ofst_meq128[ofst_meq128] == -1) {
+				IPAERR("ran out of meq128 eq\n");
+				return -EPERM;
+			}
+			*en_rule |= ipa_ofst_meq128[ofst_meq128];
+			/* 24 => offset of dst ip in v6 header */
+			*buf = ipa_write_8(24, *buf);
+			*buf = ipa_write_32(attrib->u.v6.dst_addr_mask[0],
+					*buf);
+			*buf = ipa_write_32(attrib->u.v6.dst_addr_mask[1],
+					*buf);
+			*buf = ipa_write_32(attrib->u.v6.dst_addr_mask[2],
+					*buf);
+			*buf = ipa_write_32(attrib->u.v6.dst_addr_mask[3],
+					*buf);
+			*buf = ipa_write_32(attrib->u.v6.dst_addr[0], *buf);
+			*buf = ipa_write_32(attrib->u.v6.dst_addr[1], *buf);
+			*buf = ipa_write_32(attrib->u.v6.dst_addr[2], *buf);
+			*buf = ipa_write_32(attrib->u.v6.dst_addr[3], *buf);
+			*buf = ipa_pad_to_32(*buf);
+			ofst_meq128++;
+		}
+
+		if (attrib->attrib_mask & IPA_FLT_TC) {
+			*en_rule |= IPA_FLT_TC;
+			*buf = ipa_write_8(attrib->u.v6.tc, *buf);
+			*buf = ipa_pad_to_32(*buf);
+		}
+
+		if (attrib->attrib_mask & IPA_FLT_FLOW_LABEL) {
+			*en_rule |= IPA_FLT_FLOW_LABEL;
+			 /* FIXME FL is only 20 bits */
+			*buf = ipa_write_32(attrib->u.v6.flow_label, *buf);
+			*buf = ipa_pad_to_32(*buf);
+		}
+
+		if (attrib->attrib_mask & IPA_FLT_META_DATA) {
+			*en_rule |= IPA_METADATA_COMPARE;
+			*buf = ipa_write_8(0, *buf);    /* offset, reserved */
+			*buf = ipa_write_32(attrib->meta_data_mask, *buf);
+			*buf = ipa_write_32(attrib->meta_data, *buf);
+			*buf = ipa_pad_to_32(*buf);
+		}
+
+	} else {
+		IPAERR("unsupported ip %d\n", ip);
+		return -EPERM;
+	}
+
+	/*
+	 * default "rule" means no attributes set -> map to
+	 * OFFSET_MEQ32_0 with mask of 0 and val of 0 and offset 0
+	 */
+	if (attrib->attrib_mask == 0) {
+		if (ipa_ofst_meq32[ofst_meq32] == -1) {
+			IPAERR("ran out of meq32 eq\n");
+			return -EPERM;
+		}
+		*en_rule |= ipa_ofst_meq32[ofst_meq32];
+		*buf = ipa_write_8(0, *buf);    /* offset */
+		*buf = ipa_write_32(0, *buf);   /* mask */
+		*buf = ipa_write_32(0, *buf);   /* val */
+		*buf = ipa_pad_to_32(*buf);
+		ofst_meq32++;
+	}
+
+	return 0;
+}
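+
+/*
+ * Example (illustrative sketch): generating the HW image of a single v4 rule
+ * that matches on destination address.  The scratch buffer size is an
+ * assumption for the sketch; real callers size the buffer from the table
+ * being committed.
+ *
+ *	struct ipa_rule_attrib attrib;
+ *	u32 scratch[64];
+ *	u8 *cur = (u8 *)scratch;
+ *	u16 en_rule = 0;
+ *
+ *	memset(&attrib, 0, sizeof(attrib));
+ *	attrib.attrib_mask = IPA_FLT_DST_ADDR;
+ *	attrib.u.v4.dst_addr = 0xc0a80101;
+ *	attrib.u.v4.dst_addr_mask = 0xffffffff;
+ *	if (ipa_generate_hw_rule(IPA_IP_v4, &attrib, &cur, &en_rule))
+ *		return -EPERM;
+ *
+ * On return en_rule has one OFFSET_MEQ32 equation bit set and cur points past
+ * the 9 bytes written for offset/mask/value plus 3 bytes of padding.
+ */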
+
+/**
+ * ipa_cfg_ep() - IPA end-point configuration
+ * @clnt_hdl:	[in] opaque client handle assigned by IPA to client
+ * @ipa_ep_cfg:	[in] IPA end-point configuration params
+ *
+ * This includes nat, header, mode, aggregation and route settings and is a
+ * one-shot API to configure the IPA end-point fully
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa_cfg_ep(u32 clnt_hdl, const struct ipa_ep_cfg *ipa_ep_cfg)
+{
+	int result = -EINVAL;
+
+	if (clnt_hdl >= IPA_NUM_PIPES || ipa_ctx->ep[clnt_hdl].valid == 0 ||
+			ipa_ep_cfg == NULL) {
+		IPAERR("bad parm.\n");
+		return -EINVAL;
+	}
+
+	result = ipa_cfg_ep_hdr(clnt_hdl, &ipa_ep_cfg->hdr);
+	if (result)
+		return result;
+
+	result = ipa_cfg_ep_aggr(clnt_hdl, &ipa_ep_cfg->aggr);
+	if (result)
+		return result;
+
+	if (IPA_CLIENT_IS_PROD(ipa_ctx->ep[clnt_hdl].client)) {
+		result = ipa_cfg_ep_nat(clnt_hdl, &ipa_ep_cfg->nat);
+		if (result)
+			return result;
+
+		result = ipa_cfg_ep_mode(clnt_hdl, &ipa_ep_cfg->mode);
+		if (result)
+			return result;
+
+		result = ipa_cfg_ep_route(clnt_hdl, &ipa_ep_cfg->route);
+		if (result)
+			return result;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(ipa_cfg_ep);
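+
+/*
+ * Example (illustrative sketch): configuring a producer endpoint right after
+ * connect.  clnt_hdl stands for a handle obtained from the connect path and
+ * the header length is an assumption; fields left zero simply program zero
+ * into the corresponding register fields.
+ *
+ *	struct ipa_ep_cfg cfg;
+ *
+ *	memset(&cfg, 0, sizeof(cfg));
+ *	cfg.hdr.hdr_len = 14;
+ *	cfg.nat.nat_en = 0;
+ *	cfg.aggr.aggr_en = 0;
+ *	if (ipa_cfg_ep(clnt_hdl, &cfg))
+ *		pr_err("EP %u config failed\n", clnt_hdl);
+ */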
+
+/**
+ * ipa_cfg_ep_nat() - IPA end-point NAT configuration
+ * @clnt_hdl:	[in] opaque client handle assigned by IPA to client
+ * @ipa_ep_cfg:	[in] IPA end-point configuration params
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa_cfg_ep_nat(u32 clnt_hdl, const struct ipa_ep_cfg_nat *ipa_ep_cfg)
+{
+	if (clnt_hdl >= IPA_NUM_PIPES || ipa_ctx->ep[clnt_hdl].valid == 0 ||
+			ipa_ep_cfg == NULL) {
+		IPAERR("bad parm.\n");
+		return -EINVAL;
+	}
+
+	if (IPA_CLIENT_IS_CONS(ipa_ctx->ep[clnt_hdl].client)) {
+		IPAERR("NAT does not apply to IPA out EP %d\n", clnt_hdl);
+		return -EINVAL;
+	}
+	/* copy over EP cfg */
+	ipa_ctx->ep[clnt_hdl].cfg.nat = *ipa_ep_cfg;
+	/* clnt_hdl is used as pipe_index */
+	ipa_write_reg(ipa_ctx->mmio, IPA_ENDP_INIT_NAT_n_OFST(clnt_hdl),
+		      IPA_SETFIELD(ipa_ctx->ep[clnt_hdl].cfg.nat.nat_en,
+				   IPA_ENDP_INIT_NAT_n_NAT_EN_SHFT,
+				   IPA_ENDP_INIT_NAT_n_NAT_EN_BMSK));
+	return 0;
+}
+EXPORT_SYMBOL(ipa_cfg_ep_nat);
+
+/**
+ * ipa_cfg_ep_hdr() -  IPA end-point header configuration
+ * @clnt_hdl:	[in] opaque client handle assigned by IPA to client
+ * @ipa_ep_cfg:	[in] IPA end-point configuration params
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa_cfg_ep_hdr(u32 clnt_hdl, const struct ipa_ep_cfg_hdr *ipa_ep_cfg)
+{
+	u32 val;
+	struct ipa_ep_context *ep;
+
+	if (clnt_hdl >= IPA_NUM_PIPES || ipa_ctx->ep[clnt_hdl].valid == 0 ||
+			ipa_ep_cfg == NULL) {
+		IPAERR("bad parm.\n");
+		return -EINVAL;
+	}
+
+	ep = &ipa_ctx->ep[clnt_hdl];
+
+	/* copy over EP cfg */
+	ep->cfg.hdr = *ipa_ep_cfg;
+
+	val = IPA_SETFIELD(ep->cfg.hdr.hdr_len,
+		   IPA_ENDP_INIT_HDR_n_HDR_LEN_SHFT,
+		   IPA_ENDP_INIT_HDR_n_HDR_LEN_BMSK) |
+	      IPA_SETFIELD(ep->cfg.hdr.hdr_ofst_metadata_valid,
+		   IPA_ENDP_INIT_HDR_n_HDR_OFST_METADATA_VALID_SHFT,
+		   IPA_ENDP_INIT_HDR_n_HDR_OFST_METADATA_VALID_BMSK) |
+	      IPA_SETFIELD(ep->cfg.hdr.hdr_ofst_metadata,
+		   IPA_ENDP_INIT_HDR_n_HDR_OFST_METADATA_SHFT,
+		   IPA_ENDP_INIT_HDR_n_HDR_OFST_METADATA_BMSK) |
+	      IPA_SETFIELD(ep->cfg.hdr.hdr_additional_const_len,
+		   IPA_ENDP_INIT_HDR_n_HDR_ADDITIONAL_CONST_LEN_SHFT,
+		   IPA_ENDP_INIT_HDR_n_HDR_ADDITIONAL_CONST_LEN_BMSK) |
+	      IPA_SETFIELD(ep->cfg.hdr.hdr_ofst_pkt_size_valid,
+		   IPA_ENDP_INIT_HDR_n_HDR_OFST_PKT_SIZE_VALID_SHFT,
+		   IPA_ENDP_INIT_HDR_n_HDR_OFST_PKT_SIZE_VALID_BMSK) |
+	      IPA_SETFIELD(ep->cfg.hdr.hdr_ofst_pkt_size,
+		   IPA_ENDP_INIT_HDR_n_HDR_OFST_PKT_SIZE_SHFT,
+		   IPA_ENDP_INIT_HDR_n_HDR_OFST_PKT_SIZE_BMSK) |
+	      IPA_SETFIELD(ep->cfg.hdr.hdr_a5_mux,
+		   IPA_ENDP_INIT_HDR_n_HDR_A5_MUX_SHFT,
+		   IPA_ENDP_INIT_HDR_n_HDR_A5_MUX_BMSK);
+
+	ipa_write_reg(ipa_ctx->mmio, IPA_ENDP_INIT_HDR_n_OFST(clnt_hdl), val);
+
+	return 0;
+}
+EXPORT_SYMBOL(ipa_cfg_ep_hdr);
+
+/**
+ * ipa_cfg_ep_mode() - IPA end-point mode configuration
+ * @clnt_hdl:	[in] opaque client handle assigned by IPA to client
+ * @ipa_ep_cfg:	[in] IPA end-point configuration params
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa_cfg_ep_mode(u32 clnt_hdl, const struct ipa_ep_cfg_mode *ipa_ep_cfg)
+{
+	u32 val;
+
+	if (clnt_hdl >= IPA_NUM_PIPES || ipa_ctx->ep[clnt_hdl].valid == 0 ||
+			ipa_ep_cfg == NULL) {
+		IPAERR("bad parm.\n");
+		return -EINVAL;
+	}
+
+	if (IPA_CLIENT_IS_CONS(ipa_ctx->ep[clnt_hdl].client)) {
+		IPAERR("MODE does not apply to IPA out EP %d\n", clnt_hdl);
+		return -EINVAL;
+	}
+
+	/* copy over EP cfg */
+	ipa_ctx->ep[clnt_hdl].cfg.mode = *ipa_ep_cfg;
+	ipa_ctx->ep[clnt_hdl].dst_pipe_index = ipa_get_ep_mapping(ipa_ctx->mode,
+			ipa_ep_cfg->dst);
+
+	val = IPA_SETFIELD(ipa_ctx->ep[clnt_hdl].cfg.mode.mode,
+			   IPA_ENDP_INIT_MODE_n_MODE_SHFT,
+			   IPA_ENDP_INIT_MODE_n_MODE_BMSK) |
+	      IPA_SETFIELD(ipa_ctx->ep[clnt_hdl].dst_pipe_index,
+			   IPA_ENDP_INIT_MODE_n_DEST_PIPE_INDEX_SHFT,
+			   IPA_ENDP_INIT_MODE_n_DEST_PIPE_INDEX_BMSK);
+
+	ipa_write_reg(ipa_ctx->mmio, IPA_ENDP_INIT_MODE_n_OFST(clnt_hdl), val);
+
+	return 0;
+}
+EXPORT_SYMBOL(ipa_cfg_ep_mode);
+
+/**
+ * ipa_cfg_ep_aggr() - IPA end-point aggregation configuration
+ * @clnt_hdl:	[in] opaque client handle assigned by IPA to client
+ * @ipa_ep_cfg:	[in] IPA end-point configuration params
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa_cfg_ep_aggr(u32 clnt_hdl, const struct ipa_ep_cfg_aggr *ipa_ep_cfg)
+{
+	u32 val;
+
+	if (clnt_hdl >= IPA_NUM_PIPES || ipa_ctx->ep[clnt_hdl].valid == 0 ||
+			ipa_ep_cfg == NULL) {
+		IPAERR("bad parm.\n");
+		return -EINVAL;
+	}
+	/* copy over EP cfg */
+	ipa_ctx->ep[clnt_hdl].cfg.aggr = *ipa_ep_cfg;
+
+	val = IPA_SETFIELD(ipa_ctx->ep[clnt_hdl].cfg.aggr.aggr_en,
+			   IPA_ENDP_INIT_AGGR_n_AGGR_EN_SHFT,
+			   IPA_ENDP_INIT_AGGR_n_AGGR_EN_BMSK) |
+	      IPA_SETFIELD(ipa_ctx->ep[clnt_hdl].cfg.aggr.aggr,
+			   IPA_ENDP_INIT_AGGR_n_AGGR_TYPE_SHFT,
+			   IPA_ENDP_INIT_AGGR_n_AGGR_TYPE_BMSK) |
+	      IPA_SETFIELD(ipa_ctx->ep[clnt_hdl].cfg.aggr.aggr_byte_limit,
+			   IPA_ENDP_INIT_AGGR_n_AGGR_BYTE_LIMIT_SHFT,
+			   IPA_ENDP_INIT_AGGR_n_AGGR_BYTE_LIMIT_BMSK) |
+	      IPA_SETFIELD(ipa_ctx->ep[clnt_hdl].cfg.aggr.aggr_time_limit,
+			   IPA_ENDP_INIT_AGGR_n_AGGR_TIME_LIMIT_SHFT,
+			   IPA_ENDP_INIT_AGGR_n_AGGR_TIME_LIMIT_BMSK);
+
+	ipa_write_reg(ipa_ctx->mmio, IPA_ENDP_INIT_AGGR_n_OFST(clnt_hdl), val);
+
+	return 0;
+}
+EXPORT_SYMBOL(ipa_cfg_ep_aggr);
+
+/**
+ * ipa_cfg_ep_route() - IPA end-point routing configuration
+ * @clnt_hdl:	[in] opaque client handle assigned by IPA to client
+ * @ipa_ep_cfg:	[in] IPA end-point configuration params
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa_cfg_ep_route(u32 clnt_hdl, const struct ipa_ep_cfg_route *ipa_ep_cfg)
+{
+	if (clnt_hdl >= IPA_NUM_PIPES || ipa_ctx->ep[clnt_hdl].valid == 0 ||
+			ipa_ep_cfg == NULL) {
+		IPAERR("bad parm.\n");
+		return -EINVAL;
+	}
+
+	if (IPA_CLIENT_IS_CONS(ipa_ctx->ep[clnt_hdl].client)) {
+		IPAERR("ROUTE does not apply to IPA out EP %d\n", clnt_hdl);
+		return -EINVAL;
+	}
+
+	/*
+	 * if DMA mode was configured previously for this EP, return with
+	 * success
+	 */
+	if (ipa_ctx->ep[clnt_hdl].cfg.mode.mode == IPA_DMA) {
+		IPADBG("DMA mode for EP %d\n", clnt_hdl);
+		return 0;
+	}
+
+	if (ipa_ep_cfg->rt_tbl_hdl)
+		IPAERR("client specified non-zero RT TBL hdl - ignore it\n");
+
+	/* always use the "default" routing tables whose indices are 0 */
+	ipa_ctx->ep[clnt_hdl].rt_tbl_idx = 0;
+
+	ipa_write_reg(ipa_ctx->mmio, IPA_ENDP_INIT_ROUTE_n_OFST(clnt_hdl),
+		      IPA_SETFIELD(ipa_ctx->ep[clnt_hdl].rt_tbl_idx,
+			   IPA_ENDP_INIT_ROUTE_n_ROUTE_TABLE_INDEX_SHFT,
+			   IPA_ENDP_INIT_ROUTE_n_ROUTE_TABLE_INDEX_BMSK));
+
+	return 0;
+}
+EXPORT_SYMBOL(ipa_cfg_ep_route);
+
+/**
+ * ipa_dump_buff_internal() - dumps buffer for debug purposes
+ * @base: buffer base address
+ * @phy_base: buffer physical base address
+ * @size: size of the buffer
+ */
+void ipa_dump_buff_internal(void *base, dma_addr_t phy_base, u32 size)
+{
+	int i;
+	u32 *cur = (u32 *)base;
+	u8 *byt;
+	IPADBG("START phys=%x\n", phy_base);
+	for (i = 0; i < size / 4; i++) {
+		byt = (u8 *)(cur + i);
+		IPADBG("%2d %08x   %02x %02x %02x %02x\n", i, *(cur + i),
+				byt[0], byt[1], byt[2], byt[3]);
+	}
+	IPADBG("END\n");
+}
+
+/**
+ * ipa_dump() - dumps part of driver data structures for debug purposes
+ */
+void ipa_dump(void)
+{
+	struct ipa_mem_buffer hdr_mem = { 0 };
+	struct ipa_mem_buffer rt_mem = { 0 };
+	struct ipa_mem_buffer flt_mem = { 0 };
+
+	mutex_lock(&ipa_ctx->lock);
+
+	if (ipa_generate_hdr_hw_tbl(&hdr_mem))
+		IPAERR("fail\n");
+	if (ipa_generate_rt_hw_tbl(IPA_IP_v4, &rt_mem))
+		IPAERR("fail\n");
+	if (ipa_generate_flt_hw_tbl(IPA_IP_v4, &flt_mem))
+		IPAERR("fail\n");
+	IPAERR("PHY hdr=%x rt=%x flt=%x\n", hdr_mem.phys_base, rt_mem.phys_base,
+			flt_mem.phys_base);
+	IPAERR("VIRT hdr=%x rt=%x flt=%x\n", (u32)hdr_mem.base,
+			(u32)rt_mem.base, (u32)flt_mem.base);
+	IPAERR("SIZE hdr=%d rt=%d flt=%d\n", hdr_mem.size, rt_mem.size,
+			flt_mem.size);
+	IPA_DUMP_BUFF(hdr_mem.base, hdr_mem.phys_base, hdr_mem.size);
+	IPA_DUMP_BUFF(rt_mem.base, rt_mem.phys_base, rt_mem.size);
+	IPA_DUMP_BUFF(flt_mem.base, flt_mem.phys_base, flt_mem.size);
+	if (hdr_mem.phys_base)
+		dma_free_coherent(NULL, hdr_mem.size, hdr_mem.base,
+				hdr_mem.phys_base);
+	if (rt_mem.phys_base)
+		dma_free_coherent(NULL, rt_mem.size, rt_mem.base,
+				rt_mem.phys_base);
+	if (flt_mem.phys_base)
+		dma_free_coherent(NULL, flt_mem.size, flt_mem.base,
+				flt_mem.phys_base);
+	mutex_unlock(&ipa_ctx->lock);
+}
+
+/*
+ * TODO: add swap if needed, for now assume LE is ok for device memory
+ * even though IPA registers are assumed to be BE
+ */
+/**
+ * ipa_write_dev_8() - writes 8 bit value
+ * @val: value
+ * @ofst_ipa_sram: address to write to
+ */
+void ipa_write_dev_8(u8 val, u16 ofst_ipa_sram)
+{
+	iowrite8(val, (u32)ipa_ctx->mmio + 0x4000 + ofst_ipa_sram);
+}
+
+/**
+ * ipa_write_dev_16() - writes 16 bit value
+ * @val: value
+ * @ofst_ipa_sram: address to write to
+ *
+ */
+void ipa_write_dev_16(u16 val, u16 ofst_ipa_sram)
+{
+	iowrite16(val, (u32)ipa_ctx->mmio + 0x4000 + ofst_ipa_sram);
+}
+
+/**
+ * ipa_write_dev_32() - writes 32 bit value
+ * @val: value
+ * @ofst_ipa_sram: address to write to
+ */
+void ipa_write_dev_32(u32 val, u16 ofst_ipa_sram)
+{
+	iowrite32(val, (u32)ipa_ctx->mmio + 0x4000 + ofst_ipa_sram);
+}
+
+/**
+ * ipa_read_dev_8() - reads 8 bit value
+ * @ofst_ipa_sram: address to read from
+ *
+ * Return value: value read
+ */
+unsigned int ipa_read_dev_8(u16 ofst_ipa_sram)
+{
+	return ioread8((u32)ipa_ctx->mmio + 0x4000 + ofst_ipa_sram);
+}
+
+/**
+ * ipa_read_dev_16() - reads 16 bit value
+ * @ofst_ipa_sram: address to read from
+ *
+ * Return value: value read
+ */
+unsigned int ipa_read_dev_16(u16 ofst_ipa_sram)
+{
+	return ioread16((u32)ipa_ctx->mmio + 0x4000 + ofst_ipa_sram);
+}
+
+/**
+ * ipa_read_dev_32() - reads 32 bit value
+ * @ofst_ipa_sram: address to read from
+ *
+ * Return value: value read
+ */
+unsigned int ipa_read_dev_32(u16 ofst_ipa_sram)
+{
+	return ioread32((u32)ipa_ctx->mmio + 0x4000 + ofst_ipa_sram);
+}
+
+/**
+ * ipa_write_dev_8rep() - writes a sequence of 8 bit values
+ * @ofst_ipa_sram: address to write to
+ * @buf: buffer to write from
+ * @count: number of values to write
+ */
+void ipa_write_dev_8rep(u16 ofst_ipa_sram, const void *buf, unsigned long count)
+{
+	iowrite8_rep((void *)((u32)ipa_ctx->mmio + 0x4000 + ofst_ipa_sram), buf,
+			count);
+}
+
+/**
+ * ipa_write_dev_16rep() - writes a sequence of 16 bit values
+ * @ofst_ipa_sram: address to write to
+ * @buf: buffer to write from
+ * @count: number of values to write
+ */
+void ipa_write_dev_16rep(u16 ofst_ipa_sram, const void *buf,
+		unsigned long count)
+{
+	iowrite16_rep((void *)((u32)ipa_ctx->mmio + 0x4000 + ofst_ipa_sram),
+			buf, count);
+}
+
+/**
+ * ipa_write_dev_32rep() - writes a sequence of 32 bit values
+ * @ofst_ipa_sram: address to write to
+ * @buf: buffer to write from
+ * @count: number of values to write
+ */
+void ipa_write_dev_32rep(u16 ofst_ipa_sram, const void *buf,
+		unsigned long count)
+{
+	iowrite32_rep((void *)((u32)ipa_ctx->mmio + 0x4000 + ofst_ipa_sram),
+			buf, count);
+}
+
+/**
+ * ipa_read_dev_8rep() - reads 8 bit value
+ * @ofst_ipa_sram: address to read from
+ * @buf: buffer to read to
+ * @count: number of bytes to read
+ */
+void ipa_read_dev_8rep(u16 ofst_ipa_sram, void *buf, unsigned long count)
+{
+	ioread8_rep((void *)((u32)ipa_ctx->mmio + 0x4000 + ofst_ipa_sram), buf,
+			count);
+}
+
+/**
+ * ipa_read_dev_16rep() - reads 16 bit value
+ * @ofst_ipa_sram: address to read from
+ * @buf: buffer to read to
+ * @count: number of bytes to read
+ */
+void ipa_read_dev_16rep(u16 ofst_ipa_sram, void *buf, unsigned long count)
+{
+	ioread16_rep((void *)((u32)ipa_ctx->mmio + 0x4000 + ofst_ipa_sram), buf,
+			count);
+}
+
+/**
+ * ipa_read_dev_32rep() - reads 32 bit value
+ * @ofst_ipa_sram: address to read from
+ * @buf: buffer to read to
+ * @count: number of bytes to read
+ */
+void ipa_read_dev_32rep(u16 ofst_ipa_sram, void *buf, unsigned long count)
+{
+	ioread32_rep((void *)((u32)ipa_ctx->mmio + 0x4000 + ofst_ipa_sram), buf,
+			count);
+}
+
+/**
+ * ipa_memset_dev() - memset IO
+ * @ofst_ipa_sram: address to set
+ * @value: value
+ * @count: number of bytes to set
+ */
+void ipa_memset_dev(u16 ofst_ipa_sram, u8 value, unsigned int count)
+{
+	memset_io((void *)((u32)ipa_ctx->mmio + 0x4000 + ofst_ipa_sram), value,
+			count);
+}
+
+/**
+ * ipa_memcpy_from_dev() - copy memory from device
+ * @dest: buffer to copy to
+ * @ofst_ipa_sram: address
+ * @count: number of bytes to copy
+ */
+void ipa_memcpy_from_dev(void *dest, u16 ofst_ipa_sram, unsigned int count)
+{
+	memcpy_fromio(dest, (void *)((u32)ipa_ctx->mmio + 0x4000 +
+				ofst_ipa_sram), count);
+}
+
+/**
+ * ipa_memcpy_to_dev() - copy memory to device
+ * @ofst_ipa_sram: address
+ * @source: buffer to copy from
+ * @count: number of bytes to copy
+ */
+void ipa_memcpy_to_dev(u16 ofst_ipa_sram, void *source, unsigned int count)
+{
+	memcpy_toio((void *)((u32)ipa_ctx->mmio + 0x4000 + ofst_ipa_sram),
+			source, count);
+}
+
+/**
+ * ipa_defrag() - handle IP de-fragmentation for bridging type use cases
+ * @skb: socket buffer to reassemble
+ *
+ * Return value:
+ * 0: skb is complete (not a fragment, or reassembly finished)
+ * -EINPROGRESS: skb was handed to the reassembly queue and is not yet complete
+ */
+int ipa_defrag(struct sk_buff *skb)
+{
+	/*
+	 * Reassemble IP fragments. TODO: need to setup network_header to
+	 * point to start of IP header
+	 */
+	if (ip_hdr(skb)->frag_off & htons(IP_MF | IP_OFFSET)) {
+		if (ip_defrag(skb, IP_DEFRAG_CONNTRACK_IN))
+			return -EINPROGRESS;
+	}
+
+	/* skb is complete (not a fragment, or fully reassembled), pass it on */
+	return 0;
+}
+
+/**
+ * ipa_search() - search for handle in RB tree
+ * @root: tree root
+ * @hdl: handle
+ *
+ * Return value: tree node corresponding to the handle
+ */
+struct ipa_tree_node *ipa_search(struct rb_root *root, u32 hdl)
+{
+	struct rb_node *node = root->rb_node;
+
+	while (node) {
+		struct ipa_tree_node *data = container_of(node,
+				struct ipa_tree_node, node);
+
+		if (hdl < data->hdl)
+			node = node->rb_left;
+		else if (hdl > data->hdl)
+			node = node->rb_right;
+		else
+			return data;
+	}
+	return NULL;
+}
+
+/**
+ * ipa_insert() - insert new node to RB tree
+ * @root: tree root
+ * @data: new data to insert
+ *
+ * Return value:
+ * 0: success
+ * -EPERM: tree already contains the node with provided handle
+ */
+int ipa_insert(struct rb_root *root, struct ipa_tree_node *data)
+{
+	struct rb_node **new = &(root->rb_node), *parent = NULL;
+
+	/* Figure out where to put new node */
+	while (*new) {
+		struct ipa_tree_node *this = container_of(*new,
+				struct ipa_tree_node, node);
+
+		parent = *new;
+		if (data->hdl < this->hdl)
+			new = &((*new)->rb_left);
+		else if (data->hdl > this->hdl)
+			new = &((*new)->rb_right);
+		else
+			return -EPERM;
+	}
+
+	/* Add new node and rebalance tree. */
+	rb_link_node(&data->node, parent, new);
+	rb_insert_color(&data->node, root);
+
+	return 0;
+}
+
+/**
+ * ipa_pipe_mem_init() - initialize the pipe memory
+ * @start_ofst: start offset
+ * @size: size
+ *
+ * Return value:
+ * 0: success
+ * -ENOMEM: no memory
+ */
+int ipa_pipe_mem_init(u32 start_ofst, u32 size)
+{
+	int res;
+	u32 aligned_start_ofst;
+	u32 aligned_size;
+	struct gen_pool *pool;
+
+	if (!size) {
+		IPAERR("no IPA pipe mem alloted\n");
+		goto fail;
+	}
+
+	aligned_start_ofst = IPA_HW_TABLE_ALIGNMENT(start_ofst);
+	aligned_size = size - (aligned_start_ofst - start_ofst);
+
+	IPADBG("start_ofst=%u aligned_start_ofst=%u size=%u aligned_size=%u\n",
+	       start_ofst, aligned_start_ofst, size, aligned_size);
+
+	/* allocation order of 8 i.e. 128 bytes, global pool */
+	pool = gen_pool_create(8, -1);
+	if (!pool) {
+		IPAERR("Failed to create a new memory pool.\n");
+		goto fail;
+	}
+
+	res = gen_pool_add(pool, aligned_start_ofst, aligned_size, -1);
+	if (res) {
+		IPAERR("Failed to add memory to IPA pipe pool\n");
+		goto err_pool_add;
+	}
+
+	ipa_ctx->pipe_mem_pool = pool;
+	return 0;
+
+err_pool_add:
+	gen_pool_destroy(pool);
+fail:
+	return -ENOMEM;
+}
+
+/**
+ * ipa_pipe_mem_alloc() - allocate pipe memory
+ * @ofst: offset
+ * @size: size
+ *
+ * Return value:
+ * 0: success
+ */
+int ipa_pipe_mem_alloc(u32 *ofst, u32 size)
+{
+	u32 vaddr;
+	int res = -1;
+
+	if (!ipa_ctx->pipe_mem_pool || !size) {
+		IPAERR("failed size=%u pipe_mem_pool=%p\n", size,
+				ipa_ctx->pipe_mem_pool);
+		return res;
+	}
+
+	vaddr = gen_pool_alloc(ipa_ctx->pipe_mem_pool, size);
+
+	if (vaddr) {
+		*ofst = vaddr;
+		res = 0;
+		IPADBG("size=%u ofst=%u\n", size, vaddr);
+	} else {
+		IPAERR("size=%u failed\n", size);
+	}
+
+	return res;
+}
+
+/**
+ * ipa_pipe_mem_free() - free pipe memory
+ * @ofst: offset
+ * @size: size
+ *
+ * Return value:
+ * 0: success
+ */
+int ipa_pipe_mem_free(u32 ofst, u32 size)
+{
+	IPADBG("size=%u ofst=%u\n", size, ofst);
+	if (ipa_ctx->pipe_mem_pool && size)
+		gen_pool_free(ipa_ctx->pipe_mem_pool, ofst, size);
+	return 0;
+}
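+
+/*
+ * Example (illustrative sketch): the pipe memory life cycle.  The offsets and
+ * sizes below are placeholders.
+ *
+ *	u32 ofst;
+ *
+ *	if (ipa_pipe_mem_init(0x2000, 0x4000))
+ *		return -ENOMEM;
+ *	if (ipa_pipe_mem_alloc(&ofst, 0x800) == 0) {
+ *		... use pipe memory at ofst ...
+ *		ipa_pipe_mem_free(ofst, 0x800);
+ *	}
+ */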
+
+/**
+ * ipa_set_aggr_mode() - Set the aggregation mode which is a global setting
+ * @mode:	[in] the desired aggregation mode, e.g. straight MBIM, QCNCM,
+ * etc.
+ *
+ * Returns:	0 on success
+ */
+int ipa_set_aggr_mode(enum ipa_aggr_mode mode)
+{
+	u32 reg_val;
+	reg_val = ipa_read_reg(ipa_ctx->mmio, IPA_AGGREGATION_SPARE_REG_2_OFST);
+	ipa_write_reg(ipa_ctx->mmio, IPA_AGGREGATION_SPARE_REG_2_OFST,
+			((mode & IPA_AGGREGATION_MODE_MSK) <<
+				IPA_AGGREGATION_MODE_SHFT) |
+			(reg_val & IPA_AGGREGATION_MODE_BMSK));
+	return 0;
+}
+EXPORT_SYMBOL(ipa_set_aggr_mode);
+
+/**
+ * ipa_set_qcncm_ndp_sig() - Set the NDP signature used for QCNCM aggregation
+ * mode
+ * @sig:	[in] the first 3 bytes of QCNCM NDP signature (expected to be
+ * "QND")
+ *
+ * Set the NDP signature used for QCNCM aggregation mode. The fourth byte
+ * (expected to be 'P') needs to be set using the header addition mechanism
+ *
+ * Returns:	0 on success, negative on failure
+ */
+int ipa_set_qcncm_ndp_sig(char sig[3])
+{
+	u32 reg_val;
+
+	if (sig == NULL) {
+		IPAERR("bad argument for ipa_set_qcncm_ndp_sig/n");
+		return -EINVAL;
+	}
+	reg_val = ipa_read_reg(ipa_ctx->mmio, IPA_AGGREGATION_SPARE_REG_2_OFST);
+	ipa_write_reg(ipa_ctx->mmio, IPA_AGGREGATION_SPARE_REG_2_OFST, sig[0] <<
+			IPA_AGGREGATION_QCNCM_SIG0_SHFT |
+			(sig[1] << IPA_AGGREGATION_QCNCM_SIG1_SHFT) |
+			sig[2] | (reg_val & IPA_AGGREGATION_QCNCM_SIG_BMSK));
+	return 0;
+}
+EXPORT_SYMBOL(ipa_set_qcncm_ndp_sig);
+
+/**
+ * ipa_set_single_ndp_per_mbim() - Enable/disable single NDP per MBIM frame
+ * configuration
+ * @enable:	[in] true for single NDP/MBIM; false otherwise
+ *
+ * Returns:	0 on success
+ */
+int ipa_set_single_ndp_per_mbim(bool enable)
+{
+	u32 reg_val;
+	reg_val = ipa_read_reg(ipa_ctx->mmio, IPA_AGGREGATION_SPARE_REG_1_OFST);
+	ipa_write_reg(ipa_ctx->mmio, IPA_AGGREGATION_SPARE_REG_1_OFST, (enable &
+			IPA_AGGREGATION_SINGLE_NDP_MSK) |
+			(reg_val & IPA_AGGREGATION_SINGLE_NDP_BMSK));
+	return 0;
+}
+EXPORT_SYMBOL(ipa_set_single_ndp_per_mbim);
+
+/**
+ * ipa_straddle_boundary() - Checks whether a memory buffer straddles a boundary
+ * @start: start address of the memory buffer
+ * @end: end address of the memory buffer
+ * @boundary: boundary
+ *
+ * Return value:
+ * 1: if the interval [start, end] straddles boundary
+ * 0: otherwise
+ */
+int ipa_straddle_boundary(u32 start, u32 end, u32 boundary)
+{
+	u32 next_start;
+	u32 prev_end;
+
+	IPADBG("start=%u end=%u boundary=%u\n", start, end, boundary);
+
+	next_start = (start + (boundary - 1)) & ~(boundary - 1);
+	prev_end = ((end + (boundary - 1)) & ~(boundary - 1)) - boundary;
+
+	while (next_start < prev_end)
+		next_start += boundary;
+
+	if (next_start == prev_end)
+		return 1;
+	else
+		return 0;
+}
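+
+/*
+ * Worked example with boundary=0x1000: a buffer spanning [0xF00, 0x10FF]
+ * yields next_start=0x1000 and prev_end=0x1000, which meet, so the function
+ * returns 1 (the buffer crosses the boundary at 0x1000).  A buffer spanning
+ * [0x100, 0x1FF] yields next_start=0x1000 but prev_end=0x0, so it returns 0.
+ */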
+
diff --git a/drivers/platform/msm/ipa/rmnet_bridge.c b/drivers/platform/msm/ipa/rmnet_bridge.c
new file mode 100644
index 0000000..3c7f5ca
--- /dev/null
+++ b/drivers/platform/msm/ipa/rmnet_bridge.c
@@ -0,0 +1,122 @@
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/export.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <mach/bam_dmux.h>
+#include <mach/ipa.h>
+#include <mach/sps.h>
+#include "a2_service.h"
+#include "ipa_i.h"
+
+static struct rmnet_bridge_cb_type {
+	u32 producer_handle;
+	u32 consumer_handle;
+	bool is_connected;
+} rmnet_bridge_cb;
+
+/**
+* rmnet_bridge_init() - Initialize RmNet bridge module
+*
+* Return codes:
+* 0: success
+*/
+int rmnet_bridge_init(void)
+{
+	memset(&rmnet_bridge_cb, 0, sizeof(struct rmnet_bridge_cb_type));
+
+	return 0;
+}
+EXPORT_SYMBOL(rmnet_bridge_init);
+
+/**
+* rmnet_bridge_disconnect() - Disconnect RmNet bridge module
+*
+* Return codes:
+* 0: success
+* -EINVAL: invalid parameters
+*/
+int rmnet_bridge_disconnect(void)
+{
+	int ret = 0;
+	if (false == rmnet_bridge_cb.is_connected) {
+		pr_err("%s: trying to disconnect already disconnected RmNet bridge\n",
+		       __func__);
+		goto bail;
+	}
+
+	rmnet_bridge_cb.is_connected = false;
+
+	ret = ipa_bridge_teardown(IPA_DL);
+	ret = ipa_bridge_teardown(IPA_UL);
+bail:
+	return ret;
+}
+EXPORT_SYMBOL(rmnet_bridge_disconnect);
+
+/**
+* rmnet_bridge_connect() - Connect RmNet bridge module
+* @producer_hdl:	IPA producer handle
+* @consumer_hdl:	IPA consumer handle
+* @wwan_logical_channel_id:	WWAN logical channel ID
+*
+* Return codes:
+* 0: success
+* -EINVAL: invalid parameters
+*/
+int rmnet_bridge_connect(u32 producer_hdl,
+			 u32 consumer_hdl,
+			 int wwan_logical_channel_id)
+{
+	int ret = 0;
+
+	if (true == rmnet_bridge_cb.is_connected) {
+		ret = 0;
+		pr_err("%s: trying to connect already connected RmNet bridge\n",
+		       __func__);
+		goto bail;
+	}
+
+	rmnet_bridge_cb.consumer_handle = consumer_hdl;
+	rmnet_bridge_cb.producer_handle = producer_hdl;
+	rmnet_bridge_cb.is_connected = true;
+
+	ret = ipa_bridge_setup(IPA_DL);
+	if (ret) {
+		pr_err("%s: IPA DL bridge setup failure\n", __func__);
+		goto bail_dl;
+	}
+	ret = ipa_bridge_setup(IPA_UL);
+	if (ret) {
+		pr_err("%s: IPA UL bridge setup failure\n", __func__);
+		goto bail_ul;
+	}
+	return 0;
+bail_ul:
+	ipa_bridge_teardown(IPA_DL);
+bail_dl:
+	rmnet_bridge_cb.is_connected = false;
+bail:
+	return ret;
+}
+EXPORT_SYMBOL(rmnet_bridge_connect);
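+
+/*
+ * Example (illustrative sketch): the expected bring-up/tear-down order.  The
+ * producer/consumer handles stand for handles obtained elsewhere and the
+ * logical channel id is a placeholder.
+ *
+ *	rmnet_bridge_init();
+ *	if (rmnet_bridge_connect(prod_hdl, cons_hdl, 8))
+ *		pr_err("rmnet bridge connect failed\n");
+ *	...
+ *	rmnet_bridge_disconnect();
+ */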
+
+void rmnet_bridge_get_client_handles(u32 *producer_handle,
+		u32 *consumer_handle)
+{
+	if (producer_handle == NULL || consumer_handle == NULL)
+		return;
+
+	*producer_handle = rmnet_bridge_cb.producer_handle;
+	*consumer_handle = rmnet_bridge_cb.consumer_handle;
+}
diff --git a/drivers/platform/msm/usb_bam.c b/drivers/platform/msm/usb_bam.c
index bf78b1c..8f97531 100644
--- a/drivers/platform/msm/usb_bam.c
+++ b/drivers/platform/msm/usb_bam.c
@@ -23,11 +23,12 @@
 #include <linux/usb/msm_hsusb.h>
 #include <mach/usb_bam.h>
 #include <mach/sps.h>
+#include <mach/ipa.h>
 #include <linux/workqueue.h>
 #include <linux/dma-mapping.h>
 
 #define USB_SUMMING_THRESHOLD 512
-#define CONNECTIONS_NUM	4
+#define CONNECTIONS_NUM	8
 
 static struct sps_bam_props usb_props;
 static struct sps_pipe *sps_pipes[CONNECTIONS_NUM][2];
@@ -49,7 +50,8 @@
 	u32 *src_pipe;
 	u32 *dst_pipe;
 	struct usb_bam_wake_event_info peer_event;
-	bool enabled;
+	bool src_enabled;
+	bool dst_enabled;
 };
 
 static struct usb_bam_connect_info usb_bam_connections[CONNECTIONS_NUM];
@@ -206,7 +208,7 @@
 
 	ret = sps_connect(*pipe, connection);
 	if (ret < 0) {
-		pr_err("%s: tx connect error %d\n", __func__, ret);
+		pr_err("%s: sps_connect failed %d\n", __func__, ret);
 		goto error;
 	}
 	return 0;
@@ -218,9 +220,119 @@
 	return ret;
 }
 
+static int connect_pipe_ipa(
+			struct usb_bam_connect_ipa_params *connection_params)
+{
+	int ret;
+	u8 conn_idx = connection_params->idx;
+	enum usb_bam_pipe_dir pipe_dir = connection_params->dir;
+	struct sps_pipe **pipe = &sps_pipes[conn_idx][pipe_dir];
+	struct sps_connect *connection =
+		&sps_connections[conn_idx][pipe_dir];
+	struct msm_usb_bam_platform_data *pdata =
+		usb_bam_pdev->dev.platform_data;
+	struct usb_bam_pipe_connect *pipe_connection =
+				&msm_usb_bam_connections_info
+				[pdata->usb_active_bam][conn_idx][pipe_dir];
 
-static int disconnect_pipe(u8 connection_idx, enum usb_bam_pipe_dir pipe_dir,
-						u32 *usb_pipe_idx)
+	struct ipa_connect_params ipa_in_params;
+	struct ipa_sps_params sps_out_params;
+	u32 usb_handle, usb_phy_addr;
+	u32 clnt_hdl = 0;
+
+	memset(&ipa_in_params, 0, sizeof(ipa_in_params));
+	memset(&sps_out_params, 0, sizeof(sps_out_params));
+
+	if (pipe_dir == USB_TO_PEER_PERIPHERAL) {
+		usb_phy_addr = pipe_connection->src_phy_addr;
+		ipa_in_params.client_ep_idx = pipe_connection->src_pipe_index;
+	} else {
+		usb_phy_addr = pipe_connection->dst_phy_addr;
+		ipa_in_params.client_ep_idx = pipe_connection->dst_pipe_index;
+	}
+	/* Get HSUSB / HSIC bam handle */
+	ret = sps_phy2h(usb_phy_addr, &usb_handle);
+	if (ret) {
+		pr_err("%s: sps_phy2h failed (HSUSB/HSIC BAM) %d\n",
+			   __func__, ret);
+		/* nothing has been allocated or connected yet */
+		return ret;
+	}
+
+	/* IPA input parameters */
+	ipa_in_params.client_bam_hdl = usb_handle;
+	ipa_in_params.desc_fifo_sz = pipe_connection->desc_fifo_size;
+	ipa_in_params.data_fifo_sz = pipe_connection->data_fifo_size;
+	ipa_in_params.notify = connection_params->notify;
+	ipa_in_params.priv = connection_params->priv;
+	ipa_in_params.client = connection_params->client;
+	if (pipe_connection->mem_type != SYSTEM_MEM)
+		ipa_in_params.pipe_mem_preferred = true;
+
+	memcpy(&ipa_in_params.ipa_ep_cfg, &connection_params->ipa_ep_cfg,
+		   sizeof(struct ipa_ep_cfg));
+
+	ret = ipa_connect(&ipa_in_params, &sps_out_params, &clnt_hdl);
+	if (ret) {
+		pr_err("%s: ipa_connect failed\n", __func__);
+		return ret;
+	}
+
+	*pipe = sps_alloc_endpoint();
+	if (*pipe == NULL) {
+		pr_err("%s: sps_alloc_endpoint failed\n", __func__);
+		ret = -ENOMEM;
+		goto disconnect_ipa;
+	}
+
+	ret = sps_get_config(*pipe, connection);
+	if (ret) {
+		pr_err("%s: tx get config failed %d\n", __func__, ret);
+		goto get_config_failed;
+	}
+
+	if (pipe_dir == USB_TO_PEER_PERIPHERAL) {
+		/* USB src IPA dest */
+		connection->mode = SPS_MODE_SRC;
+		connection_params->cons_clnt_hdl = clnt_hdl;
+		connection->source = usb_handle;
+		connection->destination = sps_out_params.ipa_bam_hdl;
+		connection->src_pipe_index = pipe_connection->src_pipe_index;
+		connection->dest_pipe_index = sps_out_params.ipa_ep_idx;
+		*(connection_params->src_pipe) = connection->src_pipe_index;
+	} else {
+		/* IPA src, USB dest */
+		connection->mode = SPS_MODE_DEST;
+		connection_params->prod_clnt_hdl = clnt_hdl;
+		connection->source = sps_out_params.ipa_bam_hdl;
+		connection->destination = usb_handle;
+		connection->src_pipe_index = sps_out_params.ipa_ep_idx;
+		connection->dest_pipe_index = pipe_connection->dst_pipe_index;
+		*(connection_params->dst_pipe) = connection->dest_pipe_index;
+	}
+
+	connection->data = sps_out_params.data;
+	connection->desc = sps_out_params.desc;
+	connection->event_thresh = 16;
+	connection->options = SPS_O_AUTO_ENABLE;
+
+	ret = sps_connect(*pipe, connection);
+	if (ret < 0) {
+		pr_err("%s: sps_connect failed %d\n", __func__, ret);
+		goto error;
+	}
+
+	return 0;
+
+error:
+	sps_disconnect(*pipe);
+get_config_failed:
+	sps_free_endpoint(*pipe);
+disconnect_ipa:
+	ipa_disconnect(clnt_hdl);
+	return ret;
+}
+
+static int disconnect_pipe(u8 connection_idx, enum usb_bam_pipe_dir pipe_dir)
 {
 	struct msm_usb_bam_platform_data *pdata =
 				usb_bam_pdev->dev.platform_data;
@@ -269,7 +381,7 @@
 		return -EINVAL;
 	}
 
-	if (connection->enabled) {
+	if (connection->src_enabled && connection->dst_enabled) {
 		pr_debug("%s: connection %d was already established\n",
 			__func__, idx);
 		return 0;
@@ -281,22 +393,66 @@
 	if (src_pipe_idx) {
 		/* open USB -> Peripheral pipe */
 		ret = connect_pipe(connection->idx, USB_TO_PEER_PERIPHERAL,
-			connection->src_pipe);
+						   connection->src_pipe);
 		if (ret) {
 			pr_err("%s: src pipe connection failure\n", __func__);
 			return ret;
 		}
 	}
+	connection->src_enabled = 1;
+
 	if (dst_pipe_idx) {
 		/* open Peripheral -> USB pipe */
 		ret = connect_pipe(connection->idx, PEER_PERIPHERAL_TO_USB,
-			connection->dst_pipe);
+						   connection->dst_pipe);
 		if (ret) {
 			pr_err("%s: dst pipe connection failure\n", __func__);
 			return ret;
 		}
 	}
-	connection->enabled = 1;
+	connection->dst_enabled = 1;
+
+	return 0;
+}
+
+int usb_bam_connect_ipa(struct usb_bam_connect_ipa_params *ipa_params)
+{
+	u8 idx = ipa_params->idx;
+	struct usb_bam_connect_info *connection = &usb_bam_connections[idx];
+	int ret;
+
+	if (idx >= CONNECTIONS_NUM) {
+		pr_err("%s: Invalid connection index\n",
+			__func__);
+		return -EINVAL;
+	}
+
+	if ((connection->src_enabled &&
+		 ipa_params->dir == USB_TO_PEER_PERIPHERAL) ||
+		 (connection->dst_enabled &&
+		  ipa_params->dir == PEER_PERIPHERAL_TO_USB)) {
+		pr_debug("%s: connection %d was already established\n",
+			__func__, idx);
+		return 0;
+	}
+
+	if (ipa_params->dir == USB_TO_PEER_PERIPHERAL)
+		connection->src_pipe = ipa_params->src_pipe;
+	else
+		connection->dst_pipe = ipa_params->dst_pipe;
+
+	connection->idx = idx;
+
+	ret = connect_pipe_ipa(ipa_params);
+	if (ret) {
+		pr_err("%s: dst pipe connection failure\n", __func__);
+		return ret;
+	}
+
+	if (ipa_params->dir == USB_TO_PEER_PERIPHERAL)
+		connection->src_enabled = 1;
+	else
+		connection->dst_enabled = 1;
 
 	return 0;
 }
@@ -364,39 +520,78 @@
 		return -EINVAL;
 	}
 
-	if (!connection->enabled) {
+	if (!connection->src_enabled && !connection->dst_enabled) {
 		pr_debug("%s: connection %d isn't enabled\n",
 			__func__, idx);
 		return 0;
 	}
 
-	if (connection->src_pipe) {
+	if (connection->src_enabled) {
 		/* close USB -> Peripheral pipe */
-		ret = disconnect_pipe(connection->idx, USB_TO_PEER_PERIPHERAL,
-						   connection->src_pipe);
+		ret = disconnect_pipe(connection->idx, USB_TO_PEER_PERIPHERAL);
 		if (ret) {
 			pr_err("%s: src pipe connection failure\n", __func__);
 			return ret;
 		}
-
+		connection->src_enabled = 0;
 	}
-	if (connection->dst_pipe) {
+	if (connection->dst_enabled) {
 		/* close Peripheral -> USB pipe */
-		ret = disconnect_pipe(connection->idx, PEER_PERIPHERAL_TO_USB,
-			connection->dst_pipe);
+		ret = disconnect_pipe(connection->idx, PEER_PERIPHERAL_TO_USB);
 		if (ret) {
 			pr_err("%s: dst pipe connection failure\n", __func__);
 			return ret;
 		}
+		connection->dst_enabled = 0;
 	}
 
 	connection->src_pipe = 0;
 	connection->dst_pipe = 0;
-	connection->enabled = 0;
 
 	return 0;
 }
+int usb_bam_disconnect_ipa(u8 idx,
+		struct usb_bam_connect_ipa_params *ipa_params)
+{
+	struct usb_bam_connect_info *connection = &usb_bam_connections[idx];
+	int ret;
 
+	if (!usb_bam_pdev) {
+		pr_err("%s: usb_bam device not found\n", __func__);
+		return -ENODEV;
+	}
+
+	if (idx >= CONNECTIONS_NUM) {
+		pr_err("%s: Invalid connection index\n",
+			__func__);
+		return -EINVAL;
+	}
+
+	/* Currently just calls ipa_disconnect, no sps pipes
+	   disconnection support */
+
+	/* close IPA -> USB pipe */
+	if (connection->dst_pipe) {
+		ret = ipa_disconnect(ipa_params->prod_clnt_hdl);
+		if (ret) {
+			pr_err("%s: dst pipe disconnection failure\n",
+				__func__);
+			return ret;
+		}
+	}
+	/* close USB -> IPA pipe */
+	if (connection->src_pipe) {
+		ret = ipa_disconnect(ipa_params->cons_clnt_hdl);
+		if (ret) {
+			pr_err("%s: src pipe disconnection failure\n",
+				__func__);
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
 static int update_connections_info(struct device_node *node, int bam,
 	int conn_num, int dir, enum usb_pipe_mem_type mem_type)
 {
@@ -412,33 +607,28 @@
 
 	key = "qcom,src-bam-physical-address";
 	rc = of_property_read_u32(node, key, &val);
-	if (rc)
-		goto err;
-	pipe_connection->src_phy_addr = val;
+	if (!rc)
+		pipe_connection->src_phy_addr = val;
 
 	key = "qcom,src-bam-pipe-index";
 	rc = of_property_read_u32(node, key, &val);
-	if (rc)
-		goto err;
-	pipe_connection->src_pipe_index = val;
+	if (!rc)
+		pipe_connection->src_pipe_index = val;
 
 	key = "qcom,dst-bam-physical-address";
 	rc = of_property_read_u32(node, key, &val);
-	if (rc)
-		goto err;
-	pipe_connection->dst_phy_addr = val;
+	if (!rc)
+		pipe_connection->dst_phy_addr = val;
 
 	key = "qcom,dst-bam-pipe-index";
 	rc = of_property_read_u32(node, key, &val);
-	if (rc)
-		goto err;
-	pipe_connection->dst_pipe_index = val;
+	if (!rc)
+		pipe_connection->dst_pipe_index = val;
 
 	key = "qcom,data-fifo-offset";
 	rc = of_property_read_u32(node, key, &val);
-	if (rc)
-		goto err;
-	pipe_connection->data_fifo_base_offset = val;
+	if (!rc)
+		pipe_connection->data_fifo_base_offset = val;
 
 	key = "qcom,data-fifo-size";
 	rc = of_property_read_u32(node, key, &val);
@@ -448,9 +638,8 @@
 
 	key = "qcom,descriptor-fifo-offset";
 	rc = of_property_read_u32(node, key, &val);
-	if (rc)
-		goto err;
-	pipe_connection->desc_fifo_base_offset = val;
+	if (!rc)
+		pipe_connection->desc_fifo_base_offset = val;
 
 	key = "qcom,descriptor-fifo-size";
 	rc = of_property_read_u32(node, key, &val);
@@ -539,10 +728,8 @@
 
 	rc = of_property_read_u32(node, "qcom,usb-base-address",
 		&pdata->usb_base_address);
-	if (rc) {
-		pr_err("Invalid usb base address property\n");
-		return NULL;
-	}
+	if (rc)
+		pr_debug("%s: Invalid usb base address property\n", __func__);
 
 	pdata->ignore_core_reset_ack = of_property_read_bool(node,
 					"qcom,ignore-core-reset-ack");
@@ -588,22 +775,28 @@
 		if (rc)
 			goto err;
 
+		if (mem_type == USB_PRIVATE_MEM &&
+			!pdata->usb_base_address)
+			goto err;
+
 		rc = of_property_read_string(node, "label", &str);
 		if (rc) {
 			pr_err("Cannot read string\n");
 			goto err;
 		}
 
-		if (strstr(str, "usb-to-peri"))
+		if (strnstr(str, "usb-to", 30))
 			dir = USB_TO_PEER_PERIPHERAL;
-		else if (strstr(str, "peri-to-usb"))
+		else if (strnstr(str, "to-usb", 30))
 			dir = PEER_PERIPHERAL_TO_USB;
 		else
 			goto err;
 
-		/* Check if connection type is suported */
+		/* Check if connection type is supported */
 		if (!strcmp(str, "usb-to-peri-qdss-dwc3") ||
 			!strcmp(str, "peri-to-usb-qdss-dwc3") ||
+			!strcmp(str, "usb-to-ipa") ||
+			!strcmp(str, "ipa-to-usb") ||
 			!strcmp(str, "usb-to-peri-qdss-hsusb") ||
 			!strcmp(str, "peri-to-usb-qdss-hsusb"))
 				conn_num = 0;
@@ -772,7 +965,8 @@
 	dev_dbg(&pdev->dev, "usb_bam_probe\n");
 
 	for (i = 0; i < CONNECTIONS_NUM; i++) {
-		usb_bam_connections[i].enabled = 0;
+		usb_bam_connections[i].src_enabled = 0;
+		usb_bam_connections[i].dst_enabled = 0;
 		INIT_WORK(&usb_bam_connections[i].peer_event.wake_w,
 			usb_bam_wake_work);
 	}
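
Note (editor sketch, not part of the patch): the hunks above replace the single
"enabled" flag with per-direction src_enabled/dst_enabled flags and add an IPA
connect path. A minimal sketch of how a function driver might bring up both
directions, assuming only the fields and enum values visible in the hunks
(idx, dir, src_pipe/dst_pipe, prod_clnt_hdl/cons_clnt_hdl); everything else is
illustrative:

static int example_ipa_bridge_up(struct usb_bam_connect_ipa_params *ul,
				 struct usb_bam_connect_ipa_params *dl)
{
	int ret;

	/* USB -> IPA (uplink) pipe */
	ul->dir = USB_TO_PEER_PERIPHERAL;
	ret = usb_bam_connect_ipa(ul);
	if (ret)
		return ret;

	/* IPA -> USB (downlink) pipe */
	dl->dir = PEER_PERIPHERAL_TO_USB;
	ret = usb_bam_connect_ipa(dl);
	if (ret)
		usb_bam_disconnect_ipa(ul->idx, ul);

	return ret;
}

On cable disconnect, a single usb_bam_disconnect_ipa(idx, ipa_params) call
tears down whichever pipes were set up, using the prod/cons IPA client handles
carried in ipa_params.
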
diff --git a/drivers/power/pm8921-bms.c b/drivers/power/pm8921-bms.c
index 81be519..703aca9 100644
--- a/drivers/power/pm8921-bms.c
+++ b/drivers/power/pm8921-bms.c
@@ -87,7 +87,7 @@
 struct pm8921_bms_chip {
 	struct device		*dev;
 	struct dentry		*dent;
-	unsigned int		r_sense;
+	int			r_sense_uohm;
 	unsigned int		v_cutoff;
 	unsigned int		fcc;
 	struct single_row_lut	*fcc_temp_lut;
@@ -500,15 +500,15 @@
 #define SLEEP_CLK_HZ		32764
 #define SECONDS_PER_HOUR	3600
 /**
- * ccmicrovolt_to_nvh -
+ * ccmicrovolt_to_uvh -
  * @cc_uv:  coulumb counter converted to uV
  *
- * RETURNS:	coulumb counter based charge in nVh
- *		(nano Volt Hour)
+ * RETURNS:	coulomb counter based charge in uVh
+ *		(micro Volt Hour)
  */
-static s64 ccmicrovolt_to_nvh(s64 cc_uv)
+static s64 ccmicrovolt_to_uvh(s64 cc_uv)
 {
-	return div_s64(cc_uv * CC_READING_TICKS * 1000,
+	return div_s64(cc_uv * CC_READING_TICKS,
 			SLEEP_CLK_HZ * SECONDS_PER_HOUR);
 }
 
@@ -618,7 +618,7 @@
 
 	convert_vbatt_raw_to_uv(the_chip, usb_chg, vbat_raw, vbat_uv);
 	convert_vsense_to_uv(the_chip, vsense_raw, &vsense_uv);
-	*ibat_ua = vsense_uv * 1000 / (int)the_chip->r_sense;
+	*ibat_ua = div_s64((s64)vsense_uv * 1000000LL, the_chip->r_sense_uohm);
 
 	pr_debug("vsense_raw = 0x%x vbat_raw = 0x%x"
 			" ibat_ua = %d vbat_uv = %d\n",
@@ -840,7 +840,7 @@
  */
 static void calculate_cc_uah(struct pm8921_bms_chip *chip, int cc, int *val)
 {
-	int64_t cc_voltage_uv, cc_nvh, cc_uah;
+	int64_t cc_voltage_uv, cc_uvh, cc_uah;
 
 	cc_voltage_uv = cc;
 	cc_voltage_uv -= chip->cc_reading_at_100;
@@ -850,9 +850,9 @@
 	cc_voltage_uv = cc_to_microvolt(chip, cc_voltage_uv);
 	cc_voltage_uv = pm8xxx_cc_adjust_for_gain(cc_voltage_uv);
 	pr_debug("cc_voltage_uv = %lld microvolts\n", cc_voltage_uv);
-	cc_nvh = ccmicrovolt_to_nvh(cc_voltage_uv);
-	pr_debug("cc_nvh = %lld nano_volt_hour\n", cc_nvh);
-	cc_uah = div_s64(cc_nvh, chip->r_sense);
+	cc_uvh = ccmicrovolt_to_uvh(cc_voltage_uv);
+	pr_debug("cc_uvh = %lld micro_volt_hour\n", cc_uvh);
+	cc_uah = div_s64(cc_uvh * 1000000LL, chip->r_sense_uohm);
 	*val = cc_uah;
 }
 
@@ -2071,25 +2071,25 @@
 
 int pm8921_bms_get_battery_current(int *result_ua)
 {
-	int vsense;
+	int vsense_uv;
 
 	if (!the_chip) {
 		pr_err("called before initialization\n");
 		return -EINVAL;
 	}
-	if (the_chip->r_sense == 0) {
+	if (the_chip->r_sense_uohm == 0) {
 		pr_err("r_sense is zero\n");
 		return -EINVAL;
 	}
 
 	mutex_lock(&the_chip->bms_output_lock);
 	pm_bms_lock_output_data(the_chip);
-	read_vsense_avg(the_chip, &vsense);
+	read_vsense_avg(the_chip, &vsense_uv);
 	pm_bms_unlock_output_data(the_chip);
 	mutex_unlock(&the_chip->bms_output_lock);
-	pr_debug("vsense=%duV\n", vsense);
+	pr_debug("vsense=%duV\n", vsense_uv);
 	/* cast for signed division */
-	*result_ua = vsense * 1000 / (int)the_chip->r_sense;
+	*result_ua = div_s64(vsense_uv * 1000000LL, the_chip->r_sense_uohm);
 	pr_debug("ibat=%duA\n", *result_ua);
 	return 0;
 }
@@ -2897,7 +2897,7 @@
 	mutex_init(&chip->bms_output_lock);
 	mutex_init(&chip->last_ocv_uv_mutex);
 	chip->dev = &pdev->dev;
-	chip->r_sense = pdata->r_sense;
+	chip->r_sense_uohm = pdata->r_sense_uohm;
 	chip->v_cutoff = pdata->v_cutoff;
 	chip->max_voltage_uv = pdata->max_voltage_uv;
 	chip->chg_term_ua = pdata->chg_term_ua;
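
Unit check on the micro-ohm scaling introduced above (illustrative values, not
from the patch): microvolts across a sense resistance in micro-ohms gives amps,
so a factor of 1,000,000 is needed to report microamps. A minimal sketch:

static s64 example_ibat_ua(s64 vsense_uv, s64 r_sense_uohm)
{
	/* e.g. 3000 uV across 10000 uOhm (10 mOhm) -> 300000 uA (0.3 A) */
	return div_s64(vsense_uv * 1000000LL, r_sense_uohm);
}

The same scaling is why calculate_cc_uah() now computes
cc_uvh * 1000000 / r_sense_uohm: uVh divided by uOhm yields uAh directly.
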
diff --git a/drivers/power/pm8921-charger.c b/drivers/power/pm8921-charger.c
index cb6b23e..3995cf7 100644
--- a/drivers/power/pm8921-charger.c
+++ b/drivers/power/pm8921-charger.c
@@ -291,6 +291,7 @@
 	u8				active_path;
 	int				recent_reported_soc;
 	int				battery_less_hardware;
+	int				ibatmax_max_adj_ma;
 };
 
 /* user space parameter to limit usb current */
@@ -635,10 +636,26 @@
 }
 
 #define PM8921_CHG_IBATMAX_MIN	325
-#define PM8921_CHG_IBATMAX_MAX	2000
+#define PM8921_CHG_IBATMAX_MAX	3025
 #define PM8921_CHG_I_MIN_MA	225
 #define PM8921_CHG_I_STEP_MA	50
 #define PM8921_CHG_I_MASK	0x3F
+static int pm_chg_ibatmax_get(struct pm8921_chg_chip *chip, int *ibat_ma)
+{
+	u8 temp;
+	int rc;
+
+	rc = pm8xxx_readb(chip->dev->parent, CHG_IBAT_MAX, &temp);
+	if (rc) {
+		pr_err("rc = %d while reading ibat max\n", rc);
+		*ibat_ma = 0;
+		return rc;
+	}
+	*ibat_ma = (int)(temp & PM8921_CHG_I_MASK) * PM8921_CHG_I_STEP_MA
+							+ PM8921_CHG_I_MIN_MA;
+	return 0;
+}
+
 static int pm_chg_ibatmax_set(struct pm8921_chg_chip *chip, int chg_current)
 {
 	u8 temp;
@@ -930,6 +947,11 @@
 			break;
 	}
 
+	if (i < 0) {
+		pr_err("can't find %d in usb_ma_table. Use min.\n", temp);
+		i = 0;
+	}
+
 	*mA = usb_ma_table[i].usb_ma;
 
 	return rc;
@@ -1109,31 +1131,6 @@
 					PM8921_CHG_LED_SRC_CONFIG_MASK, temp);
 }
 
-static void enable_input_voltage_regulation(struct pm8921_chg_chip *chip)
-{
-	u8 temp;
-	int rc;
-
-	rc = pm8xxx_writeb(chip->dev->parent, CHG_BUCK_CTRL_TEST3, 0x70);
-	if (rc) {
-		pr_err("Failed to write 0x70 to CTRL_TEST3 rc = %d\n", rc);
-		return;
-	}
-	rc = pm8xxx_readb(chip->dev->parent, CHG_BUCK_CTRL_TEST3, &temp);
-	if (rc) {
-		pr_err("Failed to read CTRL_TEST3 rc = %d\n", rc);
-		return;
-	}
-	/* unset the input voltage disable bit */
-	temp &= 0xFE;
-	/* set the write bit */
-	temp |= 0x80;
-	rc = pm8xxx_writeb(chip->dev->parent, CHG_BUCK_CTRL_TEST3, temp);
-	if (rc) {
-		pr_err("Failed to write 0x%x to CTRL_TEST3 rc=%d\n", temp, rc);
-		return;
-	}
-}
 
 static int64_t read_battery_id(struct pm8921_chg_chip *chip)
 {
@@ -1517,6 +1514,34 @@
 	return pm_chg_get_rt_status(chip, BATT_INSERTED_IRQ);
 }
 
+static int get_prop_batt_status(struct pm8921_chg_chip *chip)
+{
+	int batt_state = POWER_SUPPLY_STATUS_DISCHARGING;
+	int fsm_state = pm_chg_get_fsm_state(chip);
+	int i;
+
+	if (chip->ext_psy) {
+		if (chip->ext_charge_done)
+			return POWER_SUPPLY_STATUS_FULL;
+		if (chip->ext_charging)
+			return POWER_SUPPLY_STATUS_CHARGING;
+	}
+
+	for (i = 0; i < ARRAY_SIZE(map); i++)
+		if (map[i].fsm_state == fsm_state)
+			batt_state = map[i].batt_state;
+
+	if (fsm_state == FSM_STATE_ON_CHG_HIGHI_1) {
+		if (!pm_chg_get_rt_status(chip, BATT_INSERTED_IRQ)
+			|| !pm_chg_get_rt_status(chip, BAT_TEMP_OK_IRQ)
+			|| pm_chg_get_rt_status(chip, CHGHOT_IRQ)
+			|| pm_chg_get_rt_status(chip, VBATDET_LOW_IRQ))
+
+			batt_state = POWER_SUPPLY_STATUS_NOT_CHARGING;
+	}
+	return batt_state;
+}
+
 static int get_prop_batt_capacity(struct pm8921_chg_chip *chip)
 {
 	int percent_soc;
@@ -1536,8 +1561,8 @@
 		pr_warn_ratelimited("low battery charge = %d%%\n",
 						percent_soc);
 
-	if (chip->recent_reported_soc == (chip->resume_charge_percent + 1)
-			&& percent_soc == chip->resume_charge_percent) {
+	if (percent_soc <= chip->resume_charge_percent
+		&& get_prop_batt_status(chip) == POWER_SUPPLY_STATUS_FULL) {
 		pr_debug("soc fell below %d. charging enabled.\n",
 						chip->resume_charge_percent);
 		if (chip->is_bat_warm)
@@ -1666,34 +1691,6 @@
 	return POWER_SUPPLY_CHARGE_TYPE_NONE;
 }
 
-static int get_prop_batt_status(struct pm8921_chg_chip *chip)
-{
-	int batt_state = POWER_SUPPLY_STATUS_DISCHARGING;
-	int fsm_state = pm_chg_get_fsm_state(chip);
-	int i;
-
-	if (chip->ext_psy) {
-		if (chip->ext_charge_done)
-			return POWER_SUPPLY_STATUS_FULL;
-		if (chip->ext_charging)
-			return POWER_SUPPLY_STATUS_CHARGING;
-	}
-
-	for (i = 0; i < ARRAY_SIZE(map); i++)
-		if (map[i].fsm_state == fsm_state)
-			batt_state = map[i].batt_state;
-
-	if (fsm_state == FSM_STATE_ON_CHG_HIGHI_1) {
-		if (!pm_chg_get_rt_status(chip, BATT_INSERTED_IRQ)
-			|| !pm_chg_get_rt_status(chip, BAT_TEMP_OK_IRQ)
-			|| pm_chg_get_rt_status(chip, CHGHOT_IRQ)
-			|| pm_chg_get_rt_status(chip, VBATDET_LOW_IRQ))
-
-			batt_state = POWER_SUPPLY_STATUS_NOT_CHARGING;
-	}
-	return batt_state;
-}
-
 #define MAX_TOLERABLE_BATT_TEMP_DDC	680
 static int get_prop_batt_temp(struct pm8921_chg_chip *chip)
 {
@@ -1815,7 +1812,7 @@
 		return;
 	}
 
-	if (mA >= 0 && mA <= 2) {
+	if (mA <= 2) {
 		usb_chg_current = 0;
 		rc = pm_chg_iusbmax_set(the_chip, 0);
 		if (rc) {
@@ -1833,6 +1830,12 @@
 				break;
 		}
 
+		if (i < 0) {
+			pr_err("can't find %dmA in usb_ma_table. Use min.\n",
+			       mA);
+			i = 0;
+		}
+
 		/* Check if IUSB_FINE_RES is available */
 		while ((usb_ma_table[i].value & PM8917_IUSB_FINE_RES)
 				&& !the_chip->iusb_fine_res)
@@ -2248,7 +2251,6 @@
 		usb_target_ma = 0;
 		pm8921_chg_disable_irq(chip, CHG_GONE_IRQ);
 	}
-	enable_input_voltage_regulation(chip);
 	bms_notify_check(chip);
 }
 
@@ -2530,6 +2532,13 @@
 		while (!the_chip->iusb_fine_res && i > 0
 			&& (usb_ma_table[i].value & PM8917_IUSB_FINE_RES))
 			i--;
+
+		if (i < 0) {
+			pr_err("can't find %dmA in usb_ma_table. Use min.\n",
+			       *value);
+			i = 0;
+		}
+
 		*value = usb_ma_table[i].usb_ma;
 	}
 }
@@ -2891,6 +2900,30 @@
 	return IRQ_HANDLED;
 }
 
+struct ibatmax_max_adj_entry {
+	int ibat_max_ma;
+	int max_adj_ma;
+};
+
+static struct ibatmax_max_adj_entry ibatmax_adj_table[] = {
+	{975, 300},
+	{1475, 150},
+	{1975, 200},
+	{2475, 250},
+};
+
+static int find_ibat_max_adj_ma(int ibat_target_ma)
+{
+	int i = 0;
+
+	for (i = ARRAY_SIZE(ibatmax_adj_table) - 1; i >= 0; i--) {
+		if (ibat_target_ma <= ibatmax_adj_table[i].ibat_max_ma)
+			break;
+	}
+
+	return ibatmax_adj_table[i].max_adj_ma;
+}
+
 static irqreturn_t fastchg_irq_handler(int irq, void *data)
 {
 	struct pm8921_chg_chip *chip = data;
@@ -3194,6 +3227,22 @@
 	pm_chg_vddmax_set(chip, adj_vdd_max_mv);
 }
 
+static void set_appropriate_vbatdet(struct pm8921_chg_chip *chip)
+{
+	if (chip->is_bat_cool)
+		pm_chg_vbatdet_set(the_chip,
+			the_chip->cool_bat_voltage
+			- the_chip->resume_voltage_delta);
+	else if (chip->is_bat_warm)
+		pm_chg_vbatdet_set(the_chip,
+			the_chip->warm_bat_voltage
+			- the_chip->resume_voltage_delta);
+	else
+		pm_chg_vbatdet_set(the_chip,
+			the_chip->max_voltage_mv
+			- the_chip->resume_voltage_delta);
+}
+
 enum {
 	CHG_IN_PROGRESS,
 	CHG_NOT_IN_PROGRESS,
@@ -3332,8 +3381,7 @@
 
 	if (end == CHG_NOT_IN_PROGRESS) {
 		count = 0;
-		wake_unlock(&chip->eoc_wake_lock);
-		return;
+		goto eoc_worker_stop;
 	}
 
 	/* If the disable hw clock switching
@@ -3357,21 +3405,6 @@
 	if (count == CONSECUTIVE_COUNT) {
 		count = 0;
 		pr_info("End of Charging\n");
-		/* set the vbatdet back, in case it was changed
-		 * to trigger charging */
-		if (chip->is_bat_cool) {
-			pm_chg_vbatdet_set(the_chip,
-				the_chip->cool_bat_voltage
-				- the_chip->resume_voltage_delta);
-		} else if (chip->is_bat_warm) {
-			pm_chg_vbatdet_set(the_chip,
-				the_chip->warm_bat_voltage
-				- the_chip->resume_voltage_delta);
-		} else {
-			pm_chg_vbatdet_set(the_chip,
-				the_chip->max_voltage_mv
-				- the_chip->resume_voltage_delta);
-		}
 
 		pm_chg_auto_enable(chip, 0);
 
@@ -3384,14 +3417,19 @@
 			chip->bms_notify.is_battery_full = 1;
 		/* declare end of charging by invoking chgdone interrupt */
 		chgdone_irq_handler(chip->pmic_chg_irq[CHGDONE_IRQ], chip);
-		wake_unlock(&chip->eoc_wake_lock);
 	} else {
 		adjust_vdd_max_for_fastchg(chip, vbat_batt_terminal_uv);
 		pr_debug("EOC count = %d\n", count);
 		schedule_delayed_work(&chip->eoc_work,
 			      round_jiffies_relative(msecs_to_jiffies
 						     (EOC_CHECK_PERIOD_MS)));
+		return;
 	}
+
+eoc_worker_stop:
+	wake_unlock(&chip->eoc_wake_lock);
+	/* set the vbatdet back, in case it was changed to trigger charging */
+	set_appropriate_vbatdet(chip);
 }
 
 static void btm_configure_work(struct work_struct *work)
@@ -3432,19 +3470,13 @@
 	if (enter) {
 		btm_config.low_thr_temp =
 			the_chip->cool_temp_dc + TEMP_HYSTERISIS_DEGC;
-		set_appropriate_battery_current(the_chip);
 		pm_chg_vddmax_set(the_chip, the_chip->cool_bat_voltage);
-		pm_chg_vbatdet_set(the_chip,
-			the_chip->cool_bat_voltage
-			- the_chip->resume_voltage_delta);
 	} else {
 		btm_config.low_thr_temp = the_chip->cool_temp_dc;
-		set_appropriate_battery_current(the_chip);
 		pm_chg_vddmax_set(the_chip, the_chip->max_voltage_mv);
-		pm_chg_vbatdet_set(the_chip,
-			the_chip->max_voltage_mv
-			- the_chip->resume_voltage_delta);
 	}
+	set_appropriate_battery_current(the_chip);
+	set_appropriate_vbatdet(the_chip);
 	schedule_work(&btm_config_work);
 }
 
@@ -3457,19 +3489,13 @@
 	if (enter) {
 		btm_config.high_thr_temp =
 			the_chip->warm_temp_dc - TEMP_HYSTERISIS_DEGC;
-		set_appropriate_battery_current(the_chip);
 		pm_chg_vddmax_set(the_chip, the_chip->warm_bat_voltage);
-		pm_chg_vbatdet_set(the_chip,
-			the_chip->warm_bat_voltage
-			- the_chip->resume_voltage_delta);
 	} else {
 		btm_config.high_thr_temp = the_chip->warm_temp_dc;
-		set_appropriate_battery_current(the_chip);
 		pm_chg_vddmax_set(the_chip, the_chip->max_voltage_mv);
-		pm_chg_vbatdet_set(the_chip,
-			the_chip->max_voltage_mv
-			- the_chip->resume_voltage_delta);
 	}
+	set_appropriate_battery_current(the_chip);
+	set_appropriate_vbatdet(the_chip);
 	schedule_work(&btm_config_work);
 }
 
@@ -4207,6 +4233,81 @@
 }
 DEFINE_SIMPLE_ATTRIBUTE(reg_fops, get_reg, set_reg, "0x%02llx\n");
 
+static int reg_loop;
+#define MAX_REG_LOOP_CHAR	10
+static int get_reg_loop_param(char *buf, struct kernel_param *kp)
+{
+	u8 temp;
+
+	if (!the_chip) {
+		pr_err("called before init\n");
+		return -EINVAL;
+	}
+	temp = pm_chg_get_regulation_loop(the_chip);
+	return snprintf(buf, MAX_REG_LOOP_CHAR, "%d", temp);
+}
+module_param_call(reg_loop, NULL, get_reg_loop_param,
+					&reg_loop, 0644);
+
+static int max_chg_ma;
+#define MAX_MA_CHAR	10
+static int get_max_chg_ma_param(char *buf, struct kernel_param *kp)
+{
+	if (!the_chip) {
+		pr_err("called before init\n");
+		return -EINVAL;
+	}
+	return snprintf(buf, MAX_MA_CHAR, "%d", the_chip->max_bat_chg_current);
+}
+module_param_call(max_chg_ma, NULL, get_max_chg_ma_param,
+					&max_chg_ma, 0644);
+static int ibatmax_ma;
+static int set_ibat_max(const char *val, struct kernel_param *kp)
+{
+	int rc;
+
+	if (!the_chip) {
+		pr_err("called before init\n");
+		return -EINVAL;
+	}
+
+	rc = param_set_int(val, kp);
+	if (rc) {
+		pr_err("error setting value %d\n", rc);
+		return rc;
+	}
+
+	if (abs(ibatmax_ma - the_chip->max_bat_chg_current)
+				<= the_chip->ibatmax_max_adj_ma) {
+		rc = pm_chg_ibatmax_set(the_chip, ibatmax_ma);
+		if (rc) {
+			pr_err("Failed to set ibatmax rc = %d\n", rc);
+			return rc;
+		}
+	}
+
+	return 0;
+}
+static int get_ibat_max(char *buf, struct kernel_param *kp)
+{
+	int ibat_ma;
+	int rc;
+
+	if (!the_chip) {
+		pr_err("called before init\n");
+		return -EINVAL;
+	}
+
+	rc = pm_chg_ibatmax_get(the_chip, &ibat_ma);
+	if (rc) {
+		pr_err("ibatmax_get error = %d\n", rc);
+		return rc;
+	}
+
+	return snprintf(buf, MAX_MA_CHAR, "%d", ibat_ma);
+}
+module_param_call(ibatmax_ma, set_ibat_max, get_ibat_max,
+					&ibatmax_ma, 0644);
 enum {
 	BAT_WARM_ZONE,
 	BAT_COOL_ZONE,
@@ -4371,6 +4472,9 @@
 		enable_irq_wake(chip->pmic_chg_irq[LOOP_CHANGE_IRQ]);
 	}
 
+	rc = pm8xxx_batt_alarm_enable(PM8XXX_BATT_ALARM_LOWER_COMPARATOR);
+	if (rc < 0)
+		pr_err("Failed to enable lower comparator\n");
 	return 0;
 }
 static int __devinit pm8921_charger_probe(struct platform_device *pdev)
@@ -4445,6 +4549,9 @@
 	if (chip->battery_less_hardware)
 		charging_disabled = 1;
 
+	chip->ibatmax_max_adj_ma = find_ibat_max_adj_ma(
+					chip->max_bat_chg_current);
+
 	rc = pm8921_chg_hw_init(chip);
 	if (rc) {
 		pr_err("couldn't init hardware rc=%d\n", rc);
diff --git a/drivers/power/pm8xxx-ccadc.c b/drivers/power/pm8xxx-ccadc.c
index 1b9426a..a586f3d8 100644
--- a/drivers/power/pm8xxx-ccadc.c
+++ b/drivers/power/pm8xxx-ccadc.c
@@ -72,7 +72,7 @@
 	unsigned int		revision;
 	unsigned int		calib_delay_ms;
 	int			eoc_irq;
-	int			r_sense;
+	int			r_sense_uohm;
 	struct delayed_work	calib_ccadc_work;
 };
 
@@ -562,7 +562,8 @@
 		return rc;
 	}
 
-	*bat_current_ua = voltage_uv * 1000/the_chip->r_sense;
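+	/* uV * 1000000 overflows a 32-bit int, so widen to s64 before
+	 * dividing by the sense resistance in uOhm */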
+	*bat_current_ua = div_s64((s64)voltage_uv * 1000000LL,
+						the_chip->r_sense_uohm);
 	/*
 	 * ccadc reads +ve current when the battery is charging
 	 * We need to return -ve if the battery is charging
@@ -675,7 +676,7 @@
 	chip->dev = &pdev->dev;
 	chip->revision = pm8xxx_get_revision(chip->dev->parent);
 	chip->eoc_irq = res->start;
-	chip->r_sense = pdata->r_sense;
+	chip->r_sense_uohm = pdata->r_sense_uohm;
 	chip->calib_delay_ms = pdata->calib_delay_ms;
 
 	calib_ccadc_read_offset_and_gain(chip,
diff --git a/drivers/power/qpnp-bms.c b/drivers/power/qpnp-bms.c
index 7b4d97e..1955ff4 100644
--- a/drivers/power/qpnp-bms.c
+++ b/drivers/power/qpnp-bms.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -10,7 +10,7 @@
  * GNU General Public License for more details.
  */
 
-#define pr_fmt(fmt)	"%s: " fmt, __func__
+#define pr_fmt(fmt)	"BMS: %s: " fmt, __func__
 
 #include <linux/module.h>
 #include <linux/types.h>
@@ -21,26 +21,17 @@
 #include <linux/of_device.h>
 #include <linux/power_supply.h>
 #include <linux/spmi.h>
-
-/* Interrupt offsets */
-#define INT_RT_STS(base)		(base + 0x10)
-#define INT_SET_TYPE(base)		(base + 0x11)
-#define INT_POLARITY_HIGH(base)		(base + 0x12)
-#define INT_POLARITY_LOW(base)		(base + 0x13)
-#define INT_LATCHED_CLR(base)		(base + 0x14)
-#define INT_EN_SET(base)		(base + 0x15)
-#define INT_EN_CLR(base)		(base + 0x16)
-#define INT_LATCHED_STS(base)		(base + 0x18)
-#define INT_PENDING_STS(base)		(base + 0x19)
-#define INT_MID_SEL(base)		(base + 0x1A)
-#define INT_PRIORITY(base)		(base + 0x1B)
+#include <linux/rtc.h>
+#include <linux/delay.h>
+#include <linux/qpnp/qpnp-adc.h>
+#include <linux/mfd/pm8xxx/batterydata-lib.h>
 
 /* BMS Register Offsets */
 #define BMS1_REVISION1			0x0
 #define BMS1_REVISION2			0x1
 #define BMS1_STATUS1			0x8
 #define BMS1_MODE_CTL			0X40
-/* Columb counter clear registers */
+/* Coulomb counter clear registers */
 #define BMS1_CC_DATA_CTL		0x42
 #define BMS1_CC_CLEAR_CTRL		0x43
 /* OCV limit registers */
@@ -49,6 +40,8 @@
 #define BMS1_OCV_USE_HIGH_LIMIT_THR0	0x4A
 #define BMS1_OCV_USE_HIGH_LIMIT_THR1	0x4B
 #define BMS1_OCV_USE_LIMIT_CTL		0x4C
+/* Delay control */
+#define BMS1_S1_DELAY_CTL		0x5A
 /* CC interrupt threshold */
 #define BMS1_CC_THR0			0x7A
 #define BMS1_CC_THR1			0x7B
@@ -60,7 +53,7 @@
 #define BMS1_OCV_FOR_R_DATA1		0x81
 #define BMS1_VSENSE_FOR_R_DATA0		0x82
 #define BMS1_VSENSE_FOR_R_DATA1		0x83
-/* Columb counter data */
+/* Coulomb counter data */
 #define BMS1_CC_DATA0			0x8A
 #define BMS1_CC_DATA1			0x8B
 #define BMS1_CC_DATA2			0x8C
@@ -71,19 +64,45 @@
 #define BMS1_OCV_FOR_SOC_DATA1		0x91
 #define BMS1_VSENSE_PON_DATA0		0x94
 #define BMS1_VSENSE_PON_DATA1		0x95
+#define BMS1_VSENSE_AVG_DATA0		0x98
+#define BMS1_VSENSE_AVG_DATA1		0x99
 #define BMS1_VBAT_AVG_DATA0		0x9E
 #define BMS1_VBAT_AVG_DATA1		0x9F
 /* Extra bms registers */
 #define BMS1_BMS_DATA_REG_0		0xB0
-#define BMS1_BMS_DATA_REG_1		0xB1
-#define BMS1_BMS_DATA_REG_2		0xB2
+#define IAVG_STORAGE_REG		0xB1
+#define SOC_STORAGE_REG			0xB2
 #define BMS1_BMS_DATA_REG_3		0xB3
 
+/* Configuration for saving of shutdown soc/iavg */
+#define IGNORE_SOC_TEMP_DECIDEG		50
+#define IAVG_STEP_SIZE_MA		50
+#define IAVG_START			600
+#define SOC_ZERO			0xFF
+
+#define IAVG_SAMPLES 16
+
 #define QPNP_BMS_DEV_NAME "qcom,qpnp-bms"
 
+struct soc_params {
+	int		fcc_uah;
+	int		cc_uah;
+	int		rbatt;
+	int		iavg_ua;
+	int		uuc_uah;
+	int		ocv_charge_uah;
+};
+
+struct raw_soc_params {
+	uint16_t	last_good_ocv_raw;
+	int64_t		cc;
+	int		last_good_ocv_uv;
+};
+
 struct qpnp_bms_chip {
 	struct device			*dev;
 	struct power_supply		bms_psy;
+	struct power_supply		*batt_psy;
 	struct spmi_device		*spmi;
 	u16				base;
 
@@ -93,13 +112,71 @@
 	bool				online;
 	/* platform data */
 	unsigned int			r_sense_mohm;
-	unsigned int			v_cutoff;
-	unsigned int			max_voltage;
+	unsigned int			v_cutoff_uv;
+	unsigned int			max_voltage_uv;
 	unsigned int			r_conn_mohm;
 	int				shutdown_soc_valid_limit;
 	int				adjust_soc_low_threshold;
 	int				adjust_soc_high_threshold;
-	int				chg_term;
+	int				chg_term_ua;
+	enum battery_type		batt_type;
+	unsigned int			fcc;
+	struct single_row_lut		*fcc_temp_lut;
+	struct single_row_lut		*fcc_sf_lut;
+	struct pc_temp_ocv_lut		*pc_temp_ocv_lut;
+	struct sf_lut			*pc_sf_lut;
+	struct sf_lut			*rbatt_sf_lut;
+	int				default_rbatt_mohm;
+
+	struct delayed_work		calculate_soc_delayed_work;
+
+	struct mutex			bms_output_lock;
+	struct mutex			last_ocv_uv_mutex;
+	struct mutex			soc_invalidation_mutex;
+
+	unsigned int			start_percent;
+	unsigned int			end_percent;
+	bool				ignore_shutdown_soc;
+	int				shutdown_soc_invalid;
+	int				shutdown_soc;
+	int				shutdown_iavg_ma;
+
+	int				low_soc_calc_threshold;
+	int				low_soc_calculate_soc_ms;
+	int				calculate_soc_ms;
+
+	uint16_t			ocv_reading_at_100;
+	int64_t				cc_reading_at_100;
+	uint16_t			prev_last_good_ocv_raw;
+	int				last_ocv_uv;
+	int				last_cc_uah;
+	unsigned long			tm_sec;
+	bool				first_time_calc_soc;
+	bool				first_time_calc_uuc;
+	int				pon_ocv_uv;
+
+	int				iavg_samples_ma[IAVG_SAMPLES];
+	int				iavg_index;
+	int				iavg_num_samples;
+	struct timespec			t_soc_queried;
+	int				last_soc;
+	int				last_soc_est;
+
+	int				charge_time_us;
+	int				catch_up_time_us;
+	struct single_row_lut		*adjusted_fcc_temp_lut;
+
+	unsigned int			vadc_v0625;
+	unsigned int			vadc_v1250;
+
+	int				prev_iavg_ua;
+	int				prev_uuc_iavg_ma;
+	int				prev_pc_unusable;
+	int				ibat_at_cv_ua;
+	int				soc_at_cv;
+	int				prev_chg_soc;
+	int				calculated_soc;
+	int				last_vbat_read_uv;
 };
 
 static struct of_device_id qpnp_bms_match_table[] = {
@@ -119,6 +196,38 @@
 	POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN,
 };
 
+static bool use_voltage_soc;
+
+/* module params */
+static int bms_param_set_bool(const char *val, const struct kernel_param *kp)
+{
+	int rc;
+	struct power_supply *bms_psy;
+
+	rc = param_set_bool(val, kp);
+	if (rc) {
+		pr_err("failed to set %s, rc = %d\n", kp->name, rc);
+		return rc;
+	}
+
+	bms_psy = power_supply_get_by_name("bms");
+
+	if (bms_psy)
+		power_supply_changed(bms_psy);
+	else
+		pr_debug("%s changed but bms has not been initialized yet\n",
+				kp->name);
+
+	return 0;
+}
+
+static struct kernel_param_ops bms_param_ops = {
+	.set = bms_param_set_bool,
+	.get = param_get_bool,
+};
+
+module_param_cb(use_voltage_soc, &bms_param_ops, &use_voltage_soc, 0644);
+
 static int qpnp_read_wrapper(struct qpnp_bms_chip *chip, u8 *val,
 			u16 base, int count)
 {
@@ -126,31 +235,1420 @@
 	struct spmi_device *spmi = chip->spmi;
 
 	rc = spmi_ext_register_readl(spmi->ctrl, spmi->sid, base, val, count);
-	if (rc)
+	if (rc) {
 		pr_err("SPMI read failed rc=%d\n", rc);
+		return rc;
+	}
+	return 0;
+}
+
+static int qpnp_write_wrapper(struct qpnp_bms_chip *chip, u8 *val,
+			u16 base, int count)
+{
+	int rc;
+	struct spmi_device *spmi = chip->spmi;
+
+	rc = spmi_ext_register_writel(spmi->ctrl, spmi->sid, base, val, count);
+	if (rc) {
+		pr_err("SPMI write failed rc=%d\n", rc);
+		return rc;
+	}
+	return 0;
+}
+
+static int qpnp_masked_write(struct qpnp_bms_chip *chip, u16 addr,
+							u8 mask, u8 val)
+{
+	int rc;
+	u8 reg;
+
+	rc = qpnp_read_wrapper(chip, &reg, chip->base + addr, 1);
+	if (rc) {
+		pr_err("read failed addr = %03X, rc = %d\n",
+				chip->base + addr, rc);
+		return rc;
+	}
+	reg &= ~mask;
+	reg |= val & mask;
+	rc = qpnp_write_wrapper(chip, &reg, chip->base + addr, 1);
+	if (rc) {
+		pr_err("write failed addr = %03X, val = %02x, mask = %02x, reg = %02x, rc = %d\n",
+				chip->base + addr, val, mask, reg, rc);
+		return rc;
+	}
+	return 0;
+}
+
+#define HOLD_OREG_DATA		BIT(0)
+static int lock_output_data(struct qpnp_bms_chip *chip)
+{
+	int rc;
+
+	rc = qpnp_masked_write(chip, BMS1_CC_DATA_CTL,
+				HOLD_OREG_DATA, HOLD_OREG_DATA);
+	if (rc) {
+		pr_err("couldnt lock bms output rc = %d\n", rc);
+		return rc;
+	}
+	return 0;
+}
+
+static int unlock_output_data(struct qpnp_bms_chip *chip)
+{
+	int rc;
+
+	rc = qpnp_masked_write(chip, BMS1_CC_DATA_CTL, HOLD_OREG_DATA, 0);
+	if (rc) {
+		pr_err("fail to unlock BMS_CONTROL rc = %d\n", rc);
+		return rc;
+	}
+	return 0;
+}
+
+#define V_PER_BIT_MUL_FACTOR	97656
+#define V_PER_BIT_DIV_FACTOR	1000
+#define VADC_INTRINSIC_OFFSET	0x6000
+
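+/*
+ * Raw VADC code -> uV: subtract the intrinsic offset, then scale by
+ * ~97.656 uV per LSB (97656/1000).
+ */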
+static int vadc_reading_to_uv(unsigned int reading)
+{
+	if (reading <= VADC_INTRINSIC_OFFSET)
+		return 0;
+
+	return (reading - VADC_INTRINSIC_OFFSET)
+			* V_PER_BIT_MUL_FACTOR / V_PER_BIT_DIV_FACTOR;
+}
+
+#define VADC_CALIB_UV		625000
+#define VBATT_MUL_FACTOR	3
+
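+/*
+ * Two-point correction of the VBAT reading against the 0.625 V and 1.25 V
+ * calibration references (vadc_v0625/vadc_v1250), then multiplied by 3
+ * since the VBAT channel is presumably divided down before the ADC.
+ */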
+static int adjust_vbatt_reading(struct qpnp_bms_chip *chip,
+						unsigned int reading_uv)
+{
+	s64 numerator, denominator;
+
+	if (reading_uv == 0)
+		return 0;
+
+	/* don't adjust if not calibrated */
+	if (chip->vadc_v0625 == 0 || chip->vadc_v1250 == 0) {
+		pr_debug("No cal yet return %d\n",
+				VBATT_MUL_FACTOR * reading_uv);
+		return VBATT_MUL_FACTOR * reading_uv;
+	}
+
+	numerator = ((s64)reading_uv - chip->vadc_v0625) * VADC_CALIB_UV;
+	denominator =  (s64)chip->vadc_v1250 - chip->vadc_v0625;
+	if (denominator == 0)
+		return reading_uv * VBATT_MUL_FACTOR;
+	return (VADC_CALIB_UV + div_s64(numerator, denominator))
+						* VBATT_MUL_FACTOR;
+}
+
+static inline int convert_vbatt_raw_to_uv(struct qpnp_bms_chip *chip,
+					uint16_t reading)
+{
+	int uv;
+
+	uv = vadc_reading_to_uv(reading);
+	pr_debug("%u raw converted into %d uv\n", reading, uv);
+	uv = adjust_vbatt_reading(chip, uv);
+	pr_debug("adjusted into %d uv\n", uv);
+	return uv;
+}
+
+#define CC_READING_RESOLUTION_N	542535
+#define CC_READING_RESOLUTION_D	100000
+static int cc_reading_to_uv(int16_t reading)
+{
+	return div_s64(reading * CC_READING_RESOLUTION_N,
+					CC_READING_RESOLUTION_D);
+}
+
+#define QPNP_ADC_GAIN_NV				17857LL
+static s64 cc_adjust_for_gain(s64 uv, uint16_t gain)
+{
+	s64 result_uv;
+
+	pr_debug("adjusting_uv = %lld\n", uv);
+	pr_debug("adjusting by factor: %lld/%hu = %lld%%\n",
+			QPNP_ADC_GAIN_NV, gain,
+			div_s64(QPNP_ADC_GAIN_NV * 100LL, (s64)gain));
+
+	result_uv = div_s64(uv * QPNP_ADC_GAIN_NV, (s64)gain);
+	pr_debug("result_uv = %lld\n", result_uv);
+	return result_uv;
+}
+
+static int convert_vsense_to_uv(struct qpnp_bms_chip *chip,
+					int16_t reading)
+{
+	struct qpnp_iadc_calib calibration;
+
+	qpnp_iadc_get_gain_and_offset(&calibration);
+	return cc_adjust_for_gain(cc_reading_to_uv(reading),
+			calibration.gain_raw);
+}
+
+static int read_vsense_avg(struct qpnp_bms_chip *chip, int *result_uv)
+{
+	int rc;
+	int16_t reading;
+
+	rc = qpnp_read_wrapper(chip, (u8 *)&reading,
+			chip->base + BMS1_VSENSE_AVG_DATA0, 2);
+
+	if (rc) {
+		pr_err("fail to read VSENSE_AVG rc = %d\n", rc);
+		return rc;
+	}
+
+	*result_uv = convert_vsense_to_uv(chip, reading);
+	return 0;
+}
+
+static int get_battery_current(struct qpnp_bms_chip *chip, int *result_ua)
+{
+	int vsense_uv = 0;
+
+	if (chip->r_sense_mohm == 0) {
+		pr_err("r_sense is zero\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&chip->bms_output_lock);
+	lock_output_data(chip);
+	read_vsense_avg(chip, &vsense_uv);
+	unlock_output_data(chip);
+	mutex_unlock(&chip->bms_output_lock);
+
+	pr_debug("vsense_uv=%duV\n", vsense_uv);
+	/* cast for signed division */
+	*result_ua = vsense_uv * 1000 / (int)chip->r_sense_mohm;
+	pr_debug("ibat=%duA\n", *result_ua);
+	return 0;
+}
+
+static int get_battery_voltage(int *result_uv)
+{
+	int rc;
+	struct qpnp_vadc_result adc_result;
+
+	rc = qpnp_vadc_read(VBAT_SNS, &adc_result);
+	if (rc) {
+		pr_err("error reading adc channel = %d, rc = %d\n",
+					VBAT_SNS, rc);
+		return rc;
+	}
+	pr_debug("mvolts phy = %lld meas = 0x%llx\n", adc_result.physical,
+						adc_result.measurement);
+	*result_uv = (int)adc_result.physical;
+	return 0;
+}
+
+#define CC_36_BIT_MASK 0xFFFFFFFFFLL
+
+static int read_cc_raw(struct qpnp_bms_chip *chip, int64_t *reading)
+{
+	int64_t raw_reading;
+	int rc;
+
+	rc = qpnp_read_wrapper(chip, (u8 *)&raw_reading,
+			chip->base + BMS1_CC_DATA0, 5);
+	if (rc) {
+		pr_err("Error reading cc: rc = %d\n", rc);
+		return -ENXIO;
+	}
+
+	raw_reading = raw_reading & CC_36_BIT_MASK;
+	/* convert 36 bit signed value into 64 signed value */
+	*reading = (raw_reading >> 35) == 0LL ?
+		raw_reading : ((-1LL ^ CC_36_BIT_MASK) | raw_reading);
+	pr_debug("before conversion: %llx, after conversion: %llx\n",
+			raw_reading, *reading);
 
 	return 0;
 }
 
+static int calib_vadc(struct qpnp_bms_chip *chip)
+{
+	int rc;
+	struct qpnp_vadc_result result;
+
+	rc = qpnp_vadc_read(REF_625MV, &result);
+	if (rc) {
+		pr_debug("vadc read failed with rc = %d\n", rc);
+		return rc;
+	}
+	chip->vadc_v0625 = result.physical;
+
+	rc = qpnp_vadc_read(REF_125V, &result);
+	if (rc) {
+		pr_debug("vadc read failed with rc = %d\n", rc);
+		return rc;
+	}
+	chip->vadc_v1250 = result.physical;
+	pr_debug("vadc calib: 0625 = %d, 1250 = %d\n",
+			chip->vadc_v0625, chip->vadc_v1250);
+	return 0;
+}
+
+static void convert_and_store_ocv(struct qpnp_bms_chip *chip,
+				struct raw_soc_params *raw)
+{
+	int rc;
+
+	pr_debug("prev_last_good_ocv_raw = %d, last_good_ocv_raw = %d\n",
+			chip->prev_last_good_ocv_raw,
+			raw->last_good_ocv_raw);
+	rc = calib_vadc(chip);
+	if (rc)
+		pr_err("Vadc reference voltage read failed, rc = %d\n", rc);
+	chip->prev_last_good_ocv_raw = raw->last_good_ocv_raw;
+	raw->last_good_ocv_uv = convert_vbatt_raw_to_uv(chip,
+					raw->last_good_ocv_raw);
+	chip->last_ocv_uv = raw->last_good_ocv_uv;
+	pr_debug("last_good_ocv_uv = %d\n", raw->last_good_ocv_uv);
+}
+
+static int read_soc_params_raw(struct qpnp_bms_chip *chip,
+				struct raw_soc_params *raw)
+{
+	int rc;
+
+	mutex_lock(&chip->bms_output_lock);
+	lock_output_data(chip);
+
+	rc = qpnp_read_wrapper(chip, (u8 *)&raw->last_good_ocv_raw,
+			chip->base + BMS1_OCV_FOR_SOC_DATA0, 2);
+	if (rc) {
+		pr_err("Error reading ocv: rc = %d\n", rc);
+		return -ENXIO;
+	}
+
+	rc = read_cc_raw(chip, &raw->cc);
+	if (rc) {
+		pr_err("Failed to read raw cc data, rc = %d\n", rc);
+		return rc;
+	}
+
+	unlock_output_data(chip);
+	mutex_unlock(&chip->bms_output_lock);
+
+	if (chip->prev_last_good_ocv_raw == 0) {
+		convert_and_store_ocv(chip, raw);
+		pr_debug("PON_OCV_UV = %d\n", chip->last_ocv_uv);
+	} else if (chip->prev_last_good_ocv_raw != raw->last_good_ocv_raw) {
+		convert_and_store_ocv(chip, raw);
+		/* forget the old cc value upon ocv */
+		chip->last_cc_uah = 0;
+	} else {
+		raw->last_good_ocv_uv = chip->last_ocv_uv;
+	}
+
+	/* fake a high OCV if done charging */
+	if (chip->ocv_reading_at_100 != raw->last_good_ocv_raw) {
+		chip->ocv_reading_at_100 = 0;
+		chip->cc_reading_at_100 = 0;
+	} else {
+		/*
+		 * force 100% ocv by selecting the highest voltage the
+		 * battery could ever reach
+		 */
+		raw->last_good_ocv_uv = chip->max_voltage_uv;
+		chip->last_ocv_uv = chip->max_voltage_uv;
+	}
+	pr_debug("last_good_ocv_raw= 0x%x, last_good_ocv_uv= %duV\n",
+			raw->last_good_ocv_raw, raw->last_good_ocv_uv);
+	pr_debug("cc_raw= 0x%llx\n", raw->cc);
+	return 0;
+}
+
+static int calculate_pc(struct qpnp_bms_chip *chip, int ocv_uv,
+							int batt_temp)
+{
+	int pc;
+
+	pc = interpolate_pc(chip->pc_temp_ocv_lut,
+			batt_temp / 10, ocv_uv / 1000);
+	pr_debug("pc = %u %% for ocv = %d uv batt_temp = %d\n",
+					pc, ocv_uv, batt_temp);
+	/* Multiply the initial FCC value by the scale factor. */
+	return pc;
+}
+
+static int calculate_fcc(struct qpnp_bms_chip *chip, int batt_temp)
+{
+	int fcc_uah;
+
+	if (chip->adjusted_fcc_temp_lut == NULL) {
+		/* interpolate_fcc returns a value in mAh. */
+		fcc_uah = interpolate_fcc(chip->fcc_temp_lut,
+						batt_temp) * 1000;
+		pr_debug("fcc = %d uAh\n", fcc_uah);
+		return fcc_uah;
+	} else {
+		return 1000 * interpolate_fcc(chip->adjusted_fcc_temp_lut,
+				batt_temp);
+	}
+}
+
+/* calculate remaining charge at the time of ocv */
+static int calculate_ocv_charge(struct qpnp_bms_chip *chip,
+						struct raw_soc_params *raw,
+						int fcc_uah,
+						int batt_temp)
+{
+	int  ocv_uv, pc;
+
+	ocv_uv = raw->last_good_ocv_uv;
+	pc = calculate_pc(chip, ocv_uv, batt_temp);
+	pr_debug("ocv_uv = %d pc = %d\n", ocv_uv, pc);
+	return (fcc_uah * pc) / 100;
+}
+
+#define CC_RESOLUTION_N		542535
+#define CC_RESOLUTION_D		100000
+
+static s64 cc_to_uv(s64 cc)
+{
+	return div_s64(cc * CC_RESOLUTION_N, CC_RESOLUTION_D);
+}
+
+#define CC_READING_TICKS	56
+#define SLEEP_CLK_HZ		32764
+#define SECONDS_PER_HOUR	3600
+
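+/*
+ * Each CC sample integrates over CC_READING_TICKS sleep-clock cycles
+ * (56/32764 s), so uV * ticks/Hz gives uV*s; dividing by 3600 and
+ * multiplying by 1000 yields nVh.
+ */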
+static s64 cc_uv_to_nvh(s64 cc_uv)
+{
+	return div_s64(cc_uv * CC_READING_TICKS * 1000,
+			SLEEP_CLK_HZ * SECONDS_PER_HOUR);
+}
+
+/**
+ * calculate_cc -
+ * @chip:	the bms chip pointer
+ * @cc:		the cc reading from bms h/w
+ *
+ * RETURNS:	coulomb counter based charge in uAh
+ *		(micro Amp hour)
+ */
+static int calculate_cc(struct qpnp_bms_chip *chip, int64_t cc)
+{
+	int64_t cc_voltage_uv, cc_nvh, cc_uah;
+	struct qpnp_iadc_calib calibration;
+
+	qpnp_iadc_get_gain_and_offset(&calibration);
+	cc_voltage_uv = cc;
+	cc_voltage_uv -= chip->cc_reading_at_100;
+	pr_debug("cc = %lld. after subtracting 0x%llx cc = %lld\n",
+					cc, chip->cc_reading_at_100,
+					cc_voltage_uv);
+	cc_voltage_uv = cc_to_uv(cc_voltage_uv);
+	cc_voltage_uv = cc_adjust_for_gain(cc_voltage_uv, calibration.gain_raw);
+	pr_debug("cc_voltage_uv = %lld uv\n", cc_voltage_uv);
+	cc_nvh = cc_uv_to_nvh(cc_voltage_uv);
+	pr_debug("cc_nvh = %lld nano_volt_hour\n", cc_nvh);
+	cc_uah = div_s64(cc_nvh, chip->r_sense_mohm);
+	/* cc_raw had 4 bits of extra precision.
+	   By now it should be within 32 bit range */
+	return (int)cc_uah;
+}
+
+static int get_rbatt(struct qpnp_bms_chip *chip,
+					int soc_rbatt_mohm, int batt_temp)
+{
+	int rbatt_mohm, scalefactor;
+
+	rbatt_mohm = chip->default_rbatt_mohm;
+	pr_debug("rbatt before scaling = %d\n", rbatt_mohm);
+	if (chip->rbatt_sf_lut == NULL)  {
+		pr_debug("RBATT = %d\n", rbatt_mohm);
+		return rbatt_mohm;
+	}
+	/* Convert the batt_temp to DegC from deciDegC */
+	batt_temp = batt_temp / 10;
+	scalefactor = interpolate_scalingfactor(chip->rbatt_sf_lut,
+						batt_temp, soc_rbatt_mohm);
+	pr_debug("rbatt sf = %d for batt_temp = %d, soc_rbatt = %d\n",
+				scalefactor, batt_temp, soc_rbatt_mohm);
+	rbatt_mohm = (rbatt_mohm * scalefactor) / 100;
+
+	rbatt_mohm += chip->r_conn_mohm;
+	pr_debug("adding r_conn_mohm = %d rbatt = %d\n",
+				chip->r_conn_mohm, rbatt_mohm);
+
+	pr_debug("RBATT = %d\n", rbatt_mohm);
+	return rbatt_mohm;
+}
+
+static void calculate_iavg(struct qpnp_bms_chip *chip, int cc_uah,
+				int *iavg_ua)
+{
+	int delta_cc_uah, delta_time_s, rc;
+	struct rtc_time tm;
+	struct rtc_device *rtc;
+	unsigned long now_tm_sec = 0;
+
+	rc = 0;
+	/* if anything fails report the previous iavg_ua */
+	*iavg_ua = chip->prev_iavg_ua;
+
+	rtc = rtc_class_open(CONFIG_RTC_HCTOSYS_DEVICE);
+	if (rtc == NULL) {
+		pr_err("%s: unable to open rtc device (%s)\n",
+			__FILE__, CONFIG_RTC_HCTOSYS_DEVICE);
+		goto out;
+	}
+
+	rc = rtc_read_time(rtc, &tm);
+	if (rc) {
+		pr_err("Error reading rtc device (%s) : %d\n",
+			CONFIG_RTC_HCTOSYS_DEVICE, rc);
+		goto out;
+	}
+
+	rc = rtc_valid_tm(&tm);
+	if (rc) {
+		pr_err("Invalid RTC time (%s): %d\n",
+			CONFIG_RTC_HCTOSYS_DEVICE, rc);
+		goto out;
+	}
+	rtc_tm_to_time(&tm, &now_tm_sec);
+
+	if (chip->tm_sec == 0) {
+		get_battery_current(chip, iavg_ua);
+		goto out;
+	}
+
+	delta_time_s = (now_tm_sec - chip->tm_sec);
+
+	/* use the previous iavg if called within 15 seconds */
+	if (delta_time_s < 15) {
+		*iavg_ua = chip->prev_iavg_ua;
+		goto out;
+	}
+
+	delta_cc_uah = cc_uah - chip->last_cc_uah;
+
+	*iavg_ua = div_s64((s64)delta_cc_uah * 3600, delta_time_s);
+
+	pr_debug("tm_sec = %ld, now_tm_sec = %ld delta_s = %d delta_cc = %d iavg_ua = %d\n",
+				chip->tm_sec, now_tm_sec,
+				delta_time_s, delta_cc_uah, (int)*iavg_ua);
+
+out:
+	/* remember the iavg */
+	chip->prev_iavg_ua = *iavg_ua;
+
+	/* remember cc_uah */
+	chip->last_cc_uah = cc_uah;
+
+	/* remember this time */
+	chip->tm_sec = now_tm_sec;
+}
+
+static int calculate_termination_uuc(struct qpnp_bms_chip *chip,
+					struct soc_params *params,
+					int batt_temp, int uuc_iavg_ma,
+					int *ret_pc_unusable)
+{
+	int unusable_uv, pc_unusable, uuc_uah;
+	int i = 0;
+	int ocv_mv;
+	int batt_temp_degc = batt_temp / 10;
+	int rbatt_mohm;
+	int delta_uv;
+	int prev_delta_uv = 0;
+	int prev_rbatt_mohm = 0;
+	int uuc_rbatt_mohm;
+
+	for (i = 0; i <= 100; i++) {
+		ocv_mv = interpolate_ocv(chip->pc_temp_ocv_lut,
+				batt_temp_degc, i);
+		rbatt_mohm = get_rbatt(chip, i, batt_temp);
+		unusable_uv = (rbatt_mohm * uuc_iavg_ma)
+							+ (chip->v_cutoff_uv);
+		delta_uv = ocv_mv * 1000 - unusable_uv;
+
+		pr_debug("soc = %d ocv = %d rbat = %d u_uv = %d delta_v = %d\n",
+				i, ocv_mv, rbatt_mohm, unusable_uv, delta_uv);
+
+		if (delta_uv > 0)
+			break;
+
+		prev_delta_uv = delta_uv;
+		prev_rbatt_mohm = rbatt_mohm;
+	}
+
+	uuc_rbatt_mohm = linear_interpolate(rbatt_mohm, delta_uv,
+					prev_rbatt_mohm, prev_delta_uv,
+					0);
+
+	unusable_uv = (uuc_rbatt_mohm * uuc_iavg_ma) + (chip->v_cutoff_uv);
+
+	pc_unusable = calculate_pc(chip, unusable_uv, batt_temp);
+	uuc_uah = (params->fcc_uah * pc_unusable) / 100;
+	pr_debug("For uuc_iavg_ma = %d, unusable_rbatt = %d unusable_uv = %d unusable_pc = %d uuc = %d\n",
+					uuc_iavg_ma,
+					uuc_rbatt_mohm, unusable_uv,
+					pc_unusable, uuc_uah);
+	*ret_pc_unusable = pc_unusable;
+	return uuc_uah;
+}
+
+static int adjust_uuc(struct qpnp_bms_chip *chip,
+			struct soc_params *params,
+			int new_pc_unusable,
+			int new_uuc_uah,
+			int batt_temp)
+{
+	int new_unusable_mv, new_iavg_ma;
+	int batt_temp_degc = batt_temp / 10;
+
+	if (chip->prev_pc_unusable == -EINVAL
+		|| abs(chip->prev_pc_unusable - new_pc_unusable) <= 1) {
+		chip->prev_pc_unusable = new_pc_unusable;
+		return new_uuc_uah;
+	}
+
+	/* the uuc is trying to change by more than 1%, restrict it */
+	if (new_pc_unusable > chip->prev_pc_unusable)
+		chip->prev_pc_unusable++;
+	else
+		chip->prev_pc_unusable--;
+
+	new_uuc_uah = (params->fcc_uah * chip->prev_pc_unusable) / 100;
+
+	/* also update the iavg_ma accordingly */
+	new_unusable_mv = interpolate_ocv(chip->pc_temp_ocv_lut,
+			batt_temp_degc, chip->prev_pc_unusable);
+	if (new_unusable_mv < chip->v_cutoff_uv/1000)
+		new_unusable_mv = chip->v_cutoff_uv/1000;
+
+	new_iavg_ma = (new_unusable_mv * 1000 - chip->v_cutoff_uv)
+						/ params->rbatt;
+	if (new_iavg_ma == 0)
+		new_iavg_ma = 1;
+	chip->prev_uuc_iavg_ma = new_iavg_ma;
+	pr_debug("Restricting UUC to %d (%d%%) unusable_mv = %d iavg_ma = %d\n",
+					new_uuc_uah, chip->prev_pc_unusable,
+					new_unusable_mv, new_iavg_ma);
+
+	return new_uuc_uah;
+}
+
+#define CHARGING_IAVG_MA 250
+#define MIN_SECONDS_FOR_VALID_SAMPLE	20
+static int calculate_unusable_charge_uah(struct qpnp_bms_chip *chip,
+					struct soc_params *params,
+					int batt_temp)
+{
+	int uuc_uah_iavg;
+	int i;
+	int uuc_iavg_ma = params->iavg_ua / 1000;
+	int pc_unusable;
+
+	/*
+	 * if called first time, fill all the samples with
+	 * the shutdown_iavg_ma
+	 */
+	if (chip->first_time_calc_uuc && chip->shutdown_iavg_ma != 0) {
+		pr_debug("Using shutdown_iavg_ma = %d in all samples\n",
+				chip->shutdown_iavg_ma);
+		for (i = 0; i < IAVG_SAMPLES; i++)
+			chip->iavg_samples_ma[i] = chip->shutdown_iavg_ma;
+
+		chip->iavg_index = 0;
+		chip->iavg_num_samples = IAVG_SAMPLES;
+	}
+
+	/*
+	 * if charging use a nominal avg current to keep
+	 * a reasonable UUC while charging
+	 */
+	if (uuc_iavg_ma < 0)
+		uuc_iavg_ma = CHARGING_IAVG_MA;
+	chip->iavg_samples_ma[chip->iavg_index] = uuc_iavg_ma;
+	chip->iavg_index = (chip->iavg_index + 1) % IAVG_SAMPLES;
+	chip->iavg_num_samples++;
+	if (chip->iavg_num_samples >= IAVG_SAMPLES)
+		chip->iavg_num_samples = IAVG_SAMPLES;
+
+	/* now that this sample is added, calculate the average */
+	uuc_iavg_ma = 0;
+	if (chip->iavg_num_samples != 0) {
+		for (i = 0; i < chip->iavg_num_samples; i++) {
+			pr_debug("iavg_samples_ma[%d] = %d\n", i,
+					chip->iavg_samples_ma[i]);
+			uuc_iavg_ma += chip->iavg_samples_ma[i];
+		}
+
+		uuc_iavg_ma = DIV_ROUND_CLOSEST(uuc_iavg_ma,
+						chip->iavg_num_samples);
+	}
+
+	uuc_uah_iavg = calculate_termination_uuc(chip, params, uuc_iavg_ma,
+						batt_temp, &pc_unusable);
+	pr_debug("uuc_iavg_ma = %d uuc with iavg = %d\n",
+						uuc_iavg_ma, uuc_uah_iavg);
+
+	chip->prev_uuc_iavg_ma = uuc_iavg_ma;
+	/* restrict the uuc such that it can increase only by one percent */
+	uuc_uah_iavg = adjust_uuc(chip, params, pc_unusable,
+					uuc_uah_iavg, batt_temp);
+
+	chip->first_time_calc_uuc = 0;
+	return uuc_uah_iavg;
+}
+
+static void find_ocv_for_soc(struct qpnp_bms_chip *chip,
+				struct soc_params *params,
+				int batt_temp,
+				int shutdown_soc,
+				int *ret_ocv_uv)
+{
+	s64 ocv_charge_uah;
+	int pc, new_pc;
+	int batt_temp_degc = batt_temp / 10;
+	int ocv_uv;
+
+	ocv_charge_uah = (s64)shutdown_soc
+				* (params->fcc_uah - params->uuc_uah);
+	ocv_charge_uah = div_s64(ocv_charge_uah, 100)
+				+ params->cc_uah + params->uuc_uah;
+	pc = DIV_ROUND_CLOSEST((int)ocv_charge_uah * 100, params->fcc_uah);
+	pc = clamp(pc, 0, 100);
+
+	ocv_uv = interpolate_ocv(chip->pc_temp_ocv_lut, batt_temp_degc, pc);
+
+	pr_debug("s_soc = %d, fcc = %d uuc = %d rc = %d, pc = %d, ocv mv = %d\n",
+					shutdown_soc, params->fcc_uah,
+					params->uuc_uah, (int)ocv_charge_uah,
+					pc, ocv_uv);
+	new_pc = interpolate_pc(chip->pc_temp_ocv_lut, batt_temp_degc, ocv_uv);
+	pr_debug("test revlookup pc = %d for ocv = %d\n", new_pc, ocv_uv);
+
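+	/*
+	 * Walk the OCV in 5 mV steps (sign chosen to converge) until the
+	 * reverse pc lookup agrees with the target pc to within 1 percent.
+	 */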
+	while (abs(new_pc - pc) > 1) {
+		int delta_mv = 5;
+
+		if (new_pc > pc)
+			delta_mv = -1 * delta_mv;
+
+		ocv_uv = ocv_uv + delta_mv;
+		new_pc = interpolate_pc(chip->pc_temp_ocv_lut,
+				batt_temp_degc, ocv_uv);
+		pr_debug("test revlookup pc = %d for ocv = %d\n",
+				new_pc, ocv_uv);
+	}
+
+	*ret_ocv_uv = ocv_uv * 1000;
+	params->ocv_charge_uah = (int)ocv_charge_uah;
+}
+
+static void calculate_soc_params(struct qpnp_bms_chip *chip,
+						struct raw_soc_params *raw,
+						struct soc_params *params,
+						int batt_temp)
+{
+	int soc_rbatt;
+
+	params->fcc_uah = calculate_fcc(chip, batt_temp);
+	pr_debug("FCC = %uuAh batt_temp = %d\n", params->fcc_uah, batt_temp);
+
+	/* calculate remaining charge */
+	params->ocv_charge_uah = calculate_ocv_charge(
+						chip, raw,
+						params->fcc_uah,
+						batt_temp);
+	pr_debug("ocv_charge_uah = %uuAh\n", params->ocv_charge_uah);
+
+	/* calculate the coulomb counter charge in uAh */
+	params->cc_uah = calculate_cc(chip, raw->cc);
+	pr_debug("cc_uah = %duAh raw->cc = %llx cc = %lld after subtracting %llx\n",
+				params->cc_uah, raw->cc,
+				(int64_t)raw->cc - chip->cc_reading_at_100,
+				chip->cc_reading_at_100);
+
+	soc_rbatt = ((params->ocv_charge_uah - params->cc_uah) * 100)
+							/ params->fcc_uah;
+	if (soc_rbatt < 0)
+		soc_rbatt = 0;
+	params->rbatt = get_rbatt(chip, soc_rbatt, batt_temp);
+
+	calculate_iavg(chip, params->cc_uah, &params->iavg_ua);
+
+	params->uuc_uah = calculate_unusable_charge_uah(chip, params,
+							batt_temp);
+	pr_debug("UUC = %uuAh\n", params->uuc_uah);
+}
+
+static bool is_shutdown_soc_within_limits(struct qpnp_bms_chip *chip, int soc)
+{
+	if (chip->shutdown_soc_invalid) {
+		pr_debug("NOT forcing shutdown soc = %d\n", chip->shutdown_soc);
+		return 0;
+	}
+
+	if (abs(chip->shutdown_soc - soc) > chip->shutdown_soc_valid_limit) {
+		pr_debug("rejecting shutdown soc = %d, soc = %d limit = %d\n",
+			chip->shutdown_soc, soc,
+			chip->shutdown_soc_valid_limit);
+		chip->shutdown_soc_invalid = 1;
+		return 0;
+	}
+
+	return 1;
+}
+
+#define BMS_OVERRIDE_MODE_EN_BIT	BIT(7)
+#define EN_VBAT_BIT			BIT(0)
+#define OVERRIDE_MODE_DELAY_MS		20
+static int override_mode_batt_v_and_i(
+		struct qpnp_bms_chip *chip, int *ibat_ua, int *vbat_uv)
+{
+	int16_t vsense_raw, vbat_raw;
+	int vsense_uv, rc;
+	u8 delay;
+
+	mutex_lock(&chip->bms_output_lock);
+
+	delay = 0x00;
+	rc = qpnp_write_wrapper(chip, &delay,
+			chip->base + BMS1_S1_DELAY_CTL, 1);
+	if (rc)
+		pr_err("unable to write into BMS1_S1_DELAY, rc: %d\n", rc);
+
+	rc = qpnp_masked_write(chip, BMS1_MODE_CTL,
+			BMS_OVERRIDE_MODE_EN_BIT | EN_VBAT_BIT,
+			BMS_OVERRIDE_MODE_EN_BIT | EN_VBAT_BIT);
+	if (rc)
+		pr_err("unable to write into BMS1_MODE_CTL, rc: %d\n", rc);
+
+	msleep(OVERRIDE_MODE_DELAY_MS);
+
+	lock_output_data(chip);
+	qpnp_read_wrapper(chip, (u8 *)&vsense_raw,
+			chip->base + BMS1_VSENSE_AVG_DATA0, 2);
+	qpnp_read_wrapper(chip, (u8 *)&vbat_raw,
+			chip->base + BMS1_VBAT_AVG_DATA0, 2);
+	unlock_output_data(chip);
+
+	rc = qpnp_masked_write(chip, BMS1_MODE_CTL,
+			BMS_OVERRIDE_MODE_EN_BIT | EN_VBAT_BIT, 0);
+
+	delay = 0x0B;
+	rc = qpnp_write_wrapper(chip, &delay,
+			chip->base + BMS1_S1_DELAY_CTL, 1);
+	if (rc)
+		pr_err("unable to write into BMS1_S1_DELAY, rc: %d\n", rc);
+
+	mutex_unlock(&chip->bms_output_lock);
+
+	*vbat_uv = convert_vbatt_raw_to_uv(chip, vbat_raw);
+	vsense_uv = convert_vsense_to_uv(chip, vsense_raw);
+	*ibat_ua = vsense_uv * 1000 / (int)chip->r_sense_mohm;
+
+	pr_debug("vsense_raw = 0x%x vbat_raw = 0x%x ibat_ua = %d vbat_uv = %d\n",
+			(uint16_t)vsense_raw, (uint16_t)vbat_raw,
+			*ibat_ua, *vbat_uv);
+	return 0;
+}
+
+static int get_simultaneous_batt_v_and_i(
+						struct qpnp_bms_chip *chip,
+						int *ibat_ua, int *vbat_uv)
+{
+	int rc;
+	union power_supply_propval ret = {0,};
+
+	if (chip->batt_psy == NULL)
+		chip->batt_psy = power_supply_get_by_name("battery");
+	if (chip->batt_psy) {
+		/* if battery has been registered, use the status property */
+		chip->batt_psy->get_property(chip->batt_psy,
+					POWER_SUPPLY_PROP_STATUS, &ret);
+	} else {
+		/* default to using separate vbat/ibat if unregistered */
+		ret.intval = POWER_SUPPLY_STATUS_FULL;
+	}
+
+	if (ret.intval == POWER_SUPPLY_STATUS_FULL) {
+		pr_debug("batfet is open using separate vbat and ibat meas\n");
+		rc = get_battery_voltage(vbat_uv);
+		if (rc < 0) {
+			pr_err("adc vbat failed err = %d\n", rc);
+			return rc;
+		}
+		rc = get_battery_current(chip, ibat_ua);
+		if (rc < 0) {
+			pr_err("bms ibat failed err = %d\n", rc);
+			return rc;
+		}
+	} else {
+		return override_mode_batt_v_and_i(chip, ibat_ua, vbat_uv);
+	}
+
+	return 0;
+}
+
+static int bound_soc(int soc)
+{
+	soc = max(0, soc);
+	soc = min(100, soc);
+	return soc;
+}
+
+static int charging_adjustments(struct qpnp_bms_chip *chip,
+				struct soc_params *params, int soc,
+				int vbat_uv, int ibat_ua, int batt_temp)
+{
+	int chg_soc;
+
+	if (chip->soc_at_cv == -EINVAL) {
+		/* In constant current charging return the calc soc */
+		if (vbat_uv <= chip->max_voltage_uv)
+			pr_debug("CC CHG SOC %d\n", soc);
+
+		/* Note the CC to CV point */
+		if (vbat_uv >= chip->max_voltage_uv) {
+			chip->soc_at_cv = soc;
+			chip->prev_chg_soc = soc;
+			chip->ibat_at_cv_ua = ibat_ua;
+			pr_debug("CC_TO_CV ibat_ua = %d CHG SOC %d\n",
+					ibat_ua, soc);
+		}
+		return soc;
+	}
+
+	/*
+	 * battery is in CV phase - begin linear interpolation of soc based on
+	 * battery charge current
+	 */
+
+	/*
+	 * if voltage lessened (possibly because of a system load)
+	 * keep reporting the prev chg soc
+	 */
+	if (vbat_uv <= chip->max_voltage_uv) {
+		pr_debug("vbat %d < max = %d CC CHG SOC %d\n",
+			vbat_uv, chip->max_voltage_uv, chip->prev_chg_soc);
+		return chip->prev_chg_soc;
+	}
+
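+	/*
+	 * CV phase: interpolate soc from soc_at_cv toward 100% as the
+	 * charge current tapers from ibat_at_cv_ua toward -100 mA.
+	 */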
+	chg_soc = linear_interpolate(chip->soc_at_cv, chip->ibat_at_cv_ua,
+					100, -100000,
+					ibat_ua);
+
+	/* always report a higher soc */
+	if (chg_soc > chip->prev_chg_soc) {
+		int new_ocv_uv;
+
+		chip->prev_chg_soc = chg_soc;
+
+		find_ocv_for_soc(chip, params, batt_temp, chg_soc, &new_ocv_uv);
+		chip->last_ocv_uv = new_ocv_uv;
+		pr_debug("CC CHG ADJ OCV = %d CHG SOC %d\n",
+				new_ocv_uv,
+				chip->prev_chg_soc);
+	}
+
+	pr_debug("Reporting CHG SOC %d\n", chip->prev_chg_soc);
+	return chip->prev_chg_soc;
+}
+
+static int adjust_soc(struct qpnp_bms_chip *chip, struct soc_params *params,
+							int soc, int batt_temp)
+{
+	int ibat_ua = 0, vbat_uv = 0;
+	int ocv_est_uv = 0, soc_est = 0, pc_est = 0, pc = 0;
+	int delta_ocv_uv = 0;
+	int n = 0;
+	int rc_new_uah = 0;
+	int pc_new = 0;
+	int soc_new = 0;
+	int slope = 0;
+	int rc = 0;
+	int delta_ocv_uv_limit = 0;
+
+	rc = get_simultaneous_batt_v_and_i(chip, &ibat_ua, &vbat_uv);
+	if (rc < 0) {
+		pr_err("simultaneous vbat ibat failed err = %d\n", rc);
+		goto out;
+	}
+
+	delta_ocv_uv_limit = DIV_ROUND_CLOSEST(ibat_ua, 1000);
+
+	ocv_est_uv = vbat_uv + (ibat_ua * params->rbatt)/1000;
+	pc_est = calculate_pc(chip, ocv_est_uv, batt_temp);
+	soc_est = div_s64((s64)params->fcc_uah * pc_est - params->uuc_uah*100,
+				(s64)params->fcc_uah - params->uuc_uah);
+	soc_est = bound_soc(soc_est);
+
+	if (ibat_ua < 0) {
+		soc = charging_adjustments(chip, params, soc, vbat_uv, ibat_ua,
+				batt_temp);
+		goto out;
+	}
+
+	/*
+	 * do not adjust
+	 * if soc is same as what bms calculated
+	 * if soc_est is between 45 and 25, this is the flat portion of the
+	 * curve where soc_est is not so accurate. We generally don't want to
+	 * adjust when soc_est is inaccurate except for the cases when soc is
+	 * way far off (higher than 50 or less than 20).
+	 * Also don't adjust soc if it is above 90 because it might be pulled
+	 * low and cause a bad user experience
+	 */
+	if (soc_est == soc
+		|| (is_between(45, chip->adjust_soc_low_threshold, soc_est)
+		&& is_between(50, chip->adjust_soc_low_threshold - 5, soc))
+		|| soc >= 90)
+		goto out;
+
+	if (chip->last_soc_est == -EINVAL)
+		chip->last_soc_est = soc;
+
+	n = min(200, max(1 , soc + soc_est + chip->last_soc_est));
+	chip->last_soc_est = soc_est;
+
+	pc = calculate_pc(chip, chip->last_ocv_uv, batt_temp);
+	if (pc > 0) {
+		pc_new = calculate_pc(chip,
+				chip->last_ocv_uv - (++slope * 1000),
+				batt_temp);
+		while (pc_new == pc) {
+			/* start taking 10mV steps */
+			slope = slope + 10;
+			pc_new = calculate_pc(chip,
+				chip->last_ocv_uv - (slope * 1000),
+				batt_temp);
+		}
+	} else {
+		/*
+		 * pc is already at the lowest point,
+		 * assume 1 millivolt translates to 1% pc
+		 */
+		pc = 1;
+		pc_new = 0;
+		slope = 1;
+	}
+
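+	/*
+	 * Convert the soc error (soc - soc_est) into an OCV correction using
+	 * the local OCV step per percent, slope / (pc - pc_new), damped by n.
+	 */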
+	delta_ocv_uv = div_s64((soc - soc_est) * (s64)slope * 1000,
+							n * (pc - pc_new));
+
+	if (abs(delta_ocv_uv) > delta_ocv_uv_limit) {
+		pr_debug("limiting delta ocv %d limit = %d\n", delta_ocv_uv,
+				delta_ocv_uv_limit);
+
+		if (delta_ocv_uv > 0)
+			delta_ocv_uv = delta_ocv_uv_limit;
+		else
+			delta_ocv_uv = -1 * delta_ocv_uv_limit;
+		pr_debug("new delta ocv = %d\n", delta_ocv_uv);
+	}
+
+	chip->last_ocv_uv -= delta_ocv_uv;
+
+	if (chip->last_ocv_uv >= chip->max_voltage_uv)
+		chip->last_ocv_uv = chip->max_voltage_uv;
+
+	/* calculate the soc based on this new ocv */
+	pc_new = calculate_pc(chip, chip->last_ocv_uv, batt_temp);
+	rc_new_uah = (params->fcc_uah * pc_new) / 100;
+	soc_new = (rc_new_uah - params->cc_uah - params->uuc_uah)*100
+					/ (params->fcc_uah - params->uuc_uah);
+	soc_new = bound_soc(soc_new);
+
+	/*
+	 * if soc_new is ZERO force it higher so that phone doesn't report soc=0
+	 * soc = 0 should happen only when soc_est == 0
+	 */
+	if (soc_new == 0 && soc_est != 0)
+		soc_new = 1;
+
+	soc = soc_new;
+
+out:
+	pr_debug("ibat_ua = %d, vbat_uv = %d, ocv_est_uv = %d, pc_est = %d, soc_est = %d, n = %d, delta_ocv_uv = %d, last_ocv_uv = %d, pc_new = %d, soc_new = %d, rbatt = %d, slope = %d\n",
+		ibat_ua, vbat_uv, ocv_est_uv, pc_est,
+		soc_est, n, delta_ocv_uv, chip->last_ocv_uv,
+		pc_new, soc_new, params->rbatt, slope);
+
+	return soc;
+}
+
+static int calculate_state_of_charge(struct qpnp_bms_chip *chip,
+					struct raw_soc_params *raw,
+					int batt_temp)
+{
+	int soc, new_ocv_uv;
+	int shutdown_soc, new_calculated_soc, remaining_usable_charge_uah;
+	struct soc_params params;
+
+	calculate_soc_params(chip, raw, &params, batt_temp);
+	/* calculate remaining usable charge */
+	remaining_usable_charge_uah = params.ocv_charge_uah
+					- params.cc_uah
+					- params.uuc_uah;
+
+	pr_debug("RUC = %duAh\n", remaining_usable_charge_uah);
+	if (params.fcc_uah - params.uuc_uah <= 0) {
+		pr_warn("FCC = %duAh, UUC = %duAh forcing soc = 0\n",
+						params.fcc_uah,
+						params.uuc_uah);
+		soc = 0;
+	} else {
+		soc = DIV_ROUND_CLOSEST((remaining_usable_charge_uah * 100),
+					(params.fcc_uah
+						- params.uuc_uah));
+	}
+
+	if (chip->first_time_calc_soc && soc < 0) {
+		/*
+		 * first time calculation and the pon ocv is too low, resulting
+		 * in a bad soc. Adjust ocv to get 0 soc
+		 */
+		pr_debug("soc is %d, adjusting pon ocv to make it 0\n", soc);
+		find_ocv_for_soc(chip, &params, batt_temp, 0, &new_ocv_uv);
+		chip->last_ocv_uv = new_ocv_uv;
+
+		remaining_usable_charge_uah = params.ocv_charge_uah
+					- params.cc_uah
+					- params.uuc_uah;
+
+		soc = DIV_ROUND_CLOSEST((remaining_usable_charge_uah * 100),
+					(params.fcc_uah
+						- params.uuc_uah));
+		pr_debug("DONE for O soc is %d, pon ocv adjusted to %duV\n",
+				soc, chip->last_ocv_uv);
+	}
+
+	if (soc > 100)
+		soc = 100;
+
+	if (soc < 0) {
+		pr_err("bad rem_usb_chg = %d rem_chg %d, cc_uah %d, unusb_chg %d\n",
+				remaining_usable_charge_uah,
+				params.ocv_charge_uah,
+				params.cc_uah, params.uuc_uah);
+
+		pr_err("for bad rem_usb_chg last_ocv_uv = %d batt_temp = %d fcc = %d soc =%d\n",
+				chip->last_ocv_uv, batt_temp,
+				params.fcc_uah, soc);
+		soc = 0;
+	}
+
+	mutex_lock(&chip->soc_invalidation_mutex);
+	shutdown_soc = chip->shutdown_soc;
+
+	if (chip->first_time_calc_soc && soc != shutdown_soc
+			&& is_shutdown_soc_within_limits(chip, soc)) {
+		/*
+		 * soc for the first time - use shutdown soc
+		 * to adjust pon ocv since it is a small percent away from
+		 * the real soc
+		 */
+		pr_debug("soc = %d before forcing shutdown_soc = %d\n",
+							soc, shutdown_soc);
+		find_ocv_for_soc(chip, &params, batt_temp,
+					shutdown_soc, &new_ocv_uv);
+		chip->pon_ocv_uv = chip->last_ocv_uv;
+		chip->last_ocv_uv = new_ocv_uv;
+
+		remaining_usable_charge_uah = params.ocv_charge_uah
+					- params.cc_uah
+					- params.uuc_uah;
+
+		soc = DIV_ROUND_CLOSEST((remaining_usable_charge_uah * 100),
+					(params.fcc_uah
+						- params.uuc_uah));
+
+		pr_debug("DONE for shutdown_soc = %d soc is %d, adjusted ocv to %duV\n",
+				shutdown_soc, soc, chip->last_ocv_uv);
+	}
+	mutex_unlock(&chip->soc_invalidation_mutex);
+
+	pr_debug("SOC before adjustment = %d\n", soc);
+	new_calculated_soc = adjust_soc(chip, &params, soc, batt_temp);
+
+	if (new_calculated_soc != chip->calculated_soc
+			&& chip->bms_psy.name != NULL) {
+		power_supply_changed(&chip->bms_psy);
+		pr_debug("power supply changed\n");
+	}
+
+	chip->calculated_soc = new_calculated_soc;
+	pr_debug("Set calculated SOC = %d\n", chip->calculated_soc);
+	chip->first_time_calc_soc = 0;
+	return chip->calculated_soc;
+}
+
+static void read_vbat(struct qpnp_bms_chip *chip)
+{
+	int rc;
+	struct qpnp_vadc_result result;
+
+	rc = qpnp_vadc_read(VBAT_SNS, &result);
+	if (rc) {
+		pr_err("error reading vadc VBAT_SNS = %d, rc = %d\n",
+					VBAT_SNS, rc);
+		return;
+	}
+	chip->last_vbat_read_uv = (int)result.physical;
+}
+
+static void calculate_soc_work(struct work_struct *work)
+{
+	struct qpnp_bms_chip *chip = container_of(work,
+				struct qpnp_bms_chip,
+				calculate_soc_delayed_work.work);
+	int batt_temp, rc, soc;
+	struct qpnp_vadc_result result;
+	struct raw_soc_params raw;
+
+	read_vbat(chip);
+
+	rc = qpnp_vadc_read(LR_MUX1_BATT_THERM, &result);
+	if (rc) {
+		pr_err("error reading vadc LR_MUX1_BATT_THERM = %d, rc = %d\n",
+					LR_MUX1_BATT_THERM, rc);
+		return;
+	}
+	pr_debug("batt_temp phy = %lld meas = 0x%llx\n", result.physical,
+						result.measurement);
+	batt_temp = (int)result.physical;
+
+	mutex_lock(&chip->last_ocv_uv_mutex);
+	read_soc_params_raw(chip, &raw);
+	soc = calculate_state_of_charge(chip, &raw, batt_temp);
+	mutex_unlock(&chip->last_ocv_uv_mutex);
+
+	if (soc < chip->low_soc_calc_threshold)
+		schedule_delayed_work(&chip->calculate_soc_delayed_work,
+			round_jiffies_relative(msecs_to_jiffies
+			(chip->low_soc_calculate_soc_ms)));
+	else
+		schedule_delayed_work(&chip->calculate_soc_delayed_work,
+			round_jiffies_relative(msecs_to_jiffies
+			(chip->calculate_soc_ms)));
+}
+
+static void backup_soc_and_iavg(struct qpnp_bms_chip *chip, int batt_temp,
+				int soc)
+{
+	u8 temp;
+	int rc;
+	int iavg_ma = chip->prev_uuc_iavg_ma;
+
+	if (iavg_ma > IAVG_START)
+		temp = (iavg_ma - IAVG_START) / IAVG_STEP_SIZE_MA;
+	else
+		temp = 0;
+
+	rc = qpnp_write_wrapper(chip, &temp,
+			chip->base + IAVG_STORAGE_REG, 1);
+
+	if (soc == 0)
+		temp = SOC_ZERO;
+	else
+		temp = soc;
+
+	/* don't store soc if temperature is below 5degC */
+	if (batt_temp > IGNORE_SOC_TEMP_DECIDEG)
+		rc = qpnp_write_wrapper(chip, &temp,
+				chip->base + SOC_STORAGE_REG, 1);
+}
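To illustrate the round trip through IAVG_STORAGE_REG, assume, purely hypothetically, IAVG_START = 600 mA and IAVG_STEP_SIZE_MA = 50 mA (the real constants are defined earlier in this driver). The stored byte, and the value recovered at the next boot by read_shutdown_soc_and_iavg(), then look like this:

/* Hypothetical constants; the driver's real IAVG_START/IAVG_STEP_SIZE_MA apply. */
static void example_iavg_round_trip(void)
{
	int iavg_start = 600, step_ma = 50;
	int iavg_ma = 850;				/* average being backed up */
	unsigned char stored = (iavg_ma - iavg_start) / step_ma;	/* 5 */
	int restored = iavg_start + step_ma * (stored + 1);		/* 900 mA */

	/* Note the decode adds one extra step, so the restored value rounds up. */
	(void)restored;
}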
+
+#define SOC_CATCHUP_SEC_MAX		600
+#define SOC_CATCHUP_SEC_PER_PERCENT	60
+#define MAX_CATCHUP_SOC	(SOC_CATCHUP_SEC_MAX/SOC_CATCHUP_SEC_PER_PERCENT)
+static int scale_soc_while_chg(struct qpnp_bms_chip *chip,
+				int delta_time_us, int new_soc, int prev_soc)
+{
+	int chg_time_sec;
+	int catch_up_sec;
+	int scaled_soc;
+	int numerator;
+
+	/*
+	 * The device must be charging to report a higher soc; if it is
+	 * not, ignore this soc and continue reporting prev_soc.
+	 * Also, don't report a higher value immediately; slowly scale the
+	 * value from prev_soc to the new soc based on a charge-time
+	 * weighted average.
+	 */
+
+	/* if not charging, return last soc */
+	if (chip->start_percent == -EINVAL)
+		return prev_soc;
+
+	chg_time_sec = DIV_ROUND_UP(chip->charge_time_us, USEC_PER_SEC);
+	catch_up_sec = DIV_ROUND_UP(chip->catch_up_time_us, USEC_PER_SEC);
+	pr_debug("cts= %d catch_up_sec = %d\n", chg_time_sec, catch_up_sec);
+
+	/*
+	 * if charging for more than catch_up time, simply return
+	 * new soc
+	 */
+	if (chg_time_sec > catch_up_sec)
+		return new_soc;
+
+	numerator = (catch_up_sec - chg_time_sec) * prev_soc
+			+ chg_time_sec * new_soc;
+	scaled_soc = numerator / catch_up_sec;
+
+	pr_debug("cts = %d new_soc = %d prev_soc = %d scaled_soc = %d\n",
+			chg_time_sec, new_soc, prev_soc, scaled_soc);
+
+	return scaled_soc;
+}
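A quick worked example of the charge-time weighted average above, with made-up inputs: for catch_up_sec = 300, chg_time_sec = 60, prev_soc = 50 and new_soc = 60, the reported value only moves part of the way toward the new soc.

/* Illustrative only: same arithmetic as scale_soc_while_chg() with made-up inputs. */
static int example_scaled_soc(void)
{
	int catch_up_sec = 300, chg_time_sec = 60;
	int prev_soc = 50, new_soc = 60;
	int numerator = (catch_up_sec - chg_time_sec) * prev_soc
			+ chg_time_sec * new_soc;	/* 240 * 50 + 60 * 60 = 15600 */

	return numerator / catch_up_sec;		/* 15600 / 300 = 52 */
}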
+
+/*
+ * bms_fake_battery is set in setups where a battery emulator is used instead
+ * of a real battery. This makes the bms driver report a different/fake value
+ * regardless of the calculated state of charge.
+ */
+static int bms_fake_battery = -EINVAL;
+module_param(bms_fake_battery, int, 0644);
+
+static int report_state_of_charge(struct qpnp_bms_chip *chip)
+{
+	int soc;
+	int delta_time_us;
+	struct timespec now;
+	struct qpnp_vadc_result result;
+	int batt_temp;
+	int rc;
+
+	if (bms_fake_battery != -EINVAL) {
+		pr_debug("Returning Fake SOC = %d%%\n", bms_fake_battery);
+		return bms_fake_battery;
+	}
+
+	soc = chip->calculated_soc;
+
+	rc = qpnp_vadc_read(LR_MUX1_BATT_THERM, &result);
+
+	if (rc) {
+		pr_err("error reading adc channel = %d, rc = %d\n",
+					LR_MUX1_BATT_THERM, rc);
+		return rc;
+	}
+	pr_debug("batt_temp phy = %lld meas = 0x%llx\n", result.physical,
+						result.measurement);
+	batt_temp = (int)result.physical;
+
+	do_posix_clock_monotonic_gettime(&now);
+	if (chip->t_soc_queried.tv_sec != 0) {
+		delta_time_us
+		= (now.tv_sec - chip->t_soc_queried.tv_sec) * USEC_PER_SEC
+			+ (now.tv_nsec - chip->t_soc_queried.tv_nsec) / 1000;
+	} else {
+		/* calculation for the first time */
+		delta_time_us = 0;
+	}
+
+	/*
+	 * account for charge time - limit it to SOC_CATCHUP_SEC to
+	 * avoid overflows when charging continues for extended periods
+	 */
+	if (chip->start_percent != -EINVAL) {
+		if (chip->charge_time_us == 0) {
+			/*
+			 * calculating soc for the first time after the start
+			 * of charging. Initialize the catchup time.
+			 */
+			if (abs(soc - chip->last_soc) < MAX_CATCHUP_SOC)
+				chip->catch_up_time_us =
+				(soc - chip->last_soc)
+					* SOC_CATCHUP_SEC_PER_PERCENT
+					* USEC_PER_SEC;
+			else
+				chip->catch_up_time_us =
+				SOC_CATCHUP_SEC_MAX * USEC_PER_SEC;
+
+			if (chip->catch_up_time_us < 0)
+				chip->catch_up_time_us = 0;
+		}
+
+		/* add charge time */
+		if (chip->charge_time_us < SOC_CATCHUP_SEC_MAX * USEC_PER_SEC)
+			chip->charge_time_us += delta_time_us;
+
+		/* end catchup if calculated soc and last soc are same */
+		if (chip->last_soc == soc)
+			chip->catch_up_time_us = 0;
+	}
+
+	/* last_soc < soc ... scale and catch up */
+	if (chip->last_soc != -EINVAL && chip->last_soc < soc && soc != 100)
+		soc = scale_soc_while_chg(chip, delta_time_us,
+						soc, chip->last_soc);
+
+	pr_debug("last_soc = %d, calculated_soc = %d, soc = %d\n",
+			chip->last_soc, chip->calculated_soc, soc);
+	chip->last_soc = soc;
+	backup_soc_and_iavg(chip, batt_temp, chip->last_soc);
+	pr_debug("Reported SOC = %d\n", chip->last_soc);
+	chip->t_soc_queried = now;
+
+	return chip->last_soc;
+}
+
+static int calculate_soc_from_voltage(struct qpnp_bms_chip *chip)
+{
+	int voltage_range_uv, voltage_remaining_uv, voltage_based_soc;
+
+	if (chip->last_vbat_read_uv < 0)
+		read_vbat(chip);
+
+	voltage_range_uv = chip->max_voltage_uv - chip->v_cutoff_uv;
+	voltage_remaining_uv = chip->last_vbat_read_uv - chip->v_cutoff_uv;
+	voltage_based_soc = voltage_remaining_uv * 100 / voltage_range_uv;
+
+	return clamp(voltage_based_soc, 0, 100);
+}
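A worked example of the voltage-based fallback above, using hypothetical thresholds: with max_voltage_uv = 4200000, v_cutoff_uv = 3400000 and a last reading of 3800000 uV, the reported capacity is 50%.

/* Illustrative only: mirrors calculate_soc_from_voltage() with made-up readings. */
static int example_voltage_soc(void)
{
	int max_voltage_uv = 4200000, v_cutoff_uv = 3400000;
	int last_vbat_read_uv = 3800000;
	int range_uv = max_voltage_uv - v_cutoff_uv;		/* 800000 uV */
	int remaining_uv = last_vbat_read_uv - v_cutoff_uv;	/* 400000 uV */

	return remaining_uv * 100 / range_uv;			/* 50 */
}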
+
 /* Returns capacity as a SoC percentage between 0 and 100 */
 static int get_prop_bms_capacity(struct qpnp_bms_chip *chip)
 {
-	/* return 50 until a real algorithm is implemented */
-	return 50;
+	if (use_voltage_soc)
+		return calculate_soc_from_voltage(chip);
+	else
+		return report_state_of_charge(chip);
 }
 
 /* Returns instantaneous current in uA */
 static int get_prop_bms_current_now(struct qpnp_bms_chip *chip)
 {
-	/* temporarily return 0 until a real algorithm is put in */
-	return 0;
+	int rc, result_ua;
+
+	rc = get_battery_current(chip, &result_ua);
+	if (rc) {
+		pr_err("failed to get current: %d\n", rc);
+		return rc;
+	}
+	return result_ua;
 }
 
 /* Returns full charge design in uAh */
 static int get_prop_bms_charge_full_design(struct qpnp_bms_chip *chip)
 {
-	/* temporarily return 0 until a real algorithm is put in */
-	return 0;
+	return chip->fcc;
+}
+
+static bool get_prop_bms_online(struct qpnp_bms_chip *chip)
+{
+	return chip->online;
+}
+
+static int get_prop_bms_status(struct qpnp_bms_chip *chip)
+{
+	return chip->charger_status;
 }
 
 static void set_prop_bms_online(struct qpnp_bms_chip *chip, bool online)
@@ -184,6 +1682,12 @@
 	case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN:
 		val->intval = get_prop_bms_charge_full_design(chip);
 		break;
+	case POWER_SUPPLY_PROP_STATUS:
+		val->intval = get_prop_bms_status(chip);
+		break;
+	case POWER_SUPPLY_PROP_ONLINE:
+		val->intval = get_prop_bms_online(chip);
+		break;
 	default:
 		return -EINVAL;
 	}
@@ -210,24 +1714,206 @@
 	return 0;
 }
 
-#define SPMI_PROPERTY_READ(chip_prop, qpnp_spmi_property, retval, errlabel)\
+static void read_shutdown_soc_and_iavg(struct qpnp_bms_chip *chip)
+{
+	int rc;
+	u8 temp;
+
+	if (chip->ignore_shutdown_soc) {
+		chip->shutdown_soc_invalid = 1;
+		chip->shutdown_soc = 0;
+		chip->shutdown_iavg_ma = 0;
+	} else {
+		rc = qpnp_read_wrapper(chip, &temp,
+				chip->base + IAVG_STORAGE_REG, 1);
+		if (rc) {
+			pr_err("failed to read addr = %d %d assuming %d\n",
+					chip->base + IAVG_STORAGE_REG, rc,
+					IAVG_START);
+			chip->shutdown_iavg_ma = IAVG_START;
+		} else {
+			if (temp == 0) {
+				chip->shutdown_iavg_ma = IAVG_START;
+			} else {
+				chip->shutdown_iavg_ma = IAVG_START
+					+ IAVG_STEP_SIZE_MA * (temp + 1);
+			}
+		}
+
+		rc = qpnp_read_wrapper(chip, &temp,
+				chip->base + SOC_STORAGE_REG, 1);
+		if (rc) {
+			pr_err("failed to read addr = %d %d\n",
+					chip->base + SOC_STORAGE_REG, rc);
+		} else {
+			chip->shutdown_soc = temp;
+
+			if (chip->shutdown_soc == 0) {
+				pr_debug("No shutdown soc available\n");
+				chip->shutdown_soc_invalid = 1;
+				chip->shutdown_iavg_ma = 0;
+			} else if (chip->shutdown_soc == SOC_ZERO) {
+				chip->shutdown_soc = 0;
+			}
+		}
+	}
+
+	pr_debug("shutdown_soc = %d shutdown_iavg = %d shutdown_soc_invalid = %d\n",
+			chip->shutdown_soc,
+			chip->shutdown_iavg_ma,
+			chip->shutdown_soc_invalid);
+}
+
+#define PALLADIUM_ID_MIN	0x7F40
+#define PALLADIUM_ID_MAX	0x7F5A
+#define DESAY_5200_ID_MIN	0x7F7F
+#define DESAY_5200_ID_MAX	0x802F
+static int32_t read_battery_id(struct qpnp_bms_chip *chip)
+{
+	int rc;
+	struct qpnp_vadc_result result;
+
+	rc = qpnp_vadc_read(LR_MUX2_BAT_ID, &result);
+	if (rc) {
+		pr_err("error reading batt id channel = %d, rc = %d\n",
+					LR_MUX2_BAT_ID, rc);
+		return rc;
+	}
+	pr_debug("batt_id phy = %lld meas = 0x%llx\n", result.physical,
+						result.measurement);
+	pr_debug("raw_code = 0x%x\n", result.adc_code);
+	return result.adc_code;
+}
+
+static int set_battery_data(struct qpnp_bms_chip *chip)
+{
+	int64_t battery_id;
+
+	if (chip->batt_type == BATT_DESAY)
+		goto desay;
+	else if (chip->batt_type == BATT_PALLADIUM)
+		goto palladium;
+
+	battery_id = read_battery_id(chip);
+	if (battery_id < 0) {
+		pr_err("cannot read battery id err = %lld\n", battery_id);
+		return battery_id;
+	}
+
+	if (is_between(PALLADIUM_ID_MIN, PALLADIUM_ID_MAX, battery_id)) {
+		goto palladium;
+	} else if (is_between(DESAY_5200_ID_MIN, DESAY_5200_ID_MAX,
+				battery_id)) {
+		goto desay;
+	} else {
+		pr_warn("invalid battid, palladium 1500 assumed batt_id %llx\n",
+				battery_id);
+		goto palladium;
+	}
+
+palladium:
+		chip->fcc = palladium_1500_data.fcc;
+		chip->fcc_temp_lut = palladium_1500_data.fcc_temp_lut;
+		chip->fcc_sf_lut = palladium_1500_data.fcc_sf_lut;
+		chip->pc_temp_ocv_lut = palladium_1500_data.pc_temp_ocv_lut;
+		chip->pc_sf_lut = palladium_1500_data.pc_sf_lut;
+		chip->rbatt_sf_lut = palladium_1500_data.rbatt_sf_lut;
+		chip->default_rbatt_mohm
+				= palladium_1500_data.default_rbatt_mohm;
+		goto check_lut;
+desay:
+		chip->fcc = desay_5200_data.fcc;
+		chip->fcc_temp_lut = desay_5200_data.fcc_temp_lut;
+		chip->pc_temp_ocv_lut = desay_5200_data.pc_temp_ocv_lut;
+		chip->pc_sf_lut = desay_5200_data.pc_sf_lut;
+		chip->rbatt_sf_lut = desay_5200_data.rbatt_sf_lut;
+		chip->default_rbatt_mohm = desay_5200_data.default_rbatt_mohm;
+		goto check_lut;
+check_lut:
+		if (chip->pc_temp_ocv_lut == NULL) {
+			pr_err("temp ocv lut table is NULL\n");
+			return -EINVAL;
+		}
+		return 0;
+}
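For example (the ADC codes below are hypothetical readings, not measured values): a battery-id code of 0x7F50 falls inside the Palladium window defined above, while 0x8000 falls inside the Desay window, so the corresponding profile data would be selected.

/* Illustrative only; is_between() is the same helper used above. */
static void example_battery_id_match(void)
{
	int palladium = is_between(PALLADIUM_ID_MIN, PALLADIUM_ID_MAX, 0x7F50);	/* true */
	int desay = is_between(DESAY_5200_ID_MIN, DESAY_5200_ID_MAX, 0x8000);	/* true */

	(void)palladium;
	(void)desay;
}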
+
+#define SPMI_PROP_READ(chip_prop, qpnp_spmi_property, retval)		\
 do {									\
-	retval = of_property_read_u32(spmi->dev.of_node,		\
+	retval = of_property_read_u32(chip->spmi->dev.of_node,		\
 				"qcom,bms-" qpnp_spmi_property,		\
 					&chip->chip_prop);		\
 	if (retval) {							\
 		pr_err("Error reading " #qpnp_spmi_property		\
 						" property %d\n", rc);	\
-		goto errlabel;						\
+		return -EINVAL;						\
 	}								\
 } while (0)
 
+static inline int bms_read_properties(struct qpnp_bms_chip *chip)
+{
+	int rc;
+
+	SPMI_PROP_READ(r_sense_mohm, "r-sense-mohm", rc);
+	SPMI_PROP_READ(v_cutoff_uv, "v-cutoff-uv", rc);
+	SPMI_PROP_READ(max_voltage_uv, "max-voltage-uv", rc);
+	SPMI_PROP_READ(r_conn_mohm, "r-conn-mohm", rc);
+	SPMI_PROP_READ(chg_term_ua, "chg-term-ua", rc);
+	SPMI_PROP_READ(shutdown_soc_valid_limit,
+			"shutdown-soc-valid-limit", rc);
+	SPMI_PROP_READ(adjust_soc_high_threshold,
+			"adjust-soc-high-threshold", rc);
+	SPMI_PROP_READ(adjust_soc_low_threshold,
+			"adjust-soc-low-threshold", rc);
+	SPMI_PROP_READ(batt_type, "batt-type", rc);
+	SPMI_PROP_READ(low_soc_calc_threshold,
+			"low-soc-calculate-soc-threshold", rc);
+	SPMI_PROP_READ(low_soc_calculate_soc_ms,
+			"low-soc-calculate-soc-ms", rc);
+	SPMI_PROP_READ(calculate_soc_ms, "calculate-soc-ms", rc);
+	chip->ignore_shutdown_soc = of_property_read_bool(
+			chip->spmi->dev.of_node,
+			"qcom,bms-ignore-shutdown-soc");
+	use_voltage_soc = of_property_read_bool(chip->spmi->dev.of_node,
+			"qcom,bms-use-voltage-soc");
+
+	if (chip->adjust_soc_low_threshold >= 45)
+		chip->adjust_soc_low_threshold = 45;
+
+	pr_debug("dts data: r_sense_mohm:%d, v_cutoff_uv:%d, max_v:%d\n",
+			chip->r_sense_mohm, chip->v_cutoff_uv,
+			chip->max_voltage_uv);
+	pr_debug("r_conn:%d, shutdown_soc: %d, adjust_soc_low:%d\n",
+			chip->r_conn_mohm, chip->shutdown_soc_valid_limit,
+			chip->adjust_soc_low_threshold);
+	pr_debug("adjust_soc_high:%d, chg_term_ua:%d, batt_type:%d\n",
+			chip->adjust_soc_high_threshold, chip->chg_term_ua,
+			chip->batt_type);
+	pr_debug("ignore_shutdown_soc:%d, use_voltage_soc:%d\n",
+			chip->ignore_shutdown_soc, use_voltage_soc);
+
+	return 0;
+}
+
+static inline void bms_initialize_constants(struct qpnp_bms_chip *chip)
+{
+	chip->start_percent = -EINVAL;
+	chip->end_percent = -EINVAL;
+	chip->prev_pc_unusable = -EINVAL;
+	chip->soc_at_cv = -EINVAL;
+	chip->calculated_soc = -EINVAL;
+	chip->last_soc = -EINVAL;
+	chip->last_vbat_read_uv = -EINVAL;
+	chip->last_soc_est = -EINVAL;
+	chip->first_time_calc_soc = 1;
+	chip->first_time_calc_uuc = 1;
+}
+
 static int __devinit
 qpnp_bms_probe(struct spmi_device *spmi)
 {
 	struct qpnp_bms_chip *chip;
 	struct resource *bms_resource;
-	int rc;
+	int rc, vbatt;
 
 	chip = kzalloc(sizeof *chip, GFP_KERNEL);
 
@@ -239,6 +1925,10 @@
 	chip->dev = &(spmi->dev);
 	chip->spmi = spmi;
 
+	mutex_init(&chip->bms_output_lock);
+	mutex_init(&chip->last_ocv_uv_mutex);
+	mutex_init(&chip->soc_invalidation_mutex);
+
 	bms_resource = spmi_get_resource(spmi, NULL, IORESOURCE_MEM, 0);
 	if (!bms_resource) {
 		dev_err(&spmi->dev, "Unable to get BMS base address\n");
@@ -260,29 +1950,30 @@
 		goto error_read;
 	}
 
-	SPMI_PROPERTY_READ(r_sense_mohm, "r-sense-mohm", rc, error_read);
-	SPMI_PROPERTY_READ(v_cutoff, "v-cutoff-uv", rc, error_read);
-	SPMI_PROPERTY_READ(max_voltage, "max-voltage-uv", rc, error_read);
-	SPMI_PROPERTY_READ(r_conn_mohm, "r-conn-mohm", rc, error_read);
-	SPMI_PROPERTY_READ(shutdown_soc_valid_limit,
-			"shutdown-soc-valid-limit", rc, error_read);
-	SPMI_PROPERTY_READ(adjust_soc_low_threshold,
-			"adjust-soc-low-threshold", rc, error_read);
-	SPMI_PROPERTY_READ(adjust_soc_high_threshold,
-			"adjust-soc-high-threshold", rc, error_read);
-	SPMI_PROPERTY_READ(chg_term, "chg-term-ua", rc, error_read);
+	rc = set_battery_data(chip);
+	if (rc) {
+		pr_err("Bad battery data %d\n", rc);
+		goto error_read;
+	}
 
-	pr_debug("dts data: r_sense_mohm:%d, v_cutoff:%d, max_v:%d, r_conn:%d, shutdown_soc: %d, adjust_soc_low:%d, adjust_soc_high:%d, chg_term:%d\n",
-			chip->r_sense_mohm, chip->v_cutoff,
-			chip->max_voltage, chip->r_conn_mohm,
-			chip->shutdown_soc_valid_limit,
-			chip->adjust_soc_low_threshold,
-			chip->adjust_soc_high_threshold,
-			chip->chg_term);
+	rc = bms_read_properties(chip);
+	if (rc) {
+		pr_err("Unable to read all bms properties, rc = %d\n", rc);
+		goto error_read;
+	}
+
+	bms_initialize_constants(chip);
+
+	INIT_DELAYED_WORK(&chip->calculate_soc_delayed_work,
+			calculate_soc_work);
+
+	read_shutdown_soc_and_iavg(chip);
 
 	dev_set_drvdata(&spmi->dev, chip);
 	device_init_wakeup(&spmi->dev, 1);
 
+	calculate_soc_work(&(chip->calculate_soc_delayed_work.work));
+
 	/* setup & register the battery power supply */
 	chip->bms_psy.name = "bms";
 	chip->bms_psy.type = POWER_SUPPLY_TYPE_BMS;
@@ -302,6 +1993,12 @@
 		goto unregister_dc;
 	}
 
+	vbatt = 0;
+	get_battery_voltage(&vbatt);
+
+	pr_info("OK battery_capacity_at_boot=%d vbatt = %d\n",
+				get_prop_bms_capacity(chip),
+				vbatt);
 	pr_info("probe success\n");
 	return 0;
 
diff --git a/drivers/power/smb137c-charger.c b/drivers/power/smb137c-charger.c
index b865bd7..9cdf5b5 100644
--- a/drivers/power/smb137c-charger.c
+++ b/drivers/power/smb137c-charger.c
@@ -992,29 +992,47 @@
 {
 	struct smb137c_chip *chip = container_of(psy, struct smb137c_chip, psy);
 	union power_supply_propval prop = {0,};
+	int scope = POWER_SUPPLY_SCOPE_DEVICE;
+	int current_limit = USB_MIN_CURRENT_UA;
+	int online = 0;
+	int rc;
 
 	mutex_lock(&chip->lock);
 	dev_dbg(&chip->client->dev, "%s: start\n", __func__);
 
-	chip->usb_psy->get_property(chip->usb_psy, POWER_SUPPLY_PROP_ONLINE,
-					&prop);
+	rc = chip->usb_psy->get_property(chip->usb_psy,
+					POWER_SUPPLY_PROP_ONLINE, &prop);
+	if (rc)
+		dev_err(&chip->client->dev, "%s: could not read USB online property, rc=%d\n",
+			__func__, rc);
+	else
+		online = prop.intval;
 
-	if (prop.intval) {
-		/* USB online */
-		chip->usb_psy->get_property(chip->usb_psy,
-						POWER_SUPPLY_PROP_SCOPE, &prop);
-		if (prop.intval == POWER_SUPPLY_SCOPE_SYSTEM) {
-			/* USB host mode */
-			smb137c_enable_otg_mode(chip);
-			smb137c_disable_charging(chip);
-		} else {
-			/* USB device mode */
-			chip->usb_psy->get_property(chip->usb_psy,
+	rc = chip->usb_psy->get_property(chip->usb_psy, POWER_SUPPLY_PROP_SCOPE,
+					&prop);
+	if (rc)
+		dev_err(&chip->client->dev, "%s: could not read USB scope property, rc=%d\n",
+			__func__, rc);
+	else
+		scope = prop.intval;
+
+	rc = chip->usb_psy->get_property(chip->usb_psy,
 					POWER_SUPPLY_PROP_CURRENT_MAX, &prop);
-			smb137c_set_usb_input_current_limit(chip, prop.intval);
-			smb137c_enable_charging(chip);
-			smb137c_disable_otg_mode(chip);
-		}
+	if (rc)
+		dev_err(&chip->client->dev, "%s: could not read USB current_max property, rc=%d\n",
+			__func__, rc);
+	else
+		current_limit = prop.intval;
+
+	if (scope == POWER_SUPPLY_SCOPE_SYSTEM) {
+		/* USB host mode */
+		smb137c_disable_charging(chip);
+		smb137c_enable_otg_mode(chip);
+	} else if (online) {
+		/* USB online in device mode */
+		smb137c_set_usb_input_current_limit(chip, current_limit);
+		smb137c_enable_charging(chip);
+		smb137c_disable_otg_mode(chip);
 	} else {
 		/* USB offline */
 		smb137c_disable_charging(chip);
@@ -1318,7 +1336,6 @@
 };
 MODULE_DEVICE_TABLE(i2c, smb137c_id);
 
-/* TODO: should this be "summit,smb137c-charger"? */
 static const struct of_device_id smb137c_match[] = {
 	{ .compatible = "summit,smb137c", },
 	{ },
diff --git a/drivers/power/smb350_charger.c b/drivers/power/smb350_charger.c
index dc0c4bd..21d7aea 100644
--- a/drivers/power/smb350_charger.c
+++ b/drivers/power/smb350_charger.c
@@ -229,20 +229,18 @@
 	return power_ok;
 }
 
-static bool smb350_is_charging(struct i2c_client *client)
+static bool smb350_is_charger_present(struct i2c_client *client)
 {
 	int val;
-	bool is_charging;
 
+	/*
+	 * Normally the device is non-removable and embedded on the board.
+	 * Verify that the charger is present by checking for an I2C response.
+	 */
 	val = smb350_read_reg(client, STATUS_B_REG);
 	if (val < 0)
 		return false;
 
-	val = (val >> 1) & 0x3;
-
-	is_charging = (val != 0);
-
-	return is_charging;
+	return true;
 }
 
 static int smb350_get_prop_charge_type(struct smb350_device *dev)
@@ -408,10 +406,10 @@
 
 	switch (psp) {
 	case POWER_SUPPLY_PROP_PRESENT:
-		val->intval = smb350_is_dc_present(client);
+		val->intval = smb350_is_charger_present(client);
 		break;
 	case POWER_SUPPLY_PROP_ONLINE:
-		val->intval = smb350_is_charging(client);
+		val->intval = smb350_is_dc_present(client);
 		break;
 	case POWER_SUPPLY_PROP_CHARGE_TYPE:
 		val->intval = smb350_get_prop_charge_type(dev);
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index 57cde45..0ebb944 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -1897,6 +1897,8 @@
 		if (rdev->desc->ops->list_voltage)
 			selector = rdev->desc->ops->list_voltage(rdev,
 								 selector);
+		else if (rdev->desc->ops->get_voltage)
+			selector = rdev->desc->ops->get_voltage(rdev);
 		else
 			selector = -1;
 	} else if (rdev->desc->ops->set_voltage_sel) {
diff --git a/drivers/regulator/qpnp-regulator.c b/drivers/regulator/qpnp-regulator.c
index 0549593..a330f1b 100644
--- a/drivers/regulator/qpnp-regulator.c
+++ b/drivers/regulator/qpnp-regulator.c
@@ -550,11 +550,12 @@
 }
 
 static int qpnp_regulator_select_voltage(struct qpnp_regulator *vreg,
-		int min_uV, int max_uV, int *range_sel, int *voltage_sel)
+		int min_uV, int max_uV, int *range_sel, int *voltage_sel,
+		unsigned *selector)
 {
 	struct qpnp_voltage_range *range;
 	int uV = min_uV;
-	int lim_min_uV, lim_max_uV, i;
+	int lim_min_uV, lim_max_uV, i, range_id;
 
 	/* Check if request voltage is outside of physically settable range. */
 	lim_min_uV = vreg->set_points->range[0].set_point_min_uV;
@@ -575,7 +576,8 @@
 	for (i = vreg->set_points->count - 1; i > 0; i--)
 		if (uV > vreg->set_points->range[i - 1].max_uV)
 			break;
-	range = &vreg->set_points->range[i];
+	range_id = i;
+	range = &vreg->set_points->range[range_id];
 	*range_sel = range->range_sel;
 
 	/*
@@ -594,6 +596,11 @@
 		return -EINVAL;
 	}
 
+	*selector = 0;
+	for (i = 0; i < range_id; i++)
+		*selector += vreg->set_points->range[i].n_voltages;
+	*selector += (uV - range->set_point_min_uV) / range->step_uV;
+
 	return 0;
 }
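A hypothetical worked example of the new selector arithmetic (range sizes, voltages and step are made up): if the lower ranges contribute 16 set points in total and the chosen range starts at 1.75 V with a 25 mV step, a request that snaps to 1.825 V yields selector 19.

/* Illustrative only: same arithmetic as the selector computation above. */
static unsigned int example_selector(void)
{
	unsigned int selector = 0;
	int set_point_min_uV = 1750000, step_uV = 25000;
	int uV = 1825000;	/* requested voltage after snapping to a step */

	selector += 16;					/* n_voltages of all lower ranges */
	selector += (uV - set_point_min_uV) / step_uV;	/* + 3 */

	return selector;				/* 19 */
}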
 
@@ -605,7 +612,7 @@
 	u8 buf[2];
 
 	rc = qpnp_regulator_select_voltage(vreg, min_uV, max_uV, &range_sel,
-		&voltage_sel);
+		&voltage_sel, selector);
 	if (rc) {
 		vreg_err(vreg, "could not set voltage, rc=%d\n", rc);
 		return rc;
@@ -669,7 +676,7 @@
 	int rc, range_sel, voltage_sel;
 
 	rc = qpnp_regulator_select_voltage(vreg, min_uV, max_uV, &range_sel,
-		&voltage_sel);
+		&voltage_sel, selector);
 	if (rc) {
 		vreg_err(vreg, "could not set voltage, rc=%d\n", rc);
 		return rc;
diff --git a/drivers/spi/spi_qsd.c b/drivers/spi/spi_qsd.c
index c26da60..0497a32 100644
--- a/drivers/spi/spi_qsd.c
+++ b/drivers/spi/spi_qsd.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2008-2012, Code Aurora Forum. All rights reserved.
+/* Copyright (c) 2008-2012, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -30,17 +30,18 @@
 #include <linux/workqueue.h>
 #include <linux/io.h>
 #include <linux/debugfs.h>
-#include <mach/msm_spi.h>
-#include <linux/dma-mapping.h>
-#include <linux/sched.h>
-#include <mach/dma.h>
-#include <asm/atomic.h>
-#include <linux/mutex.h>
 #include <linux/gpio.h>
 #include <linux/remote_spinlock.h>
 #include <linux/pm_qos.h>
 #include <linux/of.h>
 #include <linux/of_gpio.h>
+#include <linux/dma-mapping.h>
+#include <linux/sched.h>
+#include <linux/mutex.h>
+#include <linux/atomic.h>
+#include <mach/msm_spi.h>
+#include <mach/sps.h>
+#include <mach/dma.h>
 #include "spi_qsd.h"
 
 static inline int msm_spi_configure_gsbi(struct msm_spi *dd,
@@ -211,16 +212,19 @@
 				   &dd->output_block_size, block, mult)) {
 		goto fifo_size_err;
 	}
-	/* DM mode is not available for this block size */
-	if (dd->input_block_size == 4 || dd->output_block_size == 4)
-		dd->use_dma = 0;
+	if (dd->qup_ver == SPI_QUP_VERSION_NONE) {
+		/* DM mode is not available for this block size */
+		if (dd->input_block_size == 4 || dd->output_block_size == 4)
+			dd->use_dma = 0;
 
-	/* DM mode is currently unsupported for different block sizes */
-	if (dd->input_block_size != dd->output_block_size)
-		dd->use_dma = 0;
+		/* DM mode is currently unsupported for different block sizes */
+		if (dd->input_block_size != dd->output_block_size)
+			dd->use_dma = 0;
 
-	if (dd->use_dma)
-		dd->burst_size = max(dd->input_block_size, DM_BURST_SIZE);
+		if (dd->use_dma)
+			dd->burst_size = max(dd->input_block_size,
+							DM_BURST_SIZE);
+	}
 
 	return;
 
@@ -352,14 +356,19 @@
 	return 0;
 }
 
-static inline void msm_spi_add_configs(struct msm_spi *dd, u32 *config, int n)
+/**
+ * msm_spi_set_bpw_and_no_io_flags: configure N and the no-input/no-output flags
+ */
+static inline void
+msm_spi_set_bpw_and_no_io_flags(struct msm_spi *dd, u32 *config, int n)
 {
 	*config &= ~(SPI_NO_INPUT|SPI_NO_OUTPUT);
 
 	if (n != (*config & SPI_CFG_N))
 		*config = (*config & ~SPI_CFG_N) | n;
 
-	if ((dd->mode == SPI_DMOV_MODE) && (!dd->read_len)) {
+	if (((dd->mode == SPI_DMOV_MODE) && (!dd->read_len))
+					|| (dd->mode == SPI_BAM_MODE)) {
 		if (dd->read_buf == NULL)
 			*config |= SPI_NO_INPUT;
 		if (dd->write_buf == NULL)
@@ -367,23 +376,207 @@
 	}
 }
 
-static void msm_spi_set_config(struct msm_spi *dd, int bpw)
+/**
+ * msm_spi_calc_spi_config_loopback_and_input_first: Calculate the values that
+ * should be updated into SPI_CONFIG's LOOPBACK and INPUT_FIRST flags
+ * @return calculatd value for SPI_CONFIG
+ */
+static u32
+msm_spi_calc_spi_config_loopback_and_input_first(u32 spi_config, u8 mode)
 {
-	u32 spi_config;
-
-	spi_config = readl_relaxed(dd->base + SPI_CONFIG);
-
-	if (dd->cur_msg->spi->mode & SPI_CPHA)
-		spi_config &= ~SPI_CFG_INPUT_FIRST;
-	else
-		spi_config |= SPI_CFG_INPUT_FIRST;
-	if (dd->cur_msg->spi->mode & SPI_LOOP)
+	if (mode & SPI_LOOP)
 		spi_config |= SPI_CFG_LOOPBACK;
 	else
 		spi_config &= ~SPI_CFG_LOOPBACK;
-	msm_spi_add_configs(dd, &spi_config, bpw-1);
+
+	if (mode & SPI_CPHA)
+		spi_config &= ~SPI_CFG_INPUT_FIRST;
+	else
+		spi_config |= SPI_CFG_INPUT_FIRST;
+
+	return spi_config;
+}
+
+/**
+ * msm_spi_set_spi_config: prepares register SPI_CONFIG to process the
+ * next transfer
+ */
+static void msm_spi_set_spi_config(struct msm_spi *dd, int bpw)
+{
+	u32 spi_config = readl_relaxed(dd->base + SPI_CONFIG);
+	spi_config = msm_spi_calc_spi_config_loopback_and_input_first(
+					spi_config, dd->cur_msg->spi->mode);
+
+	if (dd->qup_ver == SPI_QUP_VERSION_NONE)
+		/* flags removed from SPI_CONFIG in QUP version-2 */
+		msm_spi_set_bpw_and_no_io_flags(dd, &spi_config, bpw-1);
+	else if (dd->mode == SPI_BAM_MODE)
+		spi_config |= SPI_CFG_INPUT_FIRST;
+
 	writel_relaxed(spi_config, dd->base + SPI_CONFIG);
-	msm_spi_set_qup_config(dd, bpw);
+}
+
+/**
+ * msm_spi_set_mx_counts: set SPI_MX_INPUT_COUNT and SPI_MX_INPUT_COUNT
+ * for FIFO-mode. set SPI_MX_INPUT_COUNT and SPI_MX_OUTPUT_COUNT for
+ * BAM and DMOV modes.
+ * @n_words The number of reads/writes of size N.
+ */
+static void msm_spi_set_mx_counts(struct msm_spi *dd, u32 n_words)
+{
+	/*
+	 * n_words cannot exceed fifo_size, and only one READ COUNT
+	 * interrupt is generated per transaction, so for transactions
+	 * larger than fifo size READ COUNT must be disabled.
+	 * For those transactions we usually move to Data Mover mode.
+	 */
+	if (dd->mode == SPI_FIFO_MODE) {
+		if (n_words <= dd->input_fifo_size) {
+			writel_relaxed(n_words,
+				       dd->base + SPI_MX_READ_COUNT);
+			msm_spi_set_write_count(dd, n_words);
+		} else {
+			writel_relaxed(0, dd->base + SPI_MX_READ_COUNT);
+			msm_spi_set_write_count(dd, 0);
+		}
+		if (dd->qup_ver == SPI_QUP_VERSION_BFAM) {
+			/* must be zero for FIFO */
+			writel_relaxed(0, dd->base + SPI_MX_INPUT_COUNT);
+			writel_relaxed(0, dd->base + SPI_MX_OUTPUT_COUNT);
+		}
+	} else {
+		/* must be zero for BAM and DMOV */
+		writel_relaxed(0, dd->base + SPI_MX_READ_COUNT);
+		msm_spi_set_write_count(dd, 0);
+
+		/*
+		 * For DMA transfers, both QUP_MX_INPUT_COUNT and
+		 * QUP_MX_OUTPUT_COUNT must be zero in all cases but one:
+		 * a non-balanced transfer where there is only a read_buf.
+		 */
+		if (dd->qup_ver == SPI_QUP_VERSION_BFAM) {
+			if (dd->write_buf)
+				writel_relaxed(0,
+						dd->base + SPI_MX_INPUT_COUNT);
+			else
+				writel_relaxed(n_words,
+						dd->base + SPI_MX_INPUT_COUNT);
+
+			writel_relaxed(0, dd->base + SPI_MX_OUTPUT_COUNT);
+		}
+	}
+}
+
+/**
+ * msm_spi_bam_begin_transfer: transfer dd->tx_bytes_remaining bytes
+ * using BAM.
+ * BAM can transfer at most SPI_MAX_TRFR_BTWN_RESETS bytes in a single
+ * transfer; between transfers the QUP must go through the reset state.
+ * A loop issues one BAM transfer at a time. If another transfer is
+ * required, it waits for the current transfer to finish, then moves to
+ * the reset state and back to the run state to issue the next transfer.
+ * The function does not wait for the last (or only) transfer to end.
+ * @timeout max time in jiffies to wait for a transfer to finish.
+ * @return zero on success
+ */
+static int
+msm_spi_bam_begin_transfer(struct msm_spi *dd, u32 timeout, u8 bpw)
+{
+	u32 bytes_to_send, bytes_sent, n_words_xfr, cons_flags, prod_flags;
+	int ret;
+	/*
+	 * QUP must move to reset mode every 64K-1 bytes of transfer
+	 * (counter is 16 bit)
+	 */
+	if (dd->tx_bytes_remaining > SPI_MAX_TRFR_BTWN_RESETS) {
+		/* assert chip select unconditionally */
+		u32 spi_ioc = readl_relaxed(dd->base + SPI_IO_CONTROL);
+		if (!(spi_ioc & SPI_IO_C_FORCE_CS))
+			writel_relaxed(spi_ioc | SPI_IO_C_FORCE_CS,
+				dd->base + SPI_IO_CONTROL);
+	}
+
+	/* Following flags are required since we are waiting on all transfers */
+	cons_flags = SPS_IOVEC_FLAG_EOT | SPS_IOVEC_FLAG_NWD;
+	/*
+	 * on a balanced transaction, BAM will set the flags on the producer
+	 * pipe based on the flags set on the consumer pipe
+	 */
+	prod_flags = (dd->write_buf) ? 0 : cons_flags;
+
+	while (dd->tx_bytes_remaining > 0) {
+		bytes_sent = dd->cur_transfer->len - dd->tx_bytes_remaining;
+		bytes_to_send = min_t(u32, dd->tx_bytes_remaining
+						, SPI_MAX_TRFR_BTWN_RESETS);
+		n_words_xfr = DIV_ROUND_UP(bytes_to_send
+						, dd->bytes_per_word);
+
+		msm_spi_set_mx_counts(dd, n_words_xfr);
+
+		ret = msm_spi_set_state(dd, SPI_OP_STATE_RUN);
+		if (ret < 0) {
+			dev_err(dd->dev,
+				"%s: Failed to set QUP state to run",
+				__func__);
+			goto xfr_err;
+		}
+
+		/* enqueue read buffer in BAM */
+		if (dd->read_buf) {
+			ret = sps_transfer_one(dd->bam.prod.handle,
+				dd->cur_transfer->rx_dma + bytes_sent,
+				bytes_to_send, dd, prod_flags);
+			if (ret < 0) {
+				dev_err(dd->dev,
+				"%s: Failed to queue producer BAM transfer",
+				__func__);
+				goto xfr_err;
+			}
+		}
+
+		/* enqueue write buffer in BAM */
+		if (dd->write_buf) {
+			ret = sps_transfer_one(dd->bam.cons.handle,
+				dd->cur_transfer->tx_dma + bytes_sent,
+				bytes_to_send, dd, cons_flags);
+			if (ret < 0) {
+				dev_err(dd->dev,
+				"%s: Failed to queue consumer BAM transfer",
+				__func__);
+				goto xfr_err;
+			}
+		}
+
+		dd->tx_bytes_remaining -= bytes_to_send;
+
+		/* move to reset state after SPI_MAX_TRFR_BTWN_RESETS */
+		if (dd->tx_bytes_remaining > 0) {
+			if (!wait_for_completion_timeout(
+				&dd->transfer_complete, timeout)) {
+				dev_err(dd->dev,
+					"%s: SPI transaction timeout",
+					__func__);
+				dd->cur_msg->status = -EIO;
+				ret = -EIO;
+				goto xfr_err;
+			}
+			ret = msm_spi_set_state(dd, SPI_OP_STATE_RESET);
+			if (ret < 0) {
+				dev_err(dd->dev,
+					"%s: Failed to set QUP state to reset",
+					__func__);
+				goto xfr_err;
+			}
+			init_completion(&dd->transfer_complete);
+		}
+	}
+	return 0;
+
+xfr_err:
+	return ret;
 }
 
 static void msm_spi_setup_dm_transfer(struct msm_spi *dd)
@@ -767,7 +960,15 @@
 	return IRQ_HANDLED;
 }
 
-static int msm_spi_map_dma_buffers(struct msm_spi *dd)
+/**
+ * msm_spi_dma_map_buffers: prepares buffers for DMA transfer
+ * @return zero on success or negative error code
+ *
+ * Calls dma_map_single() on the read/write buffers, effectively invalidating
+ * their cache entries. For WR-WR and WR-RD transfers, allocates a temporary
+ * buffer and copies the data to/from the client buffers.
+ */
+static int msm_spi_dma_map_buffers(struct msm_spi *dd)
 {
 	struct device *dev;
 	struct spi_transfer *first_xfr;
@@ -847,7 +1048,7 @@
 	return ret;
 }
 
-static void msm_spi_unmap_dma_buffers(struct msm_spi *dd)
+static void msm_spi_dmov_unmap_buffers(struct msm_spi *dd)
 {
 	struct device *dev;
 	u32 offset;
@@ -914,56 +1115,190 @@
 	}
 }
 
+static void msm_spi_bam_unmap_buffers(struct msm_spi *dd)
+{
+	struct device *dev;
+
+	/* mapped by client */
+	if (dd->cur_msg->is_dma_mapped)
+		return;
+
+	dev = &dd->cur_msg->spi->dev;
+	if (dd->cur_transfer->rx_buf)
+		dma_unmap_single(dev, dd->cur_transfer->rx_dma,
+				dd->cur_transfer->len,
+				DMA_FROM_DEVICE);
+
+	if (dd->cur_transfer->tx_buf)
+		dma_unmap_single(dev, dd->cur_transfer->tx_dma,
+				dd->cur_transfer->len,
+				DMA_TO_DEVICE);
+}
+
+static inline void msm_spi_dma_unmap_buffers(struct msm_spi *dd)
+{
+	if (dd->mode == SPI_DMOV_MODE)
+		msm_spi_dmov_unmap_buffers(dd);
+	else if (dd->mode == SPI_BAM_MODE)
+		msm_spi_bam_unmap_buffers(dd);
+}
+
 /**
- * msm_use_dm - decides whether to use data mover for this
- * 		transfer
+ * msm_spi_use_dma - decides whether to use Data-Mover or BAM for
+ * the given transfer
  * @dd:       device
  * @tr:       transfer
  *
- * Start using DM if:
- * 1. Transfer is longer than 3*block size.
- * 2. Buffers should be aligned to cache line.
- * 3. For WR-RD or WR-WR transfers, if condition (1) and (2) above are met.
+ * Start using DMA if:
+ * 1. It is supported by the HW.
+ * 2. It is not disabled by platform data.
+ * 3. Transfer size is greater than 3 * block size.
+ * 4. Buffers are aligned to the cache line.
+ * 5. Bytes-per-word is 8, 16 or 32.
   */
-static inline int msm_use_dm(struct msm_spi *dd, struct spi_transfer *tr,
-			     u8 bpw)
+static inline bool
+msm_spi_use_dma(struct msm_spi *dd, struct spi_transfer *tr, u8 bpw)
 {
-	u32 cache_line = dma_get_cache_alignment();
-
 	if (!dd->use_dma)
-		return 0;
+		return false;
+
+	/* check constraints from platform data */
+	if ((dd->qup_ver == SPI_QUP_VERSION_BFAM) && !dd->pdata->use_bam)
+		return false;
 
 	if (dd->cur_msg_len < 3*dd->input_block_size)
-		return 0;
+		return false;
 
 	if (dd->multi_xfr && !dd->read_len && !dd->write_len)
-		return 0;
+		return false;
 
-	if (tr->tx_buf) {
-		if (!IS_ALIGNED((size_t)tr->tx_buf, cache_line))
-			return 0;
-	}
-	if (tr->rx_buf) {
-		if (!IS_ALIGNED((size_t)tr->rx_buf, cache_line))
-			return 0;
+	if (dd->qup_ver == SPI_QUP_VERSION_NONE) {
+		u32 cache_line = dma_get_cache_alignment();
+
+		if (tr->tx_buf) {
+			if (!IS_ALIGNED((size_t)tr->tx_buf, cache_line))
+				return false;
+		}
+		if (tr->rx_buf) {
+			if (!IS_ALIGNED((size_t)tr->rx_buf, cache_line))
+				return false;
+		}
+
+		if (tr->cs_change &&
+		   ((bpw != 8) || (bpw != 16) || (bpw != 32)))
+			return false;
 	}
 
-	if (tr->cs_change &&
-	   ((bpw != 8) || (bpw != 16) || (bpw != 32)))
-		return 0;
-	return 1;
+	return true;
+}
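As an illustration of the cache-line alignment criterion on the DMOV path (addresses and cache-line size below are hypothetical): with a 32-byte cache line, a buffer at 0x1000 can be DMA-mapped, while one at 0x1004 forces the driver back to FIFO mode.

/* Illustrative only: the same alignment test IS_ALIGNED() performs above. */
static void example_dma_alignment(void)
{
	unsigned int cache_line = 32;			/* hypothetical cache line size */
	int aligned = ((0x1000 % cache_line) == 0);	/* 1: DMA is allowed */
	int unaligned = ((0x1004 % cache_line) == 0);	/* 0: fall back to FIFO mode */

	(void)aligned;
	(void)unaligned;
}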
+
+/**
+ * msm_spi_set_transfer_mode: Chooses optimal transfer mode. Sets dd->mode and
+ * prepares to process a transfer.
+ */
+static void
+msm_spi_set_transfer_mode(struct msm_spi *dd, u8 bpw, u32 read_count)
+{
+	if (msm_spi_use_dma(dd, dd->cur_transfer, bpw)) {
+		if (dd->qup_ver) {
+			dd->mode = SPI_BAM_MODE;
+		} else {
+			dd->mode = SPI_DMOV_MODE;
+			if (dd->write_len && dd->read_len) {
+				dd->tx_bytes_remaining = dd->write_len;
+				dd->rx_bytes_remaining = dd->read_len;
+			}
+		}
+	} else {
+		dd->mode = SPI_FIFO_MODE;
+		if (dd->multi_xfr) {
+			dd->read_len = dd->cur_transfer->len;
+			dd->write_len = dd->cur_transfer->len;
+		}
+	}
+}
+
+/**
+ * msm_spi_set_qup_io_modes: prepares register QUP_IO_MODES to process a
+ * transfer
+ */
+static void msm_spi_set_qup_io_modes(struct msm_spi *dd)
+{
+	u32 spi_iom;
+	spi_iom = readl_relaxed(dd->base + SPI_IO_MODES);
+	/* Set input and output transfer mode: FIFO, DMOV, or BAM */
+	spi_iom &= ~(SPI_IO_M_INPUT_MODE | SPI_IO_M_OUTPUT_MODE);
+	spi_iom = (spi_iom | (dd->mode << OUTPUT_MODE_SHIFT));
+	spi_iom = (spi_iom | (dd->mode << INPUT_MODE_SHIFT));
+	/* Turn on packing for data mover */
+	if ((dd->mode == SPI_DMOV_MODE) || (dd->mode == SPI_BAM_MODE))
+		spi_iom |= SPI_IO_M_PACK_EN | SPI_IO_M_UNPACK_EN;
+	else
+		spi_iom &= ~(SPI_IO_M_PACK_EN | SPI_IO_M_UNPACK_EN);
+
+	/*if (dd->mode == SPI_BAM_MODE) {
+		spi_iom |= SPI_IO_C_NO_TRI_STATE;
+		spi_iom &= ~(SPI_IO_C_CS_SELECT | SPI_IO_C_CS_N_POLARITY);
+	}*/
+	writel_relaxed(spi_iom, dd->base + SPI_IO_MODES);
+}
+
+static u32 msm_spi_calc_spi_ioc_clk_polarity(u32 spi_ioc, u8 mode)
+{
+	if (mode & SPI_CPOL)
+		spi_ioc |= SPI_IO_C_CLK_IDLE_HIGH;
+	else
+		spi_ioc &= ~SPI_IO_C_CLK_IDLE_HIGH;
+	return spi_ioc;
+}
+
+/**
+ * msm_spi_set_spi_io_control: prepares register SPI_IO_CONTROL to process the
+ * next transfer
+ * @return the new set value of SPI_IO_CONTROL
+ */
+static u32 msm_spi_set_spi_io_control(struct msm_spi *dd)
+{
+	u32 spi_ioc, spi_ioc_orig, chip_select;
+
+	spi_ioc = readl_relaxed(dd->base + SPI_IO_CONTROL);
+	spi_ioc_orig = spi_ioc;
+	spi_ioc = msm_spi_calc_spi_ioc_clk_polarity(spi_ioc
+						, dd->cur_msg->spi->mode);
+	/* Set chip-select */
+	chip_select = dd->cur_msg->spi->chip_select << 2;
+	if ((spi_ioc & SPI_IO_C_CS_SELECT) != chip_select)
+		spi_ioc = (spi_ioc & ~SPI_IO_C_CS_SELECT) | chip_select;
+	if (!dd->cur_transfer->cs_change)
+		spi_ioc |= SPI_IO_C_MX_CS_MODE;
+
+	if (spi_ioc != spi_ioc_orig)
+		writel_relaxed(spi_ioc, dd->base + SPI_IO_CONTROL);
+
+	return spi_ioc;
+}
+
+/**
+ * msm_spi_set_qup_op_mask: prepares register QUP_OPERATIONAL_MASK to process
+ * the next transfer
+ */
+static void msm_spi_set_qup_op_mask(struct msm_spi *dd)
+{
+	/*
+	 * Mask the INPUT and OUTPUT service flags in order to prevent IRQs
+	 * on FIFO status changes in BAM mode.
+	 */
+	u32 mask = (dd->mode == SPI_BAM_MODE) ?
+		QUP_OP_MASK_OUTPUT_SERVICE_FLAG | QUP_OP_MASK_INPUT_SERVICE_FLAG
+		: 0;
+	writel_relaxed(mask, dd->base + QUP_OPERATIONAL_MASK);
 }
 
 static void msm_spi_process_transfer(struct msm_spi *dd)
 {
 	u8  bpw;
-	u32 spi_ioc;
-	u32 spi_iom;
-	u32 spi_ioc_orig;
 	u32 max_speed;
-	u32 chip_select;
 	u32 read_count;
 	u32 timeout;
+	u32 spi_ioc;
 	u32 int_loopback = 0;
 
 	dd->tx_bytes_remaining = dd->cur_msg_len;
@@ -987,6 +1322,10 @@
 	if (!dd->clock_speed || max_speed != dd->clock_speed)
 		msm_spi_clock_set(dd, max_speed);
 
+	timeout = 100 * msecs_to_jiffies(
+			DIV_ROUND_UP(dd->cur_msg_len * 8,
+			DIV_ROUND_UP(max_speed, MSEC_PER_SEC)));
+
 	read_count = DIV_ROUND_UP(dd->cur_msg_len, dd->bytes_per_word);
 	if (dd->cur_msg->spi->mode & SPI_LOOP)
 		int_loopback = 1;
@@ -1004,60 +1343,24 @@
 			__func__);
 		return;
 	}
-	if (!msm_use_dm(dd, dd->cur_transfer, bpw)) {
-		dd->mode = SPI_FIFO_MODE;
-		if (dd->multi_xfr) {
-			dd->read_len = dd->cur_transfer->len;
-			dd->write_len = dd->cur_transfer->len;
-		}
-		/* read_count cannot exceed fifo_size, and only one READ COUNT
-		   interrupt is generated per transaction, so for transactions
-		   larger than fifo size READ COUNT must be disabled.
-		   For those transactions we usually move to Data Mover mode.
-		*/
-		if (read_count <= dd->input_fifo_size) {
-			writel_relaxed(read_count,
-				       dd->base + SPI_MX_READ_COUNT);
-			msm_spi_set_write_count(dd, read_count);
-		} else {
-			writel_relaxed(0, dd->base + SPI_MX_READ_COUNT);
-			msm_spi_set_write_count(dd, 0);
-		}
-	} else {
-		dd->mode = SPI_DMOV_MODE;
-		if (dd->write_len && dd->read_len) {
-			dd->tx_bytes_remaining = dd->write_len;
-			dd->rx_bytes_remaining = dd->read_len;
-		}
-	}
 
-	/* Write mode - fifo or data mover*/
-	spi_iom = readl_relaxed(dd->base + SPI_IO_MODES);
-	spi_iom &= ~(SPI_IO_M_INPUT_MODE | SPI_IO_M_OUTPUT_MODE);
-	spi_iom = (spi_iom | (dd->mode << OUTPUT_MODE_SHIFT));
-	spi_iom = (spi_iom | (dd->mode << INPUT_MODE_SHIFT));
-	/* Turn on packing for data mover */
-	if (dd->mode == SPI_DMOV_MODE)
-		spi_iom |= SPI_IO_M_PACK_EN | SPI_IO_M_UNPACK_EN;
-	else
-		spi_iom &= ~(SPI_IO_M_PACK_EN | SPI_IO_M_UNPACK_EN);
-	writel_relaxed(spi_iom, dd->base + SPI_IO_MODES);
+	if (msm_spi_set_state(dd, SPI_OP_STATE_RESET))
+		dev_err(dd->dev,
+			"%s: Error setting QUP to reset-state",
+			__func__);
 
-	msm_spi_set_config(dd, bpw);
-
-	spi_ioc = readl_relaxed(dd->base + SPI_IO_CONTROL);
-	spi_ioc_orig = spi_ioc;
-	if (dd->cur_msg->spi->mode & SPI_CPOL)
-		spi_ioc |= SPI_IO_C_CLK_IDLE_HIGH;
-	else
-		spi_ioc &= ~SPI_IO_C_CLK_IDLE_HIGH;
-	chip_select = dd->cur_msg->spi->chip_select << 2;
-	if ((spi_ioc & SPI_IO_C_CS_SELECT) != chip_select)
-		spi_ioc = (spi_ioc & ~SPI_IO_C_CS_SELECT) | chip_select;
-	if (!dd->cur_transfer->cs_change)
-		spi_ioc |= SPI_IO_C_MX_CS_MODE;
-	if (spi_ioc != spi_ioc_orig)
-		writel_relaxed(spi_ioc, dd->base + SPI_IO_CONTROL);
+	msm_spi_set_transfer_mode(dd, bpw, read_count);
+	msm_spi_set_mx_counts(dd, read_count);
+	if ((dd->mode == SPI_BAM_MODE) || (dd->mode == SPI_DMOV_MODE))
+		if (msm_spi_dma_map_buffers(dd) < 0) {
+			pr_err("Error mapping DMA buffers\n");
+			return;
+		}
+	msm_spi_set_qup_io_modes(dd);
+	msm_spi_set_spi_config(dd, bpw);
+	msm_spi_set_qup_config(dd, bpw);
+	spi_ioc = msm_spi_set_spi_io_control(dd);
+	msm_spi_set_qup_op_mask(dd);
 
 	if (dd->mode == SPI_DMOV_MODE) {
 		msm_spi_setup_dm_transfer(dd);
@@ -1071,27 +1374,35 @@
 		if (msm_spi_prepare_for_write(dd))
 			goto transfer_end;
 		msm_spi_start_write(dd, read_count);
+	} else if (dd->mode == SPI_BAM_MODE) {
+		if ((msm_spi_bam_begin_transfer(dd, timeout, bpw)) < 0)
+			dev_err(dd->dev, "%s: BAM transfer setup failed\n",
+				__func__);
 	}
 
-	/* Only enter the RUN state after the first word is written into
-	   the output FIFO. Otherwise, the output FIFO EMPTY interrupt
-	   might fire before the first word is written resulting in a
-	   possible race condition.
+	/*
+	 * In BAM mode, the current state at this point is already RUN.
+	 * Only enter the RUN state after the first word is written into
+	 * the output FIFO. Otherwise, the output FIFO EMPTY interrupt
+	 * might fire before the first word is written resulting in a
+	 * possible race condition.
 	 */
-	if (msm_spi_set_state(dd, SPI_OP_STATE_RUN))
-		goto transfer_end;
-
-	timeout = 100 * msecs_to_jiffies(
-	      DIV_ROUND_UP(dd->cur_msg_len * 8,
-		 DIV_ROUND_UP(max_speed, MSEC_PER_SEC)));
+	if (dd->mode != SPI_BAM_MODE)
+		if (msm_spi_set_state(dd, SPI_OP_STATE_RUN)) {
+			dev_warn(dd->dev,
+				"%s: Failed to set QUP to run-state. Mode:%d",
+				__func__, dd->mode);
+			goto transfer_end;
+		}
 
 	/* Assume success, this might change later upon transaction result */
 	dd->cur_msg->status = 0;
 	do {
 		if (!wait_for_completion_timeout(&dd->transfer_complete,
 						 timeout)) {
-				dev_err(dd->dev, "%s: SPI transaction "
-						 "timeout\n", __func__);
+				dev_err(dd->dev,
+					"%s: SPI transaction timeout\n",
+					__func__);
 				dd->cur_msg->status = -EIO;
 				if (dd->mode == SPI_DMOV_MODE) {
 					msm_dmov_flush(dd->tx_dma_chan, 1);
@@ -1102,8 +1413,7 @@
 	} while (msm_spi_dm_send_next(dd));
 
 transfer_end:
-	if (dd->mode == SPI_DMOV_MODE)
-		msm_spi_unmap_dma_buffers(dd);
+	msm_spi_dma_unmap_buffers(dd);
 	dd->mode = SPI_MODE_NONE;
 
 	msm_spi_set_state(dd, SPI_OP_STATE_RESET);
@@ -1266,10 +1576,10 @@
 			 * WR-WR or WR-RD transfers
 			 */
 			if ((!dd->cur_msg->is_dma_mapped) &&
-			    (msm_use_dm(dd, dd->cur_transfer,
+			    (msm_spi_use_dma(dd, dd->cur_transfer,
 					dd->cur_transfer->bits_per_word))) {
 				/* Mapping of DMA buffers */
-				int ret = msm_spi_map_dma_buffers(dd);
+				int ret = msm_spi_dma_map_buffers(dd);
 				if (ret < 0) {
 					dd->cur_msg->status = ret;
 					goto error;
@@ -1474,22 +1784,13 @@
 		spi_ioc |= mask;
 	else
 		spi_ioc &= ~mask;
-	if (spi->mode & SPI_CPOL)
-		spi_ioc |= SPI_IO_C_CLK_IDLE_HIGH;
-	else
-		spi_ioc &= ~SPI_IO_C_CLK_IDLE_HIGH;
+	spi_ioc = msm_spi_calc_spi_ioc_clk_polarity(spi_ioc, spi->mode);
 
 	writel_relaxed(spi_ioc, dd->base + SPI_IO_CONTROL);
 
 	spi_config = readl_relaxed(dd->base + SPI_CONFIG);
-	if (spi->mode & SPI_LOOP)
-		spi_config |= SPI_CFG_LOOPBACK;
-	else
-		spi_config &= ~SPI_CFG_LOOPBACK;
-	if (spi->mode & SPI_CPHA)
-		spi_config &= ~SPI_CFG_INPUT_FIRST;
-	else
-		spi_config |= SPI_CFG_INPUT_FIRST;
+	spi_config = msm_spi_calc_spi_config_loopback_and_input_first(
+							spi_config, spi->mode);
 	writel_relaxed(spi_config, dd->base + SPI_CONFIG);
 
 	/* Ensure previous write completed before disabling the clocks */
@@ -1730,7 +2031,7 @@
 			  roundup(dd->burst_size, cache_line))*2;
 }
 
-static void msm_spi_teardown_dma(struct msm_spi *dd)
+static void msm_spi_dmov_teardown(struct msm_spi *dd)
 {
 	int limit = 0;
 
@@ -1749,7 +2050,171 @@
 	dd->tx_padding = dd->rx_padding = NULL;
 }
 
-static __init int msm_spi_init_dma(struct msm_spi *dd)
+static void msm_spi_bam_pipe_teardown(struct msm_spi *dd,
+					enum msm_spi_pipe_direction pipe_dir)
+{
+	struct msm_spi_bam_pipe *pipe = (pipe_dir == SPI_BAM_CONSUMER_PIPE) ?
+					(&dd->bam.prod) : (&dd->bam.cons);
+	if (!pipe->teardown_required)
+		return;
+
+	sps_disconnect(pipe->handle);
+	dma_free_coherent(dd->dev, pipe->config.desc.size,
+		pipe->config.desc.base, pipe->config.desc.phys_base);
+	sps_free_endpoint(pipe->handle);
+	pipe->handle = 0;
+	pipe->teardown_required = false;
+}
+
+static int msm_spi_bam_pipe_init(struct msm_spi *dd,
+					enum msm_spi_pipe_direction pipe_dir)
+{
+	int rc = 0;
+	struct sps_pipe *pipe_handle;
+	struct sps_register_event event = {0};
+	struct msm_spi_bam_pipe *pipe = (pipe_dir == SPI_BAM_CONSUMER_PIPE) ?
+					(&dd->bam.prod) : (&dd->bam.cons);
+	struct sps_connect *pipe_conf = &pipe->config;
+
+	pipe->handle = 0;
+	pipe_handle = sps_alloc_endpoint();
+	if (!pipe_handle) {
+		dev_err(dd->dev, "%s: Failed to allocate BAM endpoint\n"
+								, __func__);
+		return -ENOMEM;
+	}
+
+	memset(pipe_conf, 0, sizeof(*pipe_conf));
+	rc = sps_get_config(pipe_handle, pipe_conf);
+	if (rc) {
+		dev_err(dd->dev, "%s: Failed to get BAM pipe config\n"
+			, __func__);
+		goto config_err;
+	}
+
+	if (pipe_dir == SPI_BAM_CONSUMER_PIPE) {
+		pipe_conf->source          = dd->bam.handle;
+		pipe_conf->destination     = SPS_DEV_HANDLE_MEM;
+		pipe_conf->mode            = SPS_MODE_SRC;
+		pipe_conf->src_pipe_index  =
+					dd->pdata->bam_producer_pipe_index;
+		pipe_conf->dest_pipe_index = 0;
+	} else {
+		pipe_conf->source          = SPS_DEV_HANDLE_MEM;
+		pipe_conf->destination     = dd->bam.handle;
+		pipe_conf->mode            = SPS_MODE_DEST;
+		pipe_conf->src_pipe_index  = 0;
+		pipe_conf->dest_pipe_index =
+					dd->pdata->bam_consumer_pipe_index;
+	}
+	pipe_conf->options = SPS_O_EOT | SPS_O_AUTO_ENABLE;
+	pipe_conf->desc.size = SPI_BAM_MAX_DESC_NUM * sizeof(struct sps_iovec);
+	pipe_conf->desc.base = dma_alloc_coherent(dd->dev,
+				pipe_conf->desc.size,
+				&pipe_conf->desc.phys_base,
+				GFP_KERNEL);
+	if (!pipe_conf->desc.base) {
+		dev_err(dd->dev, "%s: Failed to allocate BAM pipe memory"
+			, __func__);
+		rc = -ENOMEM;
+		goto config_err;
+	}
+
+	memset(pipe_conf->desc.base, 0x00, pipe_conf->desc.size);
+
+	rc = sps_connect(pipe_handle, pipe_conf);
+	if (rc) {
+		dev_err(dd->dev, "%s: Failed to connect BAM pipe", __func__);
+		goto connect_err;
+	}
+
+	event.mode      = SPS_TRIGGER_WAIT;
+	event.options   = SPS_O_EOT;
+	event.xfer_done = &dd->transfer_complete;
+	event.user      = (void *)dd;
+	rc = sps_register_event(pipe_handle, &event);
+	if (rc) {
+		dev_err(dd->dev, "%s: Failed to register BAM EOT event",
+			__func__);
+		goto register_err;
+	}
+
+	pipe->handle = pipe_handle;
+	pipe->teardown_required = true;
+	return 0;
+
+register_err:
+	sps_disconnect(pipe_handle);
+connect_err:
+	dma_free_coherent(dd->dev, pipe_conf->desc.size,
+		pipe_conf->desc.base, pipe_conf->desc.phys_base);
+config_err:
+	sps_free_endpoint(pipe_handle);
+
+	return rc;
+}
+
+static void msm_spi_bam_teardown(struct msm_spi *dd)
+{
+	msm_spi_bam_pipe_teardown(dd, SPI_BAM_PRODUCER_PIPE);
+	msm_spi_bam_pipe_teardown(dd, SPI_BAM_CONSUMER_PIPE);
+
+	if (dd->bam.deregister_required) {
+		sps_deregister_bam_device(dd->bam.handle);
+		dd->bam.deregister_required = false;
+	}
+}
+
+static int msm_spi_bam_init(struct msm_spi *dd)
+{
+	struct sps_bam_props bam_props = {0};
+	u32 bam_handle;
+	int rc = 0;
+
+	rc = sps_phy2h(dd->bam.phys_addr, &bam_handle);
+	if (rc || !bam_handle) {
+		bam_props.phys_addr = dd->bam.phys_addr;
+		bam_props.virt_addr = dd->bam.base;
+		bam_props.irq       = dd->bam.irq;
+		bam_props.manage    = SPS_BAM_MGR_LOCAL;
+		bam_props.summing_threshold = 0x10;
+
+		rc = sps_register_bam_device(&bam_props, &bam_handle);
+		if (rc) {
+			dev_err(dd->dev,
+				"%s: Failed to register BAM device",
+				__func__);
+			return rc;
+		}
+		dd->bam.deregister_required = true;
+	}
+
+	dd->bam.handle = bam_handle;
+
+	rc = msm_spi_bam_pipe_init(dd, SPI_BAM_PRODUCER_PIPE);
+	if (rc) {
+		dev_err(dd->dev,
+			"%s: Failed to init producer BAM-pipe",
+			__func__);
+		goto bam_init_error;
+	}
+
+	rc = msm_spi_bam_pipe_init(dd, SPI_BAM_CONSUMER_PIPE);
+	if (rc) {
+		dev_err(dd->dev,
+			"%s: Failed to init consumer BAM-pipe",
+			__func__);
+		goto bam_init_error;
+	}
+
+	return 0;
+
+bam_init_error:
+	msm_spi_bam_teardown(dd);
+	return rc;
+}
+
+static __init int msm_spi_dmov_init(struct msm_spi *dd)
 {
 	dmov_box *box;
 	u32 cache_line = dma_get_cache_alignment();
@@ -1811,10 +2276,15 @@
 	return 0;
 }
 
-struct msm_spi_platform_data *msm_spi_dt_to_pdata(struct platform_device *pdev)
+/**
+ * msm_spi_dt_to_pdata: copy device-tree data to platform data struct
+ */
+struct msm_spi_platform_data *
+__init msm_spi_dt_to_pdata(struct platform_device *pdev)
 {
 	struct device_node *node = pdev->dev.of_node;
 	struct msm_spi_platform_data *pdata;
+	int rc;
 
 	pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
 	if (!pdata) {
@@ -1827,9 +2297,76 @@
 	of_property_read_u32(node, "infinite_mode",
 			&pdata->infinite_mode);
 
+	pdata->ver_reg_exists = of_property_read_bool(node
+						, "qcom,ver-reg-exists");
+
+	pdata->use_bam = of_property_read_bool(node, "qcom,use-bam");
+
+	if (pdata->use_bam) {
+		rc = of_property_read_u32(node, "qcom,bam-consumer-pipe-index",
+					&pdata->bam_consumer_pipe_index);
+		if (rc) {
+			dev_warn(&pdev->dev,
+			"missing qcom,bam-consumer-pipe-index entry in device-tree\n");
+			pdata->use_bam = false;
+		}
+
+		rc = of_property_read_u32(node, "qcom,bam-producer-pipe-index",
+					&pdata->bam_producer_pipe_index);
+		if (rc) {
+			dev_warn(&pdev->dev,
+			"missing qcom,bam-producer-pipe-index entry in device-tree\n");
+			pdata->use_bam = false;
+		}
+	}
 	return pdata;
 }
 
+static int __init msm_spi_get_qup_hw_ver(struct device *dev, struct msm_spi *dd)
+{
+	u32 data = readl_relaxed(dd->base + QUP_HARDWARE_VER);
+	return (data >= QUP_HARDWARE_VER_2_1_1) ? SPI_QUP_VERSION_BFAM
+						: SPI_QUP_VERSION_NONE;
+}
+
+static int __init msm_spi_bam_get_resources(struct msm_spi *dd,
+	struct platform_device *pdev, struct spi_master *master)
+{
+	struct resource *resource;
+	size_t bam_mem_size;
+
+	resource = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+						"spi_bam_physical");
+	if (!resource) {
+		dev_warn(&pdev->dev,
+			"%s: Missing spi_bam_physical entry in DT",
+			__func__);
+		return -ENXIO;
+	}
+
+	dd->bam.phys_addr = resource->start;
+	bam_mem_size = resource_size(resource);
+	dd->bam.base = devm_ioremap(&pdev->dev, dd->bam.phys_addr,
+					bam_mem_size);
+	if (!dd->bam.base) {
+		dev_warn(&pdev->dev,
+			"%s: Failed to ioremap(spi_bam_physical)",
+			__func__);
+		return -ENXIO;
+	}
+
+	dd->bam.irq = platform_get_irq_byname(pdev, "spi_bam_irq");
+	if (dd->bam.irq < 0) {
+		dev_warn(&pdev->dev, "%s: Missing spi_bam_irq entry in DT",
+			__func__);
+		return -EINVAL;
+	}
+
+	dd->dma_init = msm_spi_bam_init;
+	dd->dma_teardown = msm_spi_bam_teardown;
+	return 0;
+}
+
 static int __init msm_spi_probe(struct platform_device *pdev)
 {
 	struct spi_master      *master;
@@ -1926,21 +2463,39 @@
 				goto skip_dma_resources;
 			}
 		}
-		resource = platform_get_resource(pdev, IORESOURCE_DMA, 0);
-		if (resource) {
-			dd->rx_dma_chan = resource->start;
-			dd->tx_dma_chan = resource->end;
-			resource = platform_get_resource(pdev, IORESOURCE_DMA,
-							1);
-			if (!resource) {
-				rc = -ENXIO;
-				goto err_probe_res;
-			}
+		if (dd->qup_ver == SPI_QUP_VERSION_NONE) {
+			resource = platform_get_resource(pdev,
+							IORESOURCE_DMA, 0);
+			if (resource) {
+				dd->rx_dma_chan = resource->start;
+				dd->tx_dma_chan = resource->end;
+				resource = platform_get_resource(pdev,
+							IORESOURCE_DMA, 1);
+				if (!resource) {
+					rc = -ENXIO;
+					goto err_probe_res;
+				}
 
-			dd->rx_dma_crci = resource->start;
-			dd->tx_dma_crci = resource->end;
+				dd->rx_dma_crci = resource->start;
+				dd->tx_dma_crci = resource->end;
+				dd->use_dma = 1;
+				master->dma_alignment =
+						dma_get_cache_alignment();
+				dd->dma_init = msm_spi_dmov_init;
+				dd->dma_teardown = msm_spi_dmov_teardown;
+			}
+		} else {
+			if (!dd->pdata->use_bam)
+				goto skip_dma_resources;
+
+			rc = msm_spi_bam_get_resources(dd, pdev, master);
+			if (rc) {
+				dev_warn(dd->dev,
+					"%s: Failed to get BAM resources",
+					__func__);
+				goto skip_dma_resources;
+			}
 			dd->use_dma = 1;
-			master->dma_alignment =	dma_get_cache_alignment();
 		}
 	}
 
@@ -1968,6 +2523,15 @@
 		goto err_probe_reqmem;
 	}
 
+	if (pdata && pdata->ver_reg_exists) {
+		enum msm_spi_qup_version ver =
+					msm_spi_get_qup_hw_ver(&pdev->dev, dd);
+		if (dd->qup_ver != ver)
+			dev_warn(&pdev->dev,
+			"%s: HW version different than initially assumed by probe",
+			__func__);
+	}
+
 	if (pdata && pdata->rsl_id) {
 		struct remote_mutex_id rmid;
 		rmid.r_spinlock_id = pdata->rsl_id;
@@ -1984,7 +2548,7 @@
 		dd->use_rlock = 1;
 		dd->pm_lat = pdata->pm_lat;
 		pm_qos_add_request(&qos_req_list, PM_QOS_CPU_DMA_LATENCY,
-					PM_QOS_DEFAULT_VALUE);
+						PM_QOS_DEFAULT_VALUE);
 	}
 
 	mutex_lock(&dd->core_lock);
@@ -2026,13 +2590,16 @@
 	}
 
 	pclk_enabled = 1;
-	rc = msm_spi_configure_gsbi(dd, pdev);
-	if (rc)
-		goto err_probe_gsbi;
+	/* GSBI does not exist on B-family MSM chips */
+	if (dd->qup_ver != SPI_QUP_VERSION_BFAM) {
+		rc = msm_spi_configure_gsbi(dd, pdev);
+		if (rc)
+			goto err_probe_gsbi;
+	}
 
 	msm_spi_calculate_fifo_size(dd);
 	if (dd->use_dma) {
-		rc = msm_spi_init_dma(dd);
+		rc = dd->dma_init(dd);
 		if (rc)
 			goto err_probe_dma;
 	}
@@ -2091,7 +2658,7 @@
 err_probe_reg_master:
 err_probe_irq:
 err_probe_state:
-	msm_spi_teardown_dma(dd);
+	dd->dma_teardown(dd);
 err_probe_dma:
 err_probe_gsbi:
 	if (pclk_enabled)
@@ -2174,8 +2741,7 @@
 	spi_debugfs_exit(dd);
 	sysfs_remove_group(&pdev->dev.kobj, &dev_attr_grp);
 
-	msm_spi_teardown_dma(dd);
-
+	dd->dma_teardown(dd);
 	clk_put(dd->clk);
 	clk_put(dd->pclk);
 	destroy_workqueue(dd->workqueue);
diff --git a/drivers/spi/spi_qsd.h b/drivers/spi/spi_qsd.h
index a0dee34..62f1830 100644
--- a/drivers/spi/spi_qsd.h
+++ b/drivers/spi/spi_qsd.h
@@ -41,8 +41,13 @@
 
 #define GSBI_CTRL_REG                 0x0
 #define GSBI_SPI_CONFIG               0x30
+/* B-family only registers */
 #define QUP_HARDWARE_VER              0x0030
+#define QUP_HARDWARE_VER_2_1_1        0X20010001
 #define QUP_OPERATIONAL_MASK          0x0028
+#define QUP_OP_MASK_OUTPUT_SERVICE_FLAG 0x100
+#define QUP_OP_MASK_INPUT_SERVICE_FLAG  0x200
+
 #define QUP_ERROR_FLAGS               0x0308
 
 #define SPI_CONFIG                    QSD_REG(0x0000) QUP_REG(0x0300)
@@ -73,6 +78,7 @@
 #define SPI_NO_OUTPUT                 0x00000040
 #define SPI_CFG_LOOPBACK              0x00000100
 #define SPI_CFG_N                     0x0000001F
+#define SPI_EN_EXT_OUT_FLAG           0x00010000
 
 /* SPI_IO_CONTROL fields */
 #define SPI_IO_C_FORCE_CS             0x00000800
@@ -148,8 +154,18 @@
 /* Data Mover commands should be aligned to 64 bit(8 bytes) */
 #define DM_BYTE_ALIGN                 8
 
-#define SPI_QUP_VERSION_NONE      0x0
-#define SPI_QUP_VERSION_BFAM      0x2
+enum msm_spi_qup_version {
+	SPI_QUP_VERSION_NONE    = 0x0,
+	SPI_QUP_VERSION_BFAM    = 0x2,
+};
+
+enum msm_spi_pipe_direction {
+	SPI_BAM_CONSUMER_PIPE   = 0x0,
+	SPI_BAM_PRODUCER_PIPE   = 0x1,
+};
+
+#define SPI_BAM_MAX_DESC_NUM      32
+#define SPI_MAX_TRFR_BTWN_RESETS  ((64 * 1024) - 16)  /* 64KB - 16 bytes */
 
 static char const * const spi_rsrcs[] = {
 	"spi_clk",
@@ -231,6 +247,22 @@
 };
 #endif
 
+struct msm_spi_bam_pipe {
+	struct sps_pipe         *handle;
+	struct sps_connect       config;
+	bool                     teardown_required;
+};
+
+struct msm_spi_bam {
+	void __iomem            *base;
+	u32                      phys_addr;
+	u32                      handle;
+	u32                      irq;
+	struct msm_spi_bam_pipe  prod;
+	struct msm_spi_bam_pipe  cons;
+	bool                     deregister_required;
+};
+
 struct msm_spi {
 	u8                      *read_buf;
 	const u8                *write_buf;
@@ -244,8 +276,8 @@
 	struct spi_message      *cur_msg;
 	struct spi_transfer     *cur_transfer;
 	struct completion        transfer_complete;
-	struct clk              *clk;
-	struct clk              *pclk;
+	struct clk              *clk;    /* core clock */
+	struct clk              *pclk;   /* interface clock */
 	unsigned long            mem_phys_addr;
 	size_t                   mem_size;
 	int                      input_fifo_size;
@@ -273,6 +305,9 @@
 	int                      tx_dma_crci;
 	int                      rx_dma_chan;
 	int                      rx_dma_crci;
+	int                      (*dma_init) (struct msm_spi *dd);
+	void                     (*dma_teardown) (struct msm_spi *dd);
+	struct msm_spi_bam       bam;
 	/* Data Mover Commands */
 	struct spi_dmov_cmd      *tx_dmov_cmd;
 	struct spi_dmov_cmd      *rx_dmov_cmd;
@@ -321,7 +356,7 @@
 	int                      spi_gpios[ARRAY_SIZE(spi_rsrcs)];
 	/* SPI CS GPIOs for each slave */
 	struct spi_cs_gpio       cs_gpios[ARRAY_SIZE(spi_cs_rsrcs)];
-	int                      qup_ver;
+	enum msm_spi_qup_version qup_ver;
 	int			 max_trfr_len;
 };
 
@@ -333,7 +368,7 @@
 				    enum msm_spi_state state);
 static void msm_spi_write_word_to_fifo(struct msm_spi *dd);
 static inline void msm_spi_write_rmn_to_fifo(struct msm_spi *dd);
-static inline irqreturn_t msm_spi_qup_irq(int irq, void *dev_id);
+static irqreturn_t msm_spi_qup_irq(int irq, void *dev_id);
 
 #if defined(CONFIG_SPI_QSD) || defined(CONFIG_SPI_QSD_MODULE)
 static inline void msm_spi_disable_irqs(struct msm_spi *dd)
@@ -385,7 +420,7 @@
 static inline void msm_spi_ack_clk_err(struct msm_spi *dd) {}
 static inline void msm_spi_set_qup_config(struct msm_spi *dd, int bpw) {}
 
-static inline int msm_spi_prepare_for_write(struct msm_spi *dd) { return 0; }
+static inline int  msm_spi_prepare_for_write(struct msm_spi *dd) { return 0; }
 static inline void msm_spi_start_write(struct msm_spi *dd, u32 read_count)
 {
 	msm_spi_write_word_to_fifo(dd);
@@ -441,16 +476,18 @@
 	writel_relaxed(QUP_ERR_MASK, dd->base + QUP_ERROR_FLAGS);
 }
 
-static inline void msm_spi_add_configs(struct msm_spi *dd, u32 *config, int n);
+static inline void
+msm_spi_set_bpw_and_no_io_flags(struct msm_spi *dd, u32 *config, int n);
 
-/* QUP has no_input, no_output, and N bits at QUP_CONFIG */
+/**
+ * msm_spi_set_qup_config: set QUP_CONFIG to no_input, no_output, and N bits
+ */
 static inline void msm_spi_set_qup_config(struct msm_spi *dd, int bpw)
 {
 	u32 qup_config = readl_relaxed(dd->base + QUP_CONFIG);
 
-	msm_spi_add_configs(dd, &qup_config, bpw-1);
-	writel_relaxed(qup_config | QUP_CONFIG_SPI_MODE,
-		       dd->base + QUP_CONFIG);
+	msm_spi_set_bpw_and_no_io_flags(dd, &qup_config, bpw-1);
+	writel_relaxed(qup_config | QUP_CONFIG_SPI_MODE, dd->base + QUP_CONFIG);
 }
 
 static inline int msm_spi_prepare_for_write(struct msm_spi *dd)
@@ -482,12 +519,22 @@
 
 static inline void msm_spi_enable_error_flags(struct msm_spi *dd)
 {
-	writel_relaxed(0x00000078, dd->base + SPI_ERROR_FLAGS_EN);
+	if (dd->qup_ver == SPI_QUP_VERSION_BFAM)
+		writel_relaxed(
+			SPI_ERR_CLK_UNDER_RUN_ERR | SPI_ERR_CLK_OVER_RUN_ERR,
+			dd->base + SPI_ERROR_FLAGS_EN);
+	else
+		writel_relaxed(0x00000078, dd->base + SPI_ERROR_FLAGS_EN);
 }
 
 static inline void msm_spi_clear_error_flags(struct msm_spi *dd)
 {
-	writel_relaxed(0x0000007C, dd->base + SPI_ERROR_FLAGS);
+	if (dd->qup_ver == SPI_QUP_VERSION_BFAM)
+		writel_relaxed(
+			SPI_ERR_CLK_UNDER_RUN_ERR | SPI_ERR_CLK_OVER_RUN_ERR,
+			dd->base + SPI_ERROR_FLAGS);
+	else
+		writel_relaxed(0x0000007C, dd->base + SPI_ERROR_FLAGS);
 }
 
 #endif
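
For reference, a hedged sketch of how the msm_spi_get_qup_hw_ver() helper used
in the probe hunk above might read the new QUP_HARDWARE_VER register (the real
helper is not shown in this excerpt; the threshold comparison is an assumption):

	static enum msm_spi_qup_version
	msm_spi_get_qup_hw_ver(struct device *dev, struct msm_spi *dd)
	{
		/* Assumed: the register encodes a monotonically increasing version */
		u32 ver = readl_relaxed(dd->base + QUP_HARDWARE_VER);

		dev_dbg(dev, "QUP_HARDWARE_VER: 0x%x\n", ver);

		return (ver >= QUP_HARDWARE_VER_2_1_1) ?
			SPI_QUP_VERSION_BFAM : SPI_QUP_VERSION_NONE;
	}
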
diff --git a/drivers/spmi/Makefile b/drivers/spmi/Makefile
index becd823..2161fac 100644
--- a/drivers/spmi/Makefile
+++ b/drivers/spmi/Makefile
@@ -4,3 +4,7 @@
 obj-$(CONFIG_SPMI)			+= spmi.o spmi-resources.o
 obj-$(CONFIG_SPMI_MSM_PMIC_ARB)		+= spmi-pmic-arb.o
 obj-$(CONFIG_MSM_QPNP_INT)		+= qpnp-int.o
+
+ifdef CONFIG_DEBUG_FS
+obj-$(CONFIG_SPMI)			+= spmi-dbgfs.o
+endif
diff --git a/drivers/spmi/spmi-dbgfs.c b/drivers/spmi/spmi-dbgfs.c
new file mode 100644
index 0000000..a23f945
--- /dev/null
+++ b/drivers/spmi/spmi-dbgfs.c
@@ -0,0 +1,725 @@
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/**
+ * SPMI Debug-fs support.
+ *
+ * Hierarchy schema:
+ * /sys/kernel/debug/spmi
+ *        /help			-- static help text
+ *        /spmi-0
+ *        /spmi-0/address	-- Starting register address for reads or writes
+ *        /spmi-0/count		-- number of registers to read (only on read)
+ *        /spmi-0/data		-- Triggers the SPMI formatted read.
+ *        /spmi-0/data_raw	-- Triggers the SPMI raw read or write
+ *        /spmi-#
+ */
+
+#define DEBUG
+#define pr_fmt(fmt) "%s:%d: " fmt, __func__, __LINE__
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/debugfs.h>
+#include <linux/spmi.h>
+#include <linux/ctype.h>
+
+#define ADDR_LEN	 6	/* 5 byte address + 1 space character */
+#define CHARS_PER_ITEM   3	/* Format is 'XX ' */
+#define ITEMS_PER_LINE	16	/* 16 data items per line */
+#define MAX_LINE_LENGTH  (ADDR_LEN + (ITEMS_PER_LINE * CHARS_PER_ITEM) + 1)
+#define MAX_REG_PER_TRANSACTION	(8)
+
+static const char *DFS_ROOT_NAME	= "spmi";
+static const mode_t DFS_MODE = S_IRUSR | S_IWUSR;
+
+/* Log buffer */
+struct spmi_log_buffer {
+	u32  rpos;	/* Current 'read' position in buffer */
+	u32  wpos;	/* Current 'write' position in buffer */
+	u32  len;	/* Length of the buffer */
+	char data[0];	/* Log buffer */
+};
+
+/* SPMI controller specific data */
+struct spmi_ctrl_data {
+	u32 cnt;
+	u32 addr;
+	struct list_head node;
+	struct spmi_controller *ctrl;
+};
+
+/* SPMI transaction parameters */
+struct spmi_trans {
+	u32 cnt;	/* Number of bytes to read */
+	u32 addr;	/* 20-bit address: SID + PID + Register offset */
+	u32 offset;	/* Offset of last read data */
+	bool raw_data;	/* Set to true for raw data dump */
+	struct spmi_controller *ctrl;
+	struct spmi_log_buffer *log; /* log buffer */
+};
+
+struct spmi_dbgfs {
+	struct dentry *root;
+	struct mutex  lock;
+	struct list_head ctrl; /* List of spmi_ctrl_data nodes */
+	struct debugfs_blob_wrapper help_msg;
+};
+
+static struct spmi_dbgfs dbgfs_data = {
+	.lock = __MUTEX_INITIALIZER(dbgfs_data.lock),
+	.ctrl = LIST_HEAD_INIT(dbgfs_data.ctrl),
+	.help_msg = {
+	.data =
+"SPMI Debug-FS support\n"
+"\n"
+"Hierarchy schema:\n"
+"/sys/kernel/debug/spmi\n"
+"       /help            -- Static help text\n"
+"       /spmi-0          -- Directory for SPMI bus 0\n"
+"       /spmi-0/address  -- Starting register address for reads or writes\n"
+"       /spmi-0/count    -- Number of registers to read (only used for reads)\n"
+"       /spmi-0/data     -- Initiates the SPMI read (formatted output)\n"
+"       /spmi-0/data_raw -- Initiates the SPMI raw read or write\n"
+"       /spmi-n          -- Directory for SPMI bus n\n"
+"\n"
+"To perform SPMI read or write transactions, you need to first write the\n"
+"address of the slave device register to the 'address' file.  For read\n"
+"transactions, the number of bytes to be read needs to be written to the\n"
+"'count' file.\n"
+"\n"
+"The 'address' file specifies the 20-bit address of a slave device register.\n"
+"The upper 4 bits 'address[19..16]' specify the slave identifier (SID) for\n"
+"the slave device.  The lower 16 bits specify the slave register address.\n"
+"\n"
+"Reading from the 'data' file will initiate a SPMI read transaction starting\n"
+"from slave register 'address' for 'count' number of bytes.\n"
+"\n"
+"Writing to the 'data' file will initiate a SPMI write transaction starting\n"
+"from slave register 'address'.  The number of registers written to will\n"
+"match the number of bytes written to the 'data' file.\n"
+"\n"
+"Example: Read 4 bytes starting at register address 0x1234 for SID 2\n"
+"\n"
+"echo 0x21234 > address\n"
+"echo 4 > count\n"
+"cat data\n"
+"\n"
+"Example: Write 3 bytes starting at register address 0x1008 for SID 1\n"
+"\n"
+"echo 0x11008 > address\n"
+"echo 0x01 0x02 0x03 > data\n"
+"\n"
+"Note that the count file is not used for writes.  Since 3 bytes are\n"
+"written to the 'data' file, then 3 bytes will be written across the\n"
+"SPMI bus.\n\n",
+	},
+};
+
+static int spmi_dfs_open(struct spmi_ctrl_data *ctrl_data, struct file *file)
+{
+	struct spmi_log_buffer *log;
+	struct spmi_trans *trans;
+
+	size_t logbufsize = SZ_4K;
+
+	if (!ctrl_data) {
+		pr_err("No SPMI controller data\n");
+		return -EINVAL;
+	}
+
+	/* Per file "transaction" data */
+	trans = kzalloc(sizeof(*trans), GFP_KERNEL);
+
+	if (!trans) {
+		pr_err("Unable to allocate memory for transaction data\n");
+		return -ENOMEM;
+	}
+
+	/* Allocate log buffer */
+	log = kzalloc(logbufsize, GFP_KERNEL);
+
+	if (!log) {
+		kfree(trans);
+		pr_err("Unable to allocate memory for log buffer\n");
+		return -ENOMEM;
+	}
+
+	log->rpos = 0;
+	log->wpos = 0;
+	log->len = logbufsize - sizeof(*log);
+
+	trans->log = log;
+	trans->cnt = ctrl_data->cnt;
+	trans->addr = ctrl_data->addr;
+	trans->ctrl = ctrl_data->ctrl;
+	trans->offset = trans->addr;
+
+	file->private_data = trans;
+	return 0;
+}
+
+static int spmi_dfs_data_open(struct inode *inode, struct file *file)
+{
+	struct spmi_ctrl_data *ctrl_data = inode->i_private;
+	return spmi_dfs_open(ctrl_data, file);
+}
+
+static int spmi_dfs_raw_data_open(struct inode *inode, struct file *file)
+{
+	int rc;
+	struct spmi_trans *trans;
+	struct spmi_ctrl_data *ctrl_data = inode->i_private;
+
+	rc = spmi_dfs_open(ctrl_data, file);
+	trans = file->private_data;
+	trans->raw_data = true;
+	return rc;
+}
+
+static int spmi_dfs_close(struct inode *inode, struct file *file)
+{
+	struct spmi_trans *trans = file->private_data;
+
+	if (trans && trans->log) {
+		file->private_data = NULL;
+		kfree(trans->log);
+		kfree(trans);
+	}
+
+	return 0;
+}
+
+/**
+ * spmi_read_data: reads data across the SPMI bus
+ * @ctrl: The SPMI controller
+ * @buf: buffer to store the data read.
+ * @offset: SPMI address offset to start reading from.
+ * @cnt: The number of bytes to read.
+ *
+ * Returns 0 on success, otherwise returns error code from SPMI driver.
+ */
+static int
+spmi_read_data(struct spmi_controller *ctrl, uint8_t *buf, int offset, int cnt)
+{
+	int ret = 0;
+	int len;
+	uint8_t sid;
+	uint16_t addr;
+
+	while (cnt > 0) {
+		sid = (offset >> 16) & 0xF;
+		addr = offset & 0xFFFF;
+		len = min(cnt, MAX_REG_PER_TRANSACTION);
+
+		ret = spmi_ext_register_readl(ctrl, sid, addr, buf, len);
+		if (ret < 0) {
+			pr_err("SPMI read failed, err = %d\n", ret);
+			goto done;
+		}
+
+		cnt -= len;
+		buf += len;
+		offset += len;
+	}
+
+done:
+	return ret;
+}
+
+/**
+ * spmi_write_data: writes data across the SPMI bus
+ * @ctrl: The SPMI controller
+ * @buf: data to be written.
+ * @offset: SPMI address offset to start writing to.
+ * @cnt: The number of bytes to write.
+ *
+ * Returns 0 on success, otherwise returns error code from SPMI driver.
+ */
+static int
+spmi_write_data(struct spmi_controller *ctrl, uint8_t *buf, int offset, int cnt)
+{
+	int ret = 0;
+	int len;
+	uint8_t sid;
+	uint16_t addr;
+
+	while (cnt > 0) {
+		sid = (offset >> 16) & 0xF;
+		addr = offset & 0xFFFF;
+		len = min(cnt, MAX_REG_PER_TRANSACTION);
+
+		ret = spmi_ext_register_writel(ctrl, sid, addr, buf, len);
+		if (ret < 0) {
+			pr_err("SPMI write failed, err = %d\n", ret);
+			goto done;
+		}
+
+		cnt -= len;
+		buf += len;
+		offset += len;
+	}
+
+done:
+	return ret;
+}
+
+/**
+ * print_to_log: format a string and place it into the log buffer
+ * @log: The log buffer to place the result into.
+ * @fmt: The format string to use.
+ * @...: The arguments for the format string.
+ *
+ * The return value is the number of characters written to @log buffer
+ * not including the trailing '\0'.
+ */
+static int print_to_log(struct spmi_log_buffer *log, const char *fmt, ...)
+{
+	va_list args;
+	int cnt;
+	char *buf = &log->data[log->wpos];
+	size_t size = log->len - log->wpos;
+
+	va_start(args, fmt);
+	cnt = vscnprintf(buf, size, fmt, args);
+	va_end(args);
+
+	log->wpos += cnt;
+	return cnt;
+}
+
+/**
+ * write_next_line_to_log: Writes a single "line" of data into the log buffer
+ * @trans: Pointer to SPMI transaction data.
+ * @offset: SPMI address offset to start reading from.
+ * @pcnt: Pointer to 'cnt' variable.  Indicates the number of bytes to read.
+ *
+ * The 'offset' is a 20-bit SPMI address which includes a 4-bit slave id (SID),
+ * an 8-bit peripheral id (PID), and an 8-bit peripheral register address.
+ *
+ * On a successful read, the pcnt is decremented by the number of data
+ * bytes read across the SPMI bus.  When the cnt reaches 0, all requested
+ * bytes have been read.
+ */
+static int
+write_next_line_to_log(struct spmi_trans *trans, int offset, size_t *pcnt)
+{
+	int i, j;
+	u8  data[ITEMS_PER_LINE];
+	struct spmi_log_buffer *log = trans->log;
+
+	int cnt = 0;
+	int padding = offset % ITEMS_PER_LINE;
+	int items_to_read = min(ARRAY_SIZE(data) - padding, *pcnt);
+	int items_to_log = min(ITEMS_PER_LINE, padding + items_to_read);
+
+	/* Buffer needs enough space for an entire line */
+	if ((log->len - log->wpos) < MAX_LINE_LENGTH)
+		goto done;
+
+	/* Read the desired number of "items" */
+	if (spmi_read_data(trans->ctrl, data, offset, items_to_read))
+		goto done;
+
+	*pcnt -= items_to_read;
+
+	/* Each line starts with the aligned offset (20-bit address) */
+	cnt = print_to_log(log, "%5.5X ", offset & 0xffff0);
+	if (cnt == 0)
+		goto done;
+
+	/* If the offset is unaligned, add padding to right justify items */
+	for (i = 0; i < padding; ++i) {
+		cnt = print_to_log(log, "-- ");
+		if (cnt == 0)
+			goto done;
+	}
+
+	/* Log the data items */
+	for (j = 0; i < items_to_log; ++i, ++j) {
+		cnt = print_to_log(log, "%2.2X ", data[j]);
+		if (cnt == 0)
+			goto done;
+	}
+
+	/* If the last character was a space, then replace it with a newline */
+	if (log->wpos > 0 && log->data[log->wpos - 1] == ' ')
+		log->data[log->wpos - 1] = '\n';
+
+done:
+	return cnt;
+}
+
+/**
+ * write_raw_data_to_log: Writes a single "line" of data into the log buffer
+ * @trans: Pointer to SPMI transaction data.
+ * @offset: SPMI address offset to start reading from.
+ * @pcnt: Pointer to 'cnt' variable.  Indicates the number of bytes to read.
+ *
+ * The 'offset' is a 20-bit SPMI address which includes a 4-bit slave id (SID),
+ * an 8-bit peripheral id (PID), and an 8-bit peripheral register address.
+ *
+ * On a successful read, the pcnt is decremented by the number of data
+ * bytes read across the SPMI bus.  When the cnt reaches 0, all requested
+ * bytes have been read.
+ */
+static int
+write_raw_data_to_log(struct spmi_trans *trans, int offset, size_t *pcnt)
+{
+	u8  data[16];
+	struct spmi_log_buffer *log = trans->log;
+
+	int i;
+	int cnt = 0;
+	int items_to_read = min(ARRAY_SIZE(data), *pcnt);
+
+	/* Buffer needs enough space for an entire line */
+	if ((log->len - log->wpos) < 80)
+		goto done;
+
+	/* Read the desired number of "items" */
+	if (spmi_read_data(trans->ctrl, data, offset, items_to_read))
+		goto done;
+
+	*pcnt -= items_to_read;
+
+	/* Log the data items */
+	for (i = 0; i < items_to_read; ++i) {
+		cnt = print_to_log(log, "0x%2.2X ", data[i]);
+		if (cnt == 0)
+			goto done;
+	}
+
+	/* If the last character was a space, then replace it with a newline */
+	if (log->wpos > 0 && log->data[log->wpos - 1] == ' ')
+		log->data[log->wpos - 1] = '\n';
+
+done:
+	return cnt;
+}
+
+/**
+ * get_log_data - reads data across the SPMI bus and saves to the log buffer
+ * @trans: Pointer to SPMI transaction data.
+ *
+ * Returns the number of "items" read or SPMI error code for read failures.
+ */
+static int get_log_data(struct spmi_trans *trans)
+{
+	int cnt;
+	int last_cnt;
+	int items_read;
+	int total_items_read = 0;
+	u32 offset = trans->offset;
+	size_t item_cnt = trans->cnt;
+	struct spmi_log_buffer *log = trans->log;
+	int (*write_to_log)(struct spmi_trans *, int, size_t *);
+
+	if (item_cnt == 0)
+		return 0;
+
+	if (trans->raw_data)
+		write_to_log = write_raw_data_to_log;
+	else
+		write_to_log = write_next_line_to_log;
+
+	/* Reset the log buffer 'pointers' */
+	log->wpos = log->rpos = 0;
+
+	/* Keep reading data until the log is full */
+	do {
+		last_cnt = item_cnt;
+		cnt = write_to_log(trans, offset, &item_cnt);
+		items_read = last_cnt - item_cnt;
+		offset += items_read;
+		total_items_read += items_read;
+	} while (cnt && item_cnt > 0);
+
+	/* Adjust the transaction offset and count */
+	trans->cnt = item_cnt;
+	trans->offset += total_items_read;
+
+	return total_items_read;
+}
+
+/**
+ * spmi_dfs_reg_write: write user's byte array (coded as string) over SPMI.
+ * @file: file pointer
+ * @buf: user data to be written.
+ * @count: number of bytes of user data in @buf
+ * @ppos: starting position
+ * @return number of user bytes written, or negative error value
+ */
+static ssize_t spmi_dfs_reg_write(struct file *file, const char __user *buf,
+			size_t count, loff_t *ppos)
+{
+	int bytes_read;
+	int data;
+	int pos = 0;
+	int cnt = 0;
+	u8  *values;
+	size_t ret = 0;
+
+	struct spmi_trans *trans = file->private_data;
+	u32 offset = trans->offset;
+
+	/* Make a copy of the user data */
+	char *kbuf = kmalloc(count + 1, GFP_KERNEL);
+	if (!kbuf)
+		return -ENOMEM;
+
+	ret = copy_from_user(kbuf, buf, count);
+	if (ret == count) {
+		pr_err("failed to copy data from user\n");
+		ret = -EFAULT;
+		goto free_buf;
+	}
+
+	count -= ret;
+	*ppos += count;
+	kbuf[count] = '\0';
+
+	/* Override the text buffer with the raw data */
+	values = kbuf;
+
+	/* Parse the data in the buffer.  It should be a string of numbers */
+	while (sscanf(kbuf + pos, "%i%n", &data, &bytes_read) == 1) {
+		pos += bytes_read;
+		values[cnt++] = data & 0xff;
+	}
+
+	if (!cnt)
+		goto free_buf;
+
+	/* Perform the SPMI write(s) */
+	ret = spmi_write_data(trans->ctrl, values, offset, cnt);
+
+	if (ret) {
+		pr_err("SPMI write failed, err = %zu\n", ret);
+	} else {
+		ret = count;
+		trans->offset += cnt;
+	}
+
+free_buf:
+	kfree(kbuf);
+	return ret;
+}
+
+/**
+ * spmi_dfs_reg_read: reads value(s) over SPMI and fills the user's buffer
+ *  with a byte array (coded as string)
+ * @file: file pointer
+ * @buf: where to put the result
+ * @count: maximum space available in @buf
+ * @ppos: starting position
+ * @return number of user bytes read, or negative error value
+ */
+static ssize_t spmi_dfs_reg_read(struct file *file, char __user *buf,
+	size_t count, loff_t *ppos)
+{
+	struct spmi_trans *trans = file->private_data;
+	struct spmi_log_buffer *log = trans->log;
+	size_t ret;
+	size_t len;
+
+	/* Is the log buffer empty? */
+	if (log->rpos >= log->wpos) {
+		if (get_log_data(trans) <= 0)
+			return 0;
+	}
+
+	len = min(count, log->wpos - log->rpos);
+
+	ret = copy_to_user(buf, &log->data[log->rpos], len);
+	if (ret == len) {
+		pr_err("error copying SPMI register values to user\n");
+		return -EFAULT;
+	}
+
+	/* 'ret' is the number of bytes not copied */
+	len -= ret;
+
+	*ppos += len;
+	log->rpos += len;
+	return len;
+}
+
+static const struct file_operations spmi_dfs_reg_fops = {
+	.open		= spmi_dfs_data_open,
+	.release	= spmi_dfs_close,
+	.read		= spmi_dfs_reg_read,
+	.write		= spmi_dfs_reg_write,
+};
+
+static const struct file_operations spmi_dfs_raw_data_fops = {
+	.open		= spmi_dfs_raw_data_open,
+	.release	= spmi_dfs_close,
+	.read		= spmi_dfs_reg_read,
+	.write		= spmi_dfs_reg_write,
+};
+
+/**
+ * spmi_dfs_create_fs: create debugfs file system.
+ * @return pointer to root directory or NULL if failed to create fs
+ */
+static struct dentry *spmi_dfs_create_fs(void)
+{
+	struct dentry *root, *file;
+
+	pr_debug("Creating SPMI debugfs file-system\n");
+	root = debugfs_create_dir(DFS_ROOT_NAME, NULL);
+	if (IS_ERR(root)) {
+		pr_err("Error creating top level directory err:%ld\n",
+			PTR_ERR(root));
+		if (PTR_ERR(root) == -ENODEV)
+			pr_err("debugfs is not enabled in the kernel\n");
+		return NULL;
+	}
+
+	dbgfs_data.help_msg.size = strlen(dbgfs_data.help_msg.data);
+
+	file = debugfs_create_blob("help", S_IRUGO, root, &dbgfs_data.help_msg);
+	if (!file) {
+		pr_err("error creating help entry\n");
+		goto err_remove_fs;
+	}
+	return root;
+
+err_remove_fs:
+	debugfs_remove_recursive(root);
+	return NULL;
+}
+
+/**
+ * spmi_dfs_get_root: return a pointer to the SPMI debugfs root directory.
+ * Returns a pointer to the existing root directory, or creates it on first
+ * use.  The root directory holds the static 'help' entry; per-controller
+ * entries (address, count, data) are added by spmi_dfs_add_controller().
+ * @returns valid pointer on success or NULL
+ */
+struct dentry *spmi_dfs_get_root(void)
+{
+	if (dbgfs_data.root)
+		return dbgfs_data.root;
+
+	if (mutex_lock_interruptible(&dbgfs_data.lock) < 0)
+		return NULL;
+	/* critical section */
+	if (!dbgfs_data.root) { /* double checking idiom */
+		dbgfs_data.root = spmi_dfs_create_fs();
+	}
+	mutex_unlock(&dbgfs_data.lock);
+	return dbgfs_data.root;
+}
+
+/*
+ * spmi_dfs_add_controller: adds new spmi controller entry
+ * @return zero on success
+ */
+int spmi_dfs_add_controller(struct spmi_controller *ctrl)
+{
+	struct dentry *dir;
+	struct dentry *root;
+	struct dentry *file;
+	struct spmi_ctrl_data *ctrl_data;
+
+	pr_debug("Adding controller %s\n", ctrl->dev.kobj.name);
+	root = spmi_dfs_get_root();
+	if (!root)
+		return -ENOENT;
+
+	/* Allocate transaction data for the controller */
+	ctrl_data = kzalloc(sizeof(*ctrl_data), GFP_KERNEL);
+	if (!ctrl_data)
+		return -ENOMEM;
+
+	dir = debugfs_create_dir(ctrl->dev.kobj.name, root);
+	if (!dir) {
+		pr_err("Error creating entry for spmi controller %s\n",
+						ctrl->dev.kobj.name);
+		goto err_create_dir_failed;
+	}
+
+	ctrl_data->cnt  = 1;
+	ctrl_data->ctrl = ctrl;
+
+	file = debugfs_create_u32("count", DFS_MODE, dir, &ctrl_data->cnt);
+	if (!file) {
+		pr_err("error creating 'count' entry\n");
+		goto err_remove_fs;
+	}
+
+	file = debugfs_create_x32("address", DFS_MODE, dir, &ctrl_data->addr);
+	if (!file) {
+		pr_err("error creating 'address' entry\n");
+		goto err_remove_fs;
+	}
+
+	file = debugfs_create_file("data", DFS_MODE, dir, ctrl_data,
+							&spmi_dfs_reg_fops);
+	if (!file) {
+		pr_err("error creating 'data' entry\n");
+		goto err_remove_fs;
+	}
+
+	file = debugfs_create_file("data_raw", DFS_MODE, dir, ctrl_data,
+						&spmi_dfs_raw_data_fops);
+	if (!file) {
+		pr_err("error creating 'data_raw' entry\n");
+		goto err_remove_fs;
+	}
+
+	list_add(&ctrl_data->node, &dbgfs_data.ctrl);
+	return 0;
+
+err_remove_fs:
+	debugfs_remove_recursive(dir);
+err_create_dir_failed:
+	kfree(ctrl_data);
+	return -ENOMEM;
+}
+
+static void __exit spmi_dfs_delete_all_ctrl(struct list_head *head)
+{
+	struct list_head *pos, *tmp;
+
+	list_for_each_safe(pos, tmp, head) {
+		struct spmi_ctrl_data *ctrl_data;
+
+		ctrl_data = list_entry(pos, struct spmi_ctrl_data, node);
+		list_del(pos);
+		kfree(ctrl_data);
+	}
+}
+
+static void __exit spmi_dfs_destroy(void)
+{
+	pr_debug("de-initializing spmi debugfs ...");
+	if (mutex_lock_interruptible(&dbgfs_data.lock) < 0)
+		return;
+	if (dbgfs_data.root) {
+		debugfs_remove_recursive(dbgfs_data.root);
+		dbgfs_data.root = NULL;
+		spmi_dfs_delete_all_ctrl(&dbgfs_data.ctrl);
+	}
+	mutex_unlock(&dbgfs_data.lock);
+}
+
+module_exit(spmi_dfs_destroy);
+
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:spmi_debug_fs");
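
The debugfs write handler above parses its input with sscanf()'s "%i%n" idiom,
which accepts decimal or 0x-prefixed values and reports how many characters
were consumed.  A minimal stand-alone sketch of that parsing loop (illustrative
hosted C, not driver code):

	#include <stdio.h>

	/* Parse input such as "0x01 2 0x03" into bytes, mirroring the
	 * debugfs 'data' write handler.
	 */
	static int parse_bytes(const char *s, unsigned char *out, int max)
	{
		int pos = 0, read = 0, cnt = 0, val;

		while (cnt < max && sscanf(s + pos, "%i%n", &val, &read) == 1) {
			pos += read;
			out[cnt++] = (unsigned char)(val & 0xff);
		}

		return cnt;
	}
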
diff --git a/drivers/spmi/spmi-dbgfs.h b/drivers/spmi/spmi-dbgfs.h
new file mode 100644
index 0000000..0baa4db
--- /dev/null
+++ b/drivers/spmi/spmi-dbgfs.h
@@ -0,0 +1,21 @@
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#ifndef _SPMI_DBGFS_H
+#define _SPMI_DBGFS_H
+
+#ifdef CONFIG_DEBUG_FS
+int spmi_dfs_add_controller(struct spmi_controller *ctrl);
+#else
+static inline int spmi_dfs_add_controller(struct spmi_controller *ctrl)
+{
+	return 0;
+}
+#endif
+
+#endif /* _SPMI_DBGFS_H */
diff --git a/drivers/spmi/spmi.c b/drivers/spmi/spmi.c
index 914df95..ad58240 100644
--- a/drivers/spmi/spmi.c
+++ b/drivers/spmi/spmi.c
@@ -22,6 +22,8 @@
 #include <linux/module.h>
 #include <linux/pm_runtime.h>
 
+#include "spmi-dbgfs.h"
+
 struct spmii_boardinfo {
 	struct list_head	list;
 	struct spmi_boardinfo	board_info;
@@ -755,6 +757,7 @@
 	list_add_tail(&ctrl->list, &spmi_ctrl_list);
 	mutex_unlock(&board_lock);
 
+	spmi_dfs_add_controller(ctrl);
 	return 0;
 
 exit:
diff --git a/drivers/thermal/pm8xxx-tm.c b/drivers/thermal/pm8xxx-tm.c
index ec04369..4568933 100644
--- a/drivers/thermal/pm8xxx-tm.c
+++ b/drivers/thermal/pm8xxx-tm.c
@@ -33,29 +33,32 @@
 #include <linux/msm_adc.h>
 
 /* Register TEMP_ALARM_CTRL bits */
-#define	TEMP_ALARM_CTRL_ST3_SD		0x80
-#define	TEMP_ALARM_CTRL_ST2_SD		0x40
-#define	TEMP_ALARM_CTRL_STATUS_MASK	0x30
-#define	TEMP_ALARM_CTRL_STATUS_SHIFT	4
-#define	TEMP_ALARM_CTRL_THRESH_MASK	0x0C
-#define	TEMP_ALARM_CTRL_THRESH_SHIFT	2
-#define	TEMP_ALARM_CTRL_OVRD_ST3	0x02
-#define	TEMP_ALARM_CTRL_OVRD_ST2	0x01
-#define	TEMP_ALARM_CTRL_OVRD_MASK	0x03
+#define TEMP_ALARM_CTRL_ST3_SD		0x80
+#define TEMP_ALARM_CTRL_ST2_SD		0x40
+#define TEMP_ALARM_CTRL_STATUS_MASK	0x30
+#define TEMP_ALARM_CTRL_STATUS_SHIFT	4
+#define TEMP_ALARM_CTRL_THRESH_MASK	0x0C
+#define TEMP_ALARM_CTRL_THRESH_SHIFT	2
+#define TEMP_ALARM_CTRL_OVRD_ST3	0x02
+#define TEMP_ALARM_CTRL_OVRD_ST2	0x01
+#define TEMP_ALARM_CTRL_OVRD_MASK	0x03
 
-#define	TEMP_STAGE_STEP			20000	/* Stage step: 20.000 C */
-#define	TEMP_STAGE_HYSTERESIS		2000
+#define TEMP_STAGE_STEP			20000	/* Stage step: 20.000 C */
+#define TEMP_STAGE_HYSTERESIS		2000
 
-#define	TEMP_THRESH_MIN			105000	/* Threshold Min: 105 C */
-#define	TEMP_THRESH_STEP		5000	/* Threshold step: 5 C */
+#define TEMP_THRESH_MIN			105000	/* Threshold Min: 105 C */
+#define TEMP_THRESH_STEP		5000	/* Threshold step: 5 C */
 
 /* Register TEMP_ALARM_PWM bits */
-#define	TEMP_ALARM_PWM_EN_MASK		0xC0
-#define	TEMP_ALARM_PWM_EN_SHIFT		6
-#define	TEMP_ALARM_PWM_PER_PRE_MASK	0x38
-#define	TEMP_ALARM_PWM_PER_PRE_SHIFT	3
-#define	TEMP_ALARM_PWM_PER_DIV_MASK	0x07
-#define	TEMP_ALARM_PWM_PER_DIV_SHIFT	0
+#define TEMP_ALARM_PWM_EN_MASK		0xC0
+#define TEMP_ALARM_PWM_EN_NEVER		0x00
+#define TEMP_ALARM_PWM_EN_SLEEP_B	0x40
+#define TEMP_ALARM_PWM_EN_PWM		0x80
+#define TEMP_ALARM_PWM_EN_ALWAYS	0xC0
+#define TEMP_ALARM_PWM_PER_PRE_MASK	0x38
+#define TEMP_ALARM_PWM_PER_PRE_SHIFT	3
+#define TEMP_ALARM_PWM_PER_DIV_MASK	0x07
+#define TEMP_ALARM_PWM_PER_DIV_SHIFT	0
 
 /* Trips: from critical to less critical */
 #define TRIP_STAGE3			0
@@ -516,16 +519,15 @@
 		return rc;
 
 	/*
-	 * Set the PMIC alarm module PWM to have a frequency of 8 Hz. This
-	 * helps cut down on the number of unnecessary interrupts fired when
-	 * changing between thermal stages.  Also, Enable the over temperature
-	 * PWM whenever the PMIC is enabled.
+	 * Set the PMIC temperature alarm module to be always on.  This ensures
+	 * that die temperature monitoring is active even if CXO is disabled
+	 * (i.e. when sleep_b is low).  This is necessary since CXO can be
+	 * disabled while the system is still heavily loaded.  Also, using
+	 * the always-on instead of the PWM-enabled configuration ensures that
+	 * the die temperature can be measured by the PMIC ADC without
+	 * reconfiguring the temperature alarm module first.
 	 */
-	reg =  (1 << TEMP_ALARM_PWM_EN_SHIFT)
-		| (3 << TEMP_ALARM_PWM_PER_PRE_SHIFT)
-		| (3 << TEMP_ALARM_PWM_PER_DIV_SHIFT);
-
-	rc = pm8xxx_tm_write_pwm(chip, reg);
+	rc = pm8xxx_tm_write_pwm(chip, TEMP_ALARM_PWM_EN_ALWAYS);
 
 	return rc;
 }
diff --git a/drivers/tty/serial/msm_serial_hs.c b/drivers/tty/serial/msm_serial_hs.c
index 84cd3e7..7a0e32b 100644
--- a/drivers/tty/serial/msm_serial_hs.c
+++ b/drivers/tty/serial/msm_serial_hs.c
@@ -391,8 +391,6 @@
 
 	struct msm_hs_port *msm_uport;
 	struct device *dev;
-	struct msm_serial_hs_platform_data *pdata = pdev->dev.platform_data;
-
 
 	if (pdev->id < 0 || pdev->id >= UARTDM_NR) {
 		printk(KERN_ERR "Invalid plaform device ID = %d\n", pdev->id);
@@ -402,10 +400,6 @@
 	msm_uport = &q_uart_port[pdev->id];
 	dev = msm_uport->uport.dev;
 
-	if (pdata && pdata->gpio_config)
-		if (pdata->gpio_config(0))
-			dev_err(dev, "GPIO config error\n");
-
 	sysfs_remove_file(&pdev->dev.kobj, &dev_attr_clock.attr);
 	debugfs_remove(msm_uport->loopback_dir);
 
@@ -1646,6 +1640,9 @@
 	unsigned long flags;
 	unsigned int data;
 	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
+	struct platform_device *pdev = to_platform_device(uport->dev);
+	const struct msm_serial_hs_platform_data *pdata =
+					pdev->dev.platform_data;
 	struct circ_buf *tx_buf = &uport->state->xmit;
 	struct msm_hs_tx *tx = &msm_uport->tx;
 
@@ -1665,6 +1662,10 @@
 		return ret;
 	}
 
+	if (pdata && pdata->gpio_config)
+		if (unlikely(pdata->gpio_config(1)))
+			dev_err(uport->dev, "Cannot configure gpios\n");
+
 	/* Set auto RFR Level */
 	data = msm_hs_read(uport, UARTDM_MR1_ADDR);
 	data &= ~UARTDM_MR1_AUTO_RFR_LEVEL1_BMSK;
@@ -1945,10 +1946,6 @@
 		if (unlikely(msm_uport->wakeup.irq < 0))
 			return -ENXIO;
 
-		if (pdata->gpio_config)
-			if (unlikely(pdata->gpio_config(1)))
-				dev_err(uport->dev, "Cannot configure"
-					"gpios\n");
 	}
 
 	resource = platform_get_resource_byname(pdev, IORESOURCE_DMA,
@@ -2086,6 +2083,9 @@
 	unsigned int data;
 	unsigned long flags;
 	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
+	struct platform_device *pdev = to_platform_device(uport->dev);
+	const struct msm_serial_hs_platform_data *pdata =
+				pdev->dev.platform_data;
 
 	if (msm_uport->tx.dma_in_flight) {
 		spin_lock_irqsave(&uport->lock, flags);
@@ -2148,6 +2148,10 @@
 	free_irq(uport->irq, msm_uport);
 	if (use_low_power_wakeup(msm_uport))
 		free_irq(msm_uport->wakeup.irq, msm_uport);
+
+	if (pdata && pdata->gpio_config)
+		if (pdata->gpio_config(0))
+			dev_err(uport->dev, "GPIO config error\n");
 }
 
 static void __exit msm_serial_hs_exit(void)
diff --git a/drivers/tty/serial/msm_serial_hs_lite.c b/drivers/tty/serial/msm_serial_hs_lite.c
index 2f3f83d..cc9ffaa 100644
--- a/drivers/tty/serial/msm_serial_hs_lite.c
+++ b/drivers/tty/serial/msm_serial_hs_lite.c
@@ -1369,9 +1369,12 @@
 	else
 		line = pdev->id;
 
-	/* Use line number from device tree if present */
-	if (pdev->dev.of_node)
-		of_property_read_u32(pdev->dev.of_node, "cell-index", &line);
+	/* Use line number from device tree alias if present */
+	if (pdev->dev.of_node) {
+		ret = of_alias_get_id(pdev->dev.of_node, "serial");
+		if (ret >= 0)
+			line = ret;
+	}
 
 	if (unlikely(line < 0 || line >= UART_NR))
 		return -ENXIO;
diff --git a/drivers/usb/dwc3/dwc3-msm.c b/drivers/usb/dwc3/dwc3-msm.c
index 4073fc8..7430e5a 100644
--- a/drivers/usb/dwc3/dwc3-msm.c
+++ b/drivers/usb/dwc3/dwc3-msm.c
@@ -36,6 +36,7 @@
 #include <linux/power_supply.h>
 
 #include <mach/rpm-regulator.h>
+#include <mach/rpm-regulator-smd.h>
 #include <mach/msm_xo.h>
 #include <mach/msm_bus.h>
 
@@ -141,8 +142,6 @@
 	struct regulator	*hsusb_vddcx;
 	struct regulator	*ssusb_1p8;
 	struct regulator	*ssusb_vddcx;
-	enum usb_vdd_type	ss_vdd_type;
-	enum usb_vdd_type	hs_vdd_type;
 	struct dwc3_ext_xceiv	ext_xceiv;
 	bool			resume_pending;
 	atomic_t                pm_suspended;
@@ -162,6 +161,9 @@
 	unsigned int		online;
 	unsigned int		host_mode;
 	unsigned int		current_max;
+	unsigned int		vdd_no_vol_level;
+	unsigned int		vdd_low_vol_level;
+	unsigned int		vdd_high_vol_level;
 	bool			vbus_active;
 };
 
@@ -177,23 +179,6 @@
 #define USB_SSPHY_1P8_VOL_MAX		1800000 /* uV */
 #define USB_SSPHY_1P8_HPM_LOAD		23000	/* uA */
 
-#define USB_PHY_VDD_DIG_VOL_NONE	0	/* uV */
-#define USB_PHY_VDD_DIG_VOL_MIN		1045000 /* uV */
-#define USB_PHY_VDD_DIG_VOL_MAX		1320000 /* uV */
-
-static const int vdd_val[VDD_TYPE_MAX][VDD_VAL_MAX] = {
-		{  /* VDD_CX CORNER Voting */
-			[VDD_NONE]	= RPM_VREG_CORNER_NONE,
-			[VDD_MIN]	= RPM_VREG_CORNER_NOMINAL,
-			[VDD_MAX]	= RPM_VREG_CORNER_HIGH,
-		},
-		{ /* VDD_CX Voltage Voting */
-			[VDD_NONE]	= USB_PHY_VDD_DIG_VOL_NONE,
-			[VDD_MIN]	= USB_PHY_VDD_DIG_VOL_MIN,
-			[VDD_MAX]	= USB_PHY_VDD_DIG_VOL_MAX,
-		},
-};
-
 static struct dwc3_msm *context;
 static u64 dwc3_msm_dma_mask = DMA_BIT_MASK(64);
 
@@ -858,12 +843,11 @@
 /* HSPHY */
 static int dwc3_hsusb_config_vddcx(int high)
 {
-	int min_vol, ret;
+	int min_vol, max_vol, ret;
 	struct dwc3_msm *dwc = context;
-	enum usb_vdd_type vdd_type = context->hs_vdd_type;
-	int max_vol = vdd_val[vdd_type][VDD_MAX];
 
-	min_vol = vdd_val[vdd_type][high ? VDD_MIN : VDD_NONE];
+	max_vol = dwc->vdd_high_vol_level;
+	min_vol = high ? dwc->vdd_low_vol_level : dwc->vdd_no_vol_level;
 	ret = regulator_set_voltage(dwc->hsusb_vddcx, min_vol, max_vol);
 	if (ret) {
 		dev_err(dwc->dev, "unable to set voltage for HSUSB_VDDCX\n");
@@ -983,12 +967,11 @@
 /* SSPHY */
 static int dwc3_ssusb_config_vddcx(int high)
 {
-	int min_vol, ret;
+	int min_vol, max_vol, ret;
 	struct dwc3_msm *dwc = context;
-	enum usb_vdd_type vdd_type = context->ss_vdd_type;
-	int max_vol = vdd_val[vdd_type][VDD_MAX];
 
-	min_vol = vdd_val[vdd_type][high ? VDD_MIN : VDD_NONE];
+	max_vol = dwc->vdd_high_vol_level;
+	min_vol = high ? dwc->vdd_low_vol_level : dwc->vdd_no_vol_level;
 	ret = regulator_set_voltage(dwc->ssusb_vddcx, min_vol, max_vol);
 	if (ret) {
 		dev_err(dwc->dev, "unable to set voltage for SSUSB_VDDCX\n");
@@ -1250,6 +1233,14 @@
 		return 0;
 	}
 
+	if (cancel_delayed_work_sync(&mdwc->chg_work))
+		dev_dbg(mdwc->dev, "%s: chg_work was pending\n", __func__);
+	if (mdwc->chg_state != USB_CHG_STATE_DETECTED) {
+		/* charger detection wasn't complete; re-init flags */
+		mdwc->chg_state = USB_CHG_STATE_UNDEFINED;
+		mdwc->charger.chg_type = DWC3_INVALID_CHARGER;
+	}
+
 	/* Sequence to put hardware in low power state:
 	 * 1. Set OTGDISABLE to disable OTG block in HSPHY (saves power)
 	 * 2. Clear charger detection control fields
@@ -1615,6 +1606,8 @@
 	struct resource *res;
 	void __iomem *tcsr;
 	int ret = 0;
+	int len = 0;
+	u32 tmp[3];
 
 	msm = devm_kzalloc(&pdev->dev, sizeof(*msm), GFP_KERNEL);
 	if (!msm) {
@@ -1689,19 +1682,26 @@
 	}
 	clk_prepare_enable(msm->ref_clk);
 
+
+	of_get_property(node, "qcom,vdd-voltage-level", &len);
+	if (len == sizeof(tmp)) {
+		of_property_read_u32_array(node, "qcom,vdd-voltage-level",
+							tmp, len/sizeof(*tmp));
+		msm->vdd_no_vol_level = tmp[0];
+		msm->vdd_low_vol_level = tmp[1];
+		msm->vdd_high_vol_level = tmp[2];
+	} else {
+		dev_err(&pdev->dev, "no qcom,vdd-voltage-level property\n");
+		ret = -EINVAL;
+		goto disable_ref_clk;
+	}
+
 	/* SS PHY */
-	msm->ss_vdd_type = VDDCX_CORNER;
 	msm->ssusb_vddcx = devm_regulator_get(&pdev->dev, "ssusb_vdd_dig");
 	if (IS_ERR(msm->ssusb_vddcx)) {
-		msm->ssusb_vddcx = devm_regulator_get(&pdev->dev,
-							"SSUSB_VDDCX");
-		if (IS_ERR(msm->ssusb_vddcx)) {
-			dev_err(&pdev->dev, "unable to get ssusb vddcx\n");
-			ret = PTR_ERR(msm->ssusb_vddcx);
-			goto disable_ref_clk;
-		}
-		msm->ss_vdd_type = VDDCX;
-		dev_dbg(&pdev->dev, "ss_vdd_type: VDDCX\n");
+		dev_err(&pdev->dev, "unable to get ssusb vddcx\n");
+		ret = PTR_ERR(msm->ssusb_vddcx);
+		goto disable_ref_clk;
 	}
 
 	ret = dwc3_ssusb_config_vddcx(1);
@@ -1729,18 +1729,11 @@
 	}
 
 	/* HS PHY */
-	msm->hs_vdd_type = VDDCX_CORNER;
 	msm->hsusb_vddcx = devm_regulator_get(&pdev->dev, "hsusb_vdd_dig");
 	if (IS_ERR(msm->hsusb_vddcx)) {
-		msm->hsusb_vddcx = devm_regulator_get(&pdev->dev,
-							"HSUSB_VDDCX");
-		if (IS_ERR(msm->hsusb_vddcx)) {
-			dev_err(&pdev->dev, "unable to get hsusb vddcx\n");
-			ret = PTR_ERR(msm->ssusb_vddcx);
-			goto disable_ss_ldo;
-		}
-		msm->hs_vdd_type = VDDCX;
-		dev_dbg(&pdev->dev, "hs_vdd_type: VDDCX\n");
+		dev_err(&pdev->dev, "unable to get hsusb vddcx\n");
+		ret = PTR_ERR(msm->hsusb_vddcx);
+		goto disable_ss_ldo;
 	}
 
 	ret = dwc3_hsusb_config_vddcx(1);
diff --git a/drivers/usb/dwc3/dwc3_otg.c b/drivers/usb/dwc3/dwc3_otg.c
index 7b672c4..fab443c 100644
--- a/drivers/usb/dwc3/dwc3_otg.c
+++ b/drivers/usb/dwc3/dwc3_otg.c
@@ -640,14 +640,9 @@
 				}
 			}
 		} else {
-			if (charger) {
-				if (charger->chg_type == DWC3_INVALID_CHARGER)
-					charger->start_detection(dotg->charger,
-									false);
-				else
-					charger->chg_type =
-							DWC3_INVALID_CHARGER;
-			}
+			if (charger)
+				charger->start_detection(dotg->charger, false);
+
 			dwc3_otg_set_power(phy, 0);
 			dev_dbg(phy->dev, "No device, trying to suspend\n");
 			pm_runtime_put_sync(phy->dev);
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 6f903dd..3679191 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -1527,6 +1527,13 @@
 		} else {
 			ret = dwc3_gadget_run_stop(dwc, 0);
 		}
+	} else if (dwc->gadget_driver && !dwc->softconnect &&
+						!dwc->vbus_active) {
+		if (dwc->gadget_driver->disconnect) {
+			spin_unlock_irqrestore(&dwc->lock, flags);
+			dwc->gadget_driver->disconnect(&dwc->gadget);
+			return 0;
+		}
 	}
 
 	spin_unlock_irqrestore(&dwc->lock, flags);
diff --git a/drivers/usb/gadget/f_mbim.c b/drivers/usb/gadget/f_mbim.c
index 6b9295b..85240ef 100644
--- a/drivers/usb/gadget/f_mbim.c
+++ b/drivers/usb/gadget/f_mbim.c
@@ -941,23 +941,23 @@
 
 	pr_debug("dev:%p port#%d\n", dev, dev->port_num);
 
-	spin_lock(&dev->lock);
-	if (!dev->is_open) {
-		pr_err("mbim file handler %p is not open", dev);
-		spin_unlock(&dev->lock);
-		return;
-	}
-
 	cpkt = mbim_alloc_ctrl_pkt(len, GFP_ATOMIC);
 	if (!cpkt) {
 		pr_err("Unable to allocate ctrl pkt\n");
-		spin_unlock(&dev->lock);
 		return;
 	}
 
 	pr_debug("Add to cpkt_req_q packet with len = %d\n", len);
 	memcpy(cpkt->buf, req->buf, len);
 
+	spin_lock(&dev->lock);
+	if (!dev->is_open) {
+		pr_err("mbim file handler %p is not open", dev);
+		spin_unlock(&dev->lock);
+		mbim_free_ctrl_pkt(cpkt);
+		return;
+	}
+
 	list_add_tail(&cpkt->list, &dev->cpkt_req_q);
 	spin_unlock(&dev->lock);
 
diff --git a/drivers/usb/gadget/f_rmnet.c b/drivers/usb/gadget/f_rmnet.c
index 4357e0d..79aac27 100644
--- a/drivers/usb/gadget/f_rmnet.c
+++ b/drivers/usb/gadget/f_rmnet.c
@@ -18,6 +18,7 @@
 #include <linux/spinlock.h>
 
 #include <mach/usb_gadget_xport.h>
+#include <mach/usb_bam.h>
 
 #include "u_rmnet.h"
 #include "gadget_chips.h"
@@ -49,6 +50,9 @@
 	struct list_head		cpkt_resp_q;
 	atomic_t			notify_count;
 	unsigned long			cpkts_len;
+
+	/* IPA / RmNet Bridge support */
+	struct usb_bam_connect_ipa_params ipa_params;
 };
 
 #define NR_RMNET_PORTS	3
@@ -149,7 +153,7 @@
 
 /* Super speed support */
 static struct usb_endpoint_descriptor rmnet_ss_notify_desc  = {
-	.bLength =		sizeof rmnet_ss_notify_desc,
+	.bLength =		USB_DT_ENDPOINT_SIZE,
 	.bDescriptorType =	USB_DT_ENDPOINT,
 	.bEndpointAddress =	USB_DIR_IN,
 	.bmAttributes =		USB_ENDPOINT_XFER_INT,
@@ -168,7 +172,7 @@
 };
 
 static struct usb_endpoint_descriptor rmnet_ss_in_desc = {
-	.bLength =		sizeof rmnet_ss_in_desc,
+	.bLength =		USB_DT_ENDPOINT_SIZE,
 	.bDescriptorType =	USB_DT_ENDPOINT,
 	.bEndpointAddress =	USB_DIR_IN,
 	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
@@ -185,7 +189,7 @@
 };
 
 static struct usb_endpoint_descriptor rmnet_ss_out_desc = {
-	.bLength =		sizeof rmnet_ss_out_desc,
+	.bLength =		USB_DT_ENDPOINT_SIZE,
 	.bDescriptorType =	USB_DT_ENDPOINT,
 	.bEndpointAddress =	USB_DIR_OUT,
 	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
@@ -430,7 +434,17 @@
 	case USB_GADGET_XPORT_BAM:
 	case USB_GADGET_XPORT_BAM2BAM:
 		ret = gbam_connect(&dev->port, port_num,
-						   dxport, port_num);
+						   dxport, port_num, NULL);
+		if (ret) {
+			pr_err("%s: gbam_connect failed: err:%d\n",
+					__func__, ret);
+			gsmd_ctrl_disconnect(&dev->port, port_num);
+			return ret;
+		}
+		break;
+	case USB_GADGET_XPORT_BAM2BAM_IPA:
+		ret = gbam_connect(&dev->port, port_num,
+					dxport, port_num, &(dev->ipa_params));
 		if (ret) {
 			pr_err("%s: gbam_connect failed: err:%d\n",
 					__func__, ret);
@@ -500,7 +514,11 @@
 	switch (dxport) {
 	case USB_GADGET_XPORT_BAM:
 	case USB_GADGET_XPORT_BAM2BAM:
-		gbam_disconnect(&dev->port, port_num, dxport);
+		gbam_disconnect(&dev->port, port_num, dxport, NULL);
+		break;
+	case USB_GADGET_XPORT_BAM2BAM_IPA:
+		gbam_disconnect(&dev->port, port_num, dxport,
+						&(dev->ipa_params));
 		break;
 	case USB_GADGET_XPORT_HSIC:
 		ghsic_data_disconnect(&dev->port, port_num);
@@ -551,6 +569,7 @@
 	case USB_GADGET_XPORT_BAM:
 		break;
 	case USB_GADGET_XPORT_BAM2BAM:
+	case USB_GADGET_XPORT_BAM2BAM_IPA:
 		gbam_suspend(&dev->port, port_num, dxport);
 		break;
 	case USB_GADGET_XPORT_HSIC:
@@ -580,6 +599,7 @@
 	case USB_GADGET_XPORT_BAM:
 		break;
 	case USB_GADGET_XPORT_BAM2BAM:
+	case USB_GADGET_XPORT_BAM2BAM_IPA:
 		gbam_resume(&dev->port, port_num, dxport);
 		break;
 	case USB_GADGET_XPORT_HSIC:
@@ -1235,6 +1255,7 @@
 		no_data_bam_ports++;
 		break;
 	case USB_GADGET_XPORT_BAM2BAM:
+	case USB_GADGET_XPORT_BAM2BAM_IPA:
 		rmnet_port->data_xport_num = no_data_bam2bam_ports;
 		no_data_bam2bam_ports++;
 		break;
diff --git a/drivers/usb/gadget/f_serial.c b/drivers/usb/gadget/f_serial.c
index 43347b3..74dba07 100644
--- a/drivers/usb/gadget/f_serial.c
+++ b/drivers/usb/gadget/f_serial.c
@@ -246,7 +246,7 @@
 
 #ifdef CONFIG_MODEM_SUPPORT
 static struct usb_endpoint_descriptor gser_ss_notify_desc  = {
-	.bLength =		sizeof gser_ss_notify_desc,
+	.bLength =		USB_DT_ENDPOINT_SIZE,
 	.bDescriptorType =	USB_DT_ENDPOINT,
 	.bEndpointAddress =	USB_DIR_IN,
 	.bmAttributes =		USB_ENDPOINT_XFER_INT,
diff --git a/drivers/usb/gadget/u_bam.c b/drivers/usb/gadget/u_bam.c
index f092329..7f3713f 100644
--- a/drivers/usb/gadget/u_bam.c
+++ b/drivers/usb/gadget/u_bam.c
@@ -101,6 +101,8 @@
 	u32					src_pipe_idx;
 	u32					dst_pipe_idx;
 	u8					connection_idx;
+	enum transport_type trans;
+	struct usb_bam_connect_ipa_params *ipa_params;
 
 	/* stats */
 	unsigned int		pending_with_bam;
@@ -640,6 +642,21 @@
 	clear_bit(BAM_CH_OPENED, &d->flags);
 }
 
+static void gbam2bam_disconnect_work(struct work_struct *w)
+{
+	struct gbam_port *port = container_of(w, struct gbam_port, disconnect_w);
+	struct bam_ch_info *d = &port->data_ch;
+	int ret;
+
+	if (d->trans == USB_GADGET_XPORT_BAM2BAM_IPA) {
+		ret = usb_bam_disconnect_ipa(d->connection_idx, d->ipa_params);
+		if (ret)
+			pr_err("%s: usb_bam_disconnect_ipa failed: err:%d\n",
+				__func__, ret);
+		rmnet_bridge_disconnect();
+	}
+}
+
 static void gbam_connect_work(struct work_struct *w)
 {
 	struct gbam_port *port = container_of(w, struct gbam_port, connect_w);
@@ -680,12 +697,38 @@
 	u32 sps_params;
 	int ret;
 
-	ret = usb_bam_connect(d->connection_idx, &d->src_pipe_idx,
-						  &d->dst_pipe_idx);
-	if (ret) {
-		pr_err("%s: usb_bam_connect failed: err:%d\n",
-			__func__, ret);
-		return;
+	if (d->trans == USB_GADGET_XPORT_BAM2BAM) {
+		ret = usb_bam_connect(d->connection_idx, &d->src_pipe_idx,
+							  &d->dst_pipe_idx);
+		if (ret) {
+			pr_err("%s: usb_bam_connect failed: err:%d\n",
+				__func__, ret);
+			return;
+		}
+	} else if (d->trans == USB_GADGET_XPORT_BAM2BAM_IPA) {
+		d->ipa_params->client = IPA_CLIENT_USB_CONS;
+		d->ipa_params->dir = PEER_PERIPHERAL_TO_USB;
+		ret = usb_bam_connect_ipa(d->ipa_params);
+		if (ret) {
+			pr_err("%s: usb_bam_connect_ipa failed: err:%d\n",
+				__func__, ret);
+			return;
+		}
+
+		d->ipa_params->client = IPA_CLIENT_USB_PROD;
+		d->ipa_params->dir = USB_TO_PEER_PERIPHERAL;
+		/* Currently only DMA mode is supported */
+		d->ipa_params->ipa_ep_cfg.mode.mode = IPA_DMA;
+		d->ipa_params->ipa_ep_cfg.mode.dst =
+				IPA_CLIENT_A2_TETHERED_CONS;
+		ret = usb_bam_connect_ipa(d->ipa_params);
+		if (ret) {
+			pr_err("%s: usb_bam_connect_ipa failed: err:%d\n",
+				__func__, ret);
+			return;
+		}
+		rmnet_bridge_connect(d->ipa_params->prod_clnt_hdl,
+				d->ipa_params->cons_clnt_hdl, 0);
 	}
 
 	d->rx_req = usb_ep_alloc_request(port->port_usb->out, GFP_KERNEL);
@@ -873,6 +916,7 @@
 	spin_lock_init(&port->port_lock_dl);
 
 	INIT_WORK(&port->connect_w, gbam2bam_connect_work);
+	INIT_WORK(&port->disconnect_w, gbam2bam_disconnect_work);
 
 	/* data ch */
 	d = &port->data_ch;
@@ -993,7 +1037,8 @@
 static void gam_debugfs_init(void) { }
 #endif
 
-void gbam_disconnect(struct grmnet *gr, u8 port_num, enum transport_type trans)
+void gbam_disconnect(struct grmnet *gr, u8 port_num, enum transport_type trans,
+			struct usb_bam_connect_ipa_params *ipa_params)
 {
 	struct gbam_port	*port;
 	unsigned long		flags;
@@ -1008,7 +1053,8 @@
 		return;
 	}
 
-	if (trans == USB_GADGET_XPORT_BAM2BAM &&
+	if ((trans == USB_GADGET_XPORT_BAM2BAM ||
+		 trans == USB_GADGET_XPORT_BAM2BAM_IPA) &&
 		port_num >= n_bam2bam_ports) {
 		pr_err("%s: invalid bam2bam portno#%d\n",
 			   __func__, port_num);
@@ -1044,12 +1090,14 @@
 	gr->in->driver_data = NULL;
 	gr->out->driver_data = NULL;
 
-	if (trans == USB_GADGET_XPORT_BAM)
+	if (trans == USB_GADGET_XPORT_BAM ||
+		trans == USB_GADGET_XPORT_BAM2BAM_IPA)
 		queue_work(gbam_wq, &port->disconnect_w);
 }
 
 int gbam_connect(struct grmnet *gr, u8 port_num,
-				 enum transport_type trans, u8 connection_idx)
+				 enum transport_type trans, u8 connection_idx,
+				 struct usb_bam_connect_ipa_params *ipa_params)
 {
 	struct gbam_port	*port;
 	struct bam_ch_info	*d;
@@ -1063,7 +1111,9 @@
 		return -ENODEV;
 	}
 
-	if (trans == USB_GADGET_XPORT_BAM2BAM && port_num >= n_bam2bam_ports) {
+	if ((trans == USB_GADGET_XPORT_BAM2BAM ||
+		trans == USB_GADGET_XPORT_BAM2BAM_IPA)
+		&& port_num >= n_bam2bam_ports) {
 		pr_err("%s: invalid portno#%d\n", __func__, port_num);
 		return -ENODEV;
 	}
@@ -1115,8 +1165,15 @@
 	if (trans == USB_GADGET_XPORT_BAM2BAM) {
 		port->gr = gr;
 		d->connection_idx = connection_idx;
+	} else if (trans == USB_GADGET_XPORT_BAM2BAM_IPA) {
+		d->ipa_params = ipa_params;
+		port->gr = gr;
+		d->ipa_params->src_pipe = &(d->src_pipe_idx);
+		d->ipa_params->dst_pipe = &(d->dst_pipe_idx);
+		d->ipa_params->idx = connection_idx;
 	}
 
+	d->trans = trans;
 	queue_work(gbam_wq, &port->connect_w);
 
 	return 0;
@@ -1195,7 +1252,8 @@
 	struct gbam_port	*port;
 	struct bam_ch_info *d;
 
-	if (trans != USB_GADGET_XPORT_BAM2BAM)
+	if (trans != USB_GADGET_XPORT_BAM2BAM &&
+		trans != USB_GADGET_XPORT_BAM2BAM_IPA)
 		return;
 
 	port = bam2bam_ports[port_num];
@@ -1211,7 +1269,8 @@
 	struct gbam_port	*port;
 	struct bam_ch_info *d;
 
-	if (trans != USB_GADGET_XPORT_BAM2BAM)
+	if (trans != USB_GADGET_XPORT_BAM2BAM &&
+		trans != USB_GADGET_XPORT_BAM2BAM_IPA)
 		return;
 
 	port = bam2bam_ports[port_num];
diff --git a/drivers/usb/gadget/u_qc_ether.c b/drivers/usb/gadget/u_qc_ether.c
index 4931c1e..bba2ca6 100644
--- a/drivers/usb/gadget/u_qc_ether.c
+++ b/drivers/usb/gadget/u_qc_ether.c
@@ -332,6 +332,7 @@
 	net_dev = dev_get_by_name(&init_net, netname);
 
 	if (net_dev) {
+		dev_put(net_dev);
 		unregister_netdev(net_dev);
 		free_netdev(net_dev);
 	}
@@ -355,6 +356,10 @@
 
 	/* Extract the eth_qc_dev from the net device */
 	net_dev = dev_get_by_name(&init_net, netname);
+	if (!net_dev)
+		return ERR_PTR(-EINVAL);
+
+	dev_put(net_dev);
 	dev = netdev_priv(net_dev);
 
 	if (!dev)
@@ -400,6 +405,10 @@
 
 	/* Extract the eth_qc_dev from the net device */
 	net_dev = dev_get_by_name(&init_net, netname);
+	if (!net_dev)
+		return;
+
+	dev_put(net_dev);
 	dev = netdev_priv(net_dev);
 
 	if (!dev)
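
For context on the hunks above: dev_get_by_name() takes a reference on the
net_device it returns, so every lookup must be balanced with dev_put().  A
hedged sketch of the lookup pattern (function name is illustrative; it assumes
the device is not unregistered while its private data is in use):

	static void *qc_ether_get_priv(const char *netname)
	{
		struct net_device *net_dev;
		void *priv;

		net_dev = dev_get_by_name(&init_net, netname);
		if (!net_dev)
			return NULL;

		priv = netdev_priv(net_dev);
		dev_put(net_dev);	/* drop the reference taken above */

		return priv;
	}
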
diff --git a/drivers/usb/gadget/u_rmnet.h b/drivers/usb/gadget/u_rmnet.h
index 0f7c4fb..a3d42fa 100644
--- a/drivers/usb/gadget/u_rmnet.h
+++ b/drivers/usb/gadget/u_rmnet.h
@@ -48,8 +48,10 @@
 
 int gbam_setup(unsigned int no_bam_port, unsigned int no_bam2bam_port);
 int gbam_connect(struct grmnet *gr, u8 port_num,
-				 enum transport_type trans, u8 connection_idx);
-void gbam_disconnect(struct grmnet *gr, u8 port_num, enum transport_type trans);
+				 enum transport_type trans, u8 connection_idx,
+				 struct usb_bam_connect_ipa_params *ipa_params);
+void gbam_disconnect(struct grmnet *gr, u8 port_num, enum transport_type trans,
+				struct usb_bam_connect_ipa_params *ipa_params);
 void gbam_suspend(struct grmnet *gr, u8 port_num, enum transport_type trans);
 void gbam_resume(struct grmnet *gr, u8 port_num, enum transport_type trans);
 int gsmd_ctrl_connect(struct grmnet *gr, int port_num);
diff --git a/drivers/usb/gadget/u_smd.c b/drivers/usb/gadget/u_smd.c
index ce285a3..effe418 100644
--- a/drivers/usb/gadget/u_smd.c
+++ b/drivers/usb/gadget/u_smd.c
@@ -72,6 +72,7 @@
 
 	struct smd_port_info	*pi;
 	struct delayed_work	connect_work;
+	struct work_struct	disconnect_work;
 
 	/* At present, smd does not notify
 	 * control bit change info from modem
@@ -589,6 +590,20 @@
 	}
 }
 
+static void gsmd_disconnect_work(struct work_struct *w)
+{
+	struct gsmd_port *port;
+	struct smd_port_info *pi;
+
+	port = container_of(w, struct gsmd_port, disconnect_work);
+	pi = port->pi;
+
+	pr_debug("%s: port:%p port#%d\n", __func__, port, port->port_num);
+
+	smd_close(port->pi->ch);
+	port->pi->ch = NULL;
+}
+
 static void gsmd_notify_modem(void *gptr, u8 portno, int ctrl_bits)
 {
 	struct gsmd_port *port;
@@ -731,10 +746,8 @@
 				~port->cbits_to_modem);
 	}
 
-	if (port->pi->ch) {
-		smd_close(port->pi->ch);
-		port->pi->ch = NULL;
-	}
+	if (port->pi->ch)
+		queue_work(gsmd_wq, &port->disconnect_work);
 }
 
 #define SMD_CH_MAX_LEN	20
@@ -819,6 +832,7 @@
 	INIT_WORK(&port->pull, gsmd_tx_pull);
 
 	INIT_DELAYED_WORK(&port->connect_work, gsmd_connect_work);
+	INIT_WORK(&port->disconnect_work, gsmd_disconnect_work);
 
 	smd_ports[portno].port = port;
 	pdrv = &smd_ports[portno].pdrv;
diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c
index fff9465..1a75bd7 100644
--- a/drivers/usb/host/ehci-hub.c
+++ b/drivers/usb/host/ehci-hub.c
@@ -337,7 +337,7 @@
 
 
 /* caller has locked the root hub, and should reset/reinit on error */
-static int ehci_bus_resume (struct usb_hcd *hcd)
+static int __maybe_unused ehci_bus_resume(struct usb_hcd *hcd)
 {
 	struct ehci_hcd		*ehci = hcd_to_ehci (hcd);
 	u32			temp;
diff --git a/drivers/usb/host/ehci-msm-hsic.c b/drivers/usb/host/ehci-msm-hsic.c
index 7b616e4..8c22f8e 100644
--- a/drivers/usb/host/ehci-msm-hsic.c
+++ b/drivers/usb/host/ehci-msm-hsic.c
@@ -76,6 +76,7 @@
 	struct clk		*phy_clk;
 	struct clk		*cal_clk;
 	struct regulator	*hsic_vddcx;
+	struct regulator	*hsic_gdsc;
 	bool			async_int;
 	atomic_t                in_lpm;
 	struct wake_lock	wlock;
@@ -103,6 +104,8 @@
 struct msm_hsic_hcd *__mehci;
 
 static bool debug_bus_voting_enabled = true;
+static u64 ehci_msm_hsic_dma_mask = DMA_BIT_MASK(32);
+
 
 static unsigned int enable_payload_log = 1;
 module_param(enable_payload_log, uint, S_IRUGO | S_IWUSR);
@@ -324,7 +327,7 @@
 #define ULPI_IO_TIMEOUT_USEC	(10 * 1000)
 
 #define USB_PHY_VDD_DIG_VOL_NONE	0 /*uV */
-#define USB_PHY_VDD_DIG_VOL_MIN		1000000 /* uV */
+#define USB_PHY_VDD_DIG_VOL_MIN		945000 /* uV */
 #define USB_PHY_VDD_DIG_VOL_MAX		1320000 /* uV */
 
 #define HSIC_DBG1_REG		0x38
@@ -393,6 +396,35 @@
 
 }
 
+/* Global Distributed Switch Controller (GDSC) init */
+static int msm_hsic_init_gdsc(struct msm_hsic_hcd *mehci, int init)
+{
+	int ret = 0;
+
+	if (IS_ERR(mehci->hsic_gdsc))
+		return 0;
+
+	if (!mehci->hsic_gdsc) {
+		mehci->hsic_gdsc = devm_regulator_get(mehci->dev,
+			"HSIC_GDSC");
+		if (IS_ERR(mehci->hsic_gdsc))
+			return 0;
+	}
+
+	if (init) {
+		ret = regulator_enable(mehci->hsic_gdsc);
+		if (ret) {
+			dev_err(mehci->dev, "unable to enable hsic gdsc\n");
+			return ret;
+		}
+	} else {
+		regulator_disable(mehci->hsic_gdsc);
+	}
+
+	return 0;
+}
+
 static int ulpi_read(struct msm_hsic_hcd *mehci, u32 reg)
 {
 	struct usb_hcd *hcd = hsic_to_hcd(mehci);
@@ -563,18 +595,22 @@
 #define HSIC_PAD_CALIBRATION	0xA8
 #define HSIC_GPIO_PAD_VAL	0x0A0AAA10
 #define LINK_RESET_TIMEOUT_USEC		(250 * 1000)
-static int msm_hsic_reset(struct msm_hsic_hcd *mehci)
+
+static void msm_hsic_phy_reset(struct msm_hsic_hcd *mehci)
 {
 	struct usb_hcd *hcd = hsic_to_hcd(mehci);
-	int ret;
-	struct msm_hsic_host_platform_data *pdata = mehci->dev->platform_data;
 
 	msm_hsic_clk_reset(mehci);
 
 	/* select ulpi phy */
 	writel_relaxed(0x80000000, USB_PORTSC);
-
 	mb();
+}
+
+static int msm_hsic_start(struct msm_hsic_hcd *mehci)
+{
+	struct msm_hsic_host_platform_data *pdata = mehci->dev->platform_data;
+	int ret;
 
 	/* HSIC init sequence when HSIC signals (Strobe/Data) are
 	routed via GPIOs */
@@ -635,12 +671,22 @@
 #define PHY_RESUME_TIMEOUT_USEC		(100 * 1000)
 
 #ifdef CONFIG_PM_SLEEP
+static int msm_hsic_reset(struct msm_hsic_hcd *mehci)
+{
+	/* reset HSIC phy */
+	msm_hsic_phy_reset(mehci);
+
+	/* HSIC init procedure (calibration) */
+	return msm_hsic_start(mehci);
+}
+
 static int msm_hsic_suspend(struct msm_hsic_hcd *mehci)
 {
 	struct usb_hcd *hcd = hsic_to_hcd(mehci);
 	int cnt = 0, ret;
 	u32 val;
 	int none_vol, max_vol;
+	struct msm_hsic_host_platform_data *pdata = mehci->dev->platform_data;
 
 	if (atomic_read(&mehci->in_lpm)) {
 		dev_dbg(mehci->dev, "%s called in lpm\n", __func__);
@@ -719,6 +765,10 @@
 	enable_irq_wake(mehci->wakeup_irq);
 	enable_irq(mehci->wakeup_irq);
 
+	if (pdata && pdata->standalone_latency)
+		pm_qos_update_request(&mehci->pm_qos_req_dma,
+			PM_QOS_DEFAULT_VALUE);
+
 	wake_unlock(&mehci->wlock);
 
 	dev_info(mehci->dev, "HSIC-USB in low power mode\n");
@@ -733,12 +783,17 @@
 	unsigned temp;
 	int min_vol, max_vol;
 	unsigned long flags;
+	struct msm_hsic_host_platform_data *pdata = mehci->dev->platform_data;
 
 	if (!atomic_read(&mehci->in_lpm)) {
 		dev_dbg(mehci->dev, "%s called in !in_lpm\n", __func__);
 		return 0;
 	}
 
+	if (pdata && pdata->standalone_latency)
+		pm_qos_update_request(&mehci->pm_qos_req_dma,
+			pdata->standalone_latency + 1);
+
 	spin_lock_irqsave(&mehci->wakeup_lock, flags);
 	if (mehci->wakeup_irq_enabled) {
 		disable_irq_wake(mehci->wakeup_irq);
@@ -1031,9 +1086,9 @@
 				pm_qos_update_request(&mehci->pm_qos_req_dma,
 					pdata->swfi_latency + 1);
 			wait_for_completion(&mehci->gpt0_completion);
-			if (pdata && pdata->swfi_latency)
+			if (pdata && pdata->standalone_latency)
 				pm_qos_update_request(&mehci->pm_qos_req_dma,
-					PM_QOS_DEFAULT_VALUE);
+					pdata->standalone_latency + 1);
 			spin_lock_irq(&ehci->lock);
 		} else {
 			dbg_log_event(NULL, "FPR: Tightloop", 0);
@@ -1524,6 +1579,11 @@
 
 	dev_dbg(&pdev->dev, "ehci_msm-hsic probe\n");
 
+	if (!pdev->dev.dma_mask)
+		pdev->dev.dma_mask = &ehci_msm_hsic_dma_mask;
+	if (!pdev->dev.coherent_dma_mask)
+		pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+
 	/* After parent device's probe is executed, it will be put in suspend
 	 * mode. When child device's probe is called, driver core is not
 	 * resuming parent device due to which parent will be in suspend even
@@ -1578,6 +1638,13 @@
 	if (pdata)
 		mehci->ehci.log2_irq_thresh = pdata->log2_irq_thresh;
 
+	ret = msm_hsic_init_gdsc(mehci, 1);
+	if (ret) {
+		dev_err(&pdev->dev, "unable to initialize GDSC\n");
+		ret = -ENODEV;
+		goto put_hcd;
+	}
+
 	res = platform_get_resource_byname(pdev,
 			IORESOURCE_IRQ,
 			"peripheral_status_irq");
@@ -1606,11 +1673,8 @@
 
 	init_completion(&mehci->rt_completion);
 	init_completion(&mehci->gpt0_completion);
-	ret = msm_hsic_reset(mehci);
-	if (ret) {
-		dev_err(&pdev->dev, "unable to initialize PHY\n");
-		goto deinit_vddcx;
-	}
+
+	msm_hsic_phy_reset(mehci);
 
 	ehci_wq = create_singlethread_workqueue("ehci_wq");
 	if (!ehci_wq) {
@@ -1624,7 +1688,13 @@
 	ret = usb_add_hcd(hcd, hcd->irq, IRQF_SHARED);
 	if (ret) {
 		dev_err(&pdev->dev, "unable to register HCD\n");
-		goto unconfig_gpio;
+		goto destroy_wq;
+	}
+
+	ret = msm_hsic_start(mehci);
+	if (ret) {
+		dev_err(&pdev->dev, "unable to initialize PHY\n");
+		goto destroy_wq;
 	}
 
 	device_init_wakeup(&pdev->dev, 1);
@@ -1680,9 +1750,9 @@
 
 	__mehci = mehci;
 
-	if (pdata && pdata->swfi_latency)
+	if (pdata && pdata->standalone_latency)
 		pm_qos_add_request(&mehci->pm_qos_req_dma,
-			PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);
+			PM_QOS_CPU_DMA_LATENCY, pdata->standalone_latency + 1);
 
 	/*
 	 * This pdev->dev is assigned parent of root-hub by USB core,
@@ -1700,11 +1770,11 @@
 
 	return 0;
 
-unconfig_gpio:
+destroy_wq:
 	destroy_workqueue(ehci_wq);
-	msm_hsic_config_gpios(mehci, 0);
 deinit_vddcx:
 	msm_hsic_init_vddcx(mehci, 0);
+	msm_hsic_init_gdsc(mehci, 0);
 deinit_clocks:
 	msm_hsic_init_clocks(mehci, 0);
 unmap:
@@ -1721,7 +1791,7 @@
 	struct msm_hsic_hcd *mehci = hcd_to_hsic(hcd);
 	struct msm_hsic_host_platform_data *pdata = mehci->dev->platform_data;
 
-	if (pdata && pdata->swfi_latency)
+	if (pdata && pdata->standalone_latency)
 		pm_qos_remove_request(&mehci->pm_qos_req_dma);
 
 	if (mehci->peripheral_status_irq)
@@ -1753,6 +1823,7 @@
 	usb_remove_hcd(hcd);
 	msm_hsic_config_gpios(mehci, 0);
 	msm_hsic_init_vddcx(mehci, 0);
+	msm_hsic_init_gdsc(mehci, 0);
 
 	msm_hsic_init_clocks(mehci, 0);
 	wake_lock_destroy(&mehci->wlock);
@@ -1871,7 +1942,11 @@
 				msm_hsic_runtime_idle)
 };
 #endif
-
+static const struct of_device_id hsic_host_dt_match[] = {
+	{ .compatible = "qcom,hsic-host",
+	},
+	{}
+};
 static struct platform_driver ehci_msm_hsic_driver = {
 	.probe	= ehci_hsic_msm_probe,
 	.remove	= __devexit_p(ehci_hsic_msm_remove),
@@ -1880,5 +1955,6 @@
 #ifdef CONFIG_PM
 		.pm = &msm_hsic_dev_pm_ops,
 #endif
+		.of_match_table = hsic_host_dt_match,
 	},
 };
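
For readers unfamiliar with the PM QoS interface these hunks move to: the driver now holds a bounded CPU/DMA latency vote (pdata->standalone_latency + 1) whenever the HSIC link is active, tightens it to swfi_latency + 1 around the remote-wakeup tight loop, and drops it to the default value only in low power mode. Below is a minimal sketch of that voting pattern; the names are hypothetical stand-ins for the driver's pm_qos_req_dma request and platform-data latencies.

	#include <linux/pm_qos.h>

	/* stands in for mehci->pm_qos_req_dma */
	static struct pm_qos_request example_qos_req;

	static void example_qos_init(s32 active_latency)
	{
		/* Vote for a bounded wakeup latency while the link is active */
		pm_qos_add_request(&example_qos_req, PM_QOS_CPU_DMA_LATENCY,
				   active_latency + 1);
	}

	static void example_qos_enter_lpm(void)
	{
		/* Release the vote so deeper idle states are allowed in LPM */
		pm_qos_update_request(&example_qos_req, PM_QOS_DEFAULT_VALUE);
	}

	static void example_qos_exit_lpm(s32 active_latency)
	{
		/* Reinstate the active-mode vote on resume */
		pm_qos_update_request(&example_qos_req, active_latency + 1);
	}

	static void example_qos_remove(void)
	{
		pm_qos_remove_request(&example_qos_req);
	}
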
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 3d9422f..38a3c15 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -1277,7 +1277,7 @@
 static void handle_port_status(struct xhci_hcd *xhci,
 		union xhci_trb *event)
 {
-	struct usb_hcd *hcd;
+	struct usb_hcd *hcd = NULL;
 	u32 port_id;
 	u32 temp, temp1;
 	int max_ports;
@@ -1331,6 +1331,8 @@
 	 */
 	/* Find the right roothub. */
 	hcd = xhci_to_hcd(xhci);
+	if (!hcd)
+		goto cleanup;
 	if ((major_revision == 0x03) != (hcd->speed == HCD_USB3))
 		hcd = xhci->shared_hcd;
 	bus_state = &xhci->bus_state[hcd_index(hcd)];
diff --git a/drivers/usb/misc/mdm_ctrl_bridge.c b/drivers/usb/misc/mdm_ctrl_bridge.c
index 2f7e2c3..abc7b86 100644
--- a/drivers/usb/misc/mdm_ctrl_bridge.c
+++ b/drivers/usb/misc/mdm_ctrl_bridge.c
@@ -320,7 +320,6 @@
 	dev_dbg(&dev->intf->dev, "%s:\n", __func__);
 
 	ctrl_bridge_set_cbits(dev->brdg->ch_id, 0);
-	usb_unlink_anchored_urbs(&dev->tx_submitted);
 
 	dev->brdg = NULL;
 }
@@ -718,6 +717,8 @@
 
 	platform_device_unregister(dev->pdev);
 
+	usb_unlink_anchored_urbs(&dev->tx_submitted);
+
 	kfree(dev->in_ctlreq);
 	kfree(dev->readbuf);
 	kfree(dev->intbuf);
diff --git a/drivers/usb/otg/msm_otg.c b/drivers/usb/otg/msm_otg.c
index d04c234..c6fe765 100644
--- a/drivers/usb/otg/msm_otg.c
+++ b/drivers/usb/otg/msm_otg.c
@@ -1137,11 +1137,20 @@
 
 	if (legacy_power_supply) {
 		/* legacy support */
-		if (host_mode)
+		if (host_mode) {
 			power_supply_set_scope(psy, POWER_SUPPLY_SCOPE_SYSTEM);
-		else
+		} else {
 			power_supply_set_scope(psy, POWER_SUPPLY_SCOPE_DEVICE);
-		return;
+			/*
+			 * VBUS comparator is disabled by PMIC charging driver
+			 * when SYSTEM scope is selected.  For the ID_GND->ID_A
+			 * transition, give a 50 msec delay so that the PMIC
+			 * charger driver can detect VBUS and be ready to
+			 * accept the charging current value from USB.
+			 */
+			if (test_bit(ID_A, &motg->inputs))
+				msleep(50);
+		}
 	} else {
 		motg->host_mode = host_mode;
 		power_supply_changed(psy);
@@ -1573,6 +1582,26 @@
 	return 0;
 }
 
+static bool msm_otg_read_pmic_id_state(struct msm_otg *motg)
+{
+	unsigned long flags;
+	int id;
+
+	if (!motg->pdata->pmic_id_irq)
+		return -ENODEV;
+
+	local_irq_save(flags);
+	id = irq_read_line(motg->pdata->pmic_id_irq);
+	local_irq_restore(flags);
+
+	/*
+	 * If we cannot read the ID line state for some reason, treat
+	 * it as float. This prevents MHL discovery and an unnecessary
+	 * switch to host mode.
+	 */
+	return !!id;
+}
+
 static int msm_otg_mhl_register_callback(struct msm_otg *motg,
 						void (*callback)(int on))
 {
@@ -1655,14 +1684,11 @@
 static bool msm_chg_mhl_detect(struct msm_otg *motg)
 {
 	bool ret, id;
-	unsigned long flags;
 
 	if (!motg->mhl_enabled)
 		return false;
 
-	local_irq_save(flags);
-	id = irq_read_line(motg->pdata->pmic_id_irq);
-	local_irq_restore(flags);
+	id = msm_otg_read_pmic_id_state(motg);
 
 	if (id)
 		return false;
@@ -2290,13 +2316,10 @@
 				clear_bit(B_SESS_VLD, &motg->inputs);
 		} else if (pdata->otg_control == OTG_PMIC_CONTROL) {
 			if (pdata->pmic_id_irq) {
-				unsigned long flags;
-				local_irq_save(flags);
-				if (irq_read_line(pdata->pmic_id_irq))
+				if (msm_otg_read_pmic_id_state(motg))
 					set_bit(ID, &motg->inputs);
 				else
 					clear_bit(ID, &motg->inputs);
-				local_irq_restore(flags);
 			}
 			/*
 			 * VBUS initial state is reported after PMIC
@@ -2444,6 +2467,18 @@
 			motg->chg_type = USB_INVALID_CHARGER;
 			msm_otg_notify_charger(motg, 0);
 			msm_otg_reset(otg->phy);
+			/*
+			 * There is a small window where the ID interrupt
+			 * is not monitored while the ID detection circuit
+			 * switches from ACA to PMIC.  Check the ID state
+			 * before entering low power mode.
+			 */
+			if (!msm_otg_read_pmic_id_state(motg)) {
+				pr_debug("process missed ID intr\n");
+				clear_bit(ID, &motg->inputs);
+				work = 1;
+				break;
+			}
 			pm_runtime_put_noidle(otg->phy->dev);
 			/*
 			 * Only if autosuspend was enabled in probe, it will be
@@ -3115,10 +3150,8 @@
 	struct msm_otg *motg = container_of(w, struct msm_otg,
 						pmic_id_status_work.work);
 	int work = 0;
-	unsigned long flags;
 
-	local_irq_save(flags);
-	if (irq_read_line(motg->pdata->pmic_id_irq)) {
+	if (msm_otg_read_pmic_id_state(motg)) {
 		if (!test_and_set_bit(ID, &motg->inputs)) {
 			pr_debug("PMIC: ID set\n");
 			work = 1;
@@ -3137,7 +3170,6 @@
 		else
 			queue_work(system_nrt_wq, &motg->sm_work);
 	}
-	local_irq_restore(flags);
 
 }
 
diff --git a/drivers/video/msm/external_common.c b/drivers/video/msm/external_common.c
index c6ffaf2..0411baa 100644
--- a/drivers/video/msm/external_common.c
+++ b/drivers/video/msm/external_common.c
@@ -1407,30 +1407,37 @@
 	struct hdmi_disp_mode_list_type *disp_mode_list,
 	uint32 video_format)
 {
-	const struct hdmi_disp_mode_timing_type *timing =
-		hdmi_common_get_supported_mode(video_format);
-	boolean supported = timing != NULL;
+	const struct hdmi_disp_mode_timing_type *timing;
+	boolean supported = false;
+	boolean mhl_supported = true;
 
 	if (video_format >= HDMI_VFRMT_MAX)
 		return;
 
+	timing = hdmi_common_get_supported_mode(video_format);
+	supported = timing != NULL;
 	DEV_DBG("EDID: format: %d [%s], %s\n",
 		video_format, video_format_2string(video_format),
 		supported ? "Supported" : "Not-Supported");
-	if (supported) {
-		if (mhl_is_enabled()) {
-			const struct hdmi_disp_mode_timing_type *mhl_timing =
-				hdmi_mhl_get_supported_mode(video_format);
-			boolean mhl_supported = mhl_timing != NULL;
-			DEV_DBG("EDID: format: %d [%s], %s by MHL\n",
+
+	if (mhl_is_enabled()) {
+		const struct hdmi_disp_mode_timing_type *mhl_timing =
+			hdmi_mhl_get_supported_mode(video_format);
+		mhl_supported = mhl_timing != NULL;
+		DEV_DBG("EDID: format: %d [%s], %s by MHL\n",
 			video_format, video_format_2string(video_format),
-				mhl_supported ? "Supported" : "Not-Supported");
-			if (mhl_supported)
-				disp_mode_list->disp_mode_list[
+			mhl_supported ? "Supported" : "Not-Supported");
+	}
+
+	if (supported && mhl_supported) {
+		disp_mode_list->disp_mode_list[
 			disp_mode_list->num_of_elements++] = video_format;
-		} else
-			disp_mode_list->disp_mode_list[
-			disp_mode_list->num_of_elements++] = video_format;
+		if (video_format == external_common_state->video_resolution) {
+			DEV_DBG("%s: Default resolution %d [%s] supported\n",
+					__func__, video_format,
+					video_format_2string(video_format));
+			external_common_state->default_res_supported = true;
+		}
 	}
 }
 
@@ -1866,6 +1873,7 @@
 	memset(&external_common_state->disp_mode_list, 0,
 		sizeof(external_common_state->disp_mode_list));
 	memset(edid_buf, 0, sizeof(edid_buf));
+	external_common_state->default_res_supported = false;
 
 	status = hdmi_common_read_edid_block(0, edid_buf);
 	if (status || !check_edid_header(edid_buf)) {
diff --git a/drivers/video/msm/external_common.h b/drivers/video/msm/external_common.h
index 43a8794..70a99ee 100644
--- a/drivers/video/msm/external_common.h
+++ b/drivers/video/msm/external_common.h
@@ -210,8 +210,10 @@
 	boolean hpd_state;
 	struct kobject *uevent_kobj;
 	uint32 video_resolution;
+	boolean default_res_supported;
 	struct device *dev;
 	struct switch_dev sdev;
+	struct switch_dev audio_sdev;
 #ifdef CONFIG_FB_MSM_HDMI_3D
 	boolean format_3d;
 	void (*switch_3d)(boolean on);
diff --git a/drivers/video/msm/hdmi_msm.c b/drivers/video/msm/hdmi_msm.c
index 7a92645..516c92c 100644
--- a/drivers/video/msm/hdmi_msm.c
+++ b/drivers/video/msm/hdmi_msm.c
@@ -57,6 +57,22 @@
 #define HDCP_DDC_CTRL_1		0x0124
 #define HDMI_DDC_CTRL		0x020C
 
+#define HPD_DISCONNECT_POLARITY	0
+#define HPD_CONNECT_POLARITY	1
+
+#define SWITCH_SET_HDMI_AUDIO(d, force) \
+	do {\
+		if (!hdmi_msm_is_dvi_mode() &&\
+			((force) ||\
+			 (external_common_state->audio_sdev.state != (d)))) {\
+			switch_set_state(&external_common_state->audio_sdev,\
+					(d));\
+			DEV_INFO("%s: hdmi_audio state switched to %d\n",\
+				__func__,\
+				external_common_state->audio_sdev.state);\
+		} \
+	} while (0)
+
 struct workqueue_struct *hdmi_work_queue;
 struct hdmi_msm_state_type *hdmi_msm_state;
 
@@ -74,6 +90,7 @@
 static int hdmi_msm_audio_off(void);
 static int hdmi_msm_read_edid(void);
 static void hdmi_msm_hpd_off(void);
+static boolean hdmi_msm_is_dvi_mode(void);
 
 #ifdef CONFIG_FB_MSM_HDMI_MSM_PANEL_CEC_SUPPORT
 
@@ -768,8 +785,8 @@
 		/* Build EDID table */
 		hdmi_msm_read_edid();
 		switch_set_state(&external_common_state->sdev, 1);
-		DEV_INFO("Hdmi state switched to %d: %s\n",
-			external_common_state->sdev.state,  __func__);
+		DEV_INFO("%s: hdmi state switched to %d\n", __func__,
+				external_common_state->sdev.state);
 
 		DEV_INFO("HDMI HPD: CONNECTED: send ONLINE\n");
 		kobject_uevent(external_common_state->uevent_kobj, KOBJ_ONLINE);
@@ -783,8 +800,8 @@
 		}
 	} else {
 		switch_set_state(&external_common_state->sdev, 0);
-		DEV_INFO("hdmi: Hdmi state switch to %d: %s\n",
-			external_common_state->sdev.state,  __func__);
+		DEV_INFO("%s: hdmi state switch to %d\n", __func__,
+				external_common_state->sdev.state);
 		DEV_INFO("hdmi: HDMI HPD: sense DISCONNECTED: send OFFLINE\n");
 		kobject_uevent(external_common_state->uevent_kobj,
 			KOBJ_OFFLINE);
@@ -793,66 +810,13 @@
 
 static void hdmi_msm_hpd_state_work(struct work_struct *work)
 {
-	boolean hpd_state;
-
 	if (!hdmi_msm_state || !hdmi_msm_state->hpd_initialized ||
 		!MSM_HDMI_BASE) {
 		DEV_ERR("hdmi: %s: ignored, probe failed\n", __func__);
 		return;
 	}
 
-	mutex_lock(&hdmi_msm_state_mutex);
-	DEV_DBG("%s: Handling HPD event in the workqueue\n", __func__);
-
-	if (!hdmi_msm_state->hpd_cable_chg_detected) {
-		/* The work item got called from outside the ISR */
-		mutex_unlock(&hdmi_msm_state_mutex);
-		if (external_common_state->hpd_state) {
-			if (!external_common_state->
-					disp_mode_list.num_of_elements)
-				hdmi_msm_read_edid();
-		}
-	} else {
-		hdmi_msm_state->hpd_cable_chg_detected = FALSE;
-		mutex_unlock(&hdmi_msm_state_mutex);
-		mutex_lock(&external_common_state_hpd_mutex);
-		/*
-		 * Handle the connect event only if the cable is
-		 * still connected. This check is needed for the case
-		 * where we get a connect event followed by a disconnect
-		 * event in quick succession. In this case, there is no need
-		 * to process the connect event.
-		 */
-		if ((external_common_state->hpd_state) &&
-				!((HDMI_INP(0x0250) & 0x2) >> 1)) {
-			external_common_state->hpd_state = 0;
-			hdmi_msm_state->hpd_state_in_isr = 0;
-			mutex_unlock(&external_common_state_hpd_mutex);
-			DEV_DBG("%s: Ignoring HPD connect event\n", __func__);
-			return;
-		}
-		mutex_unlock(&external_common_state_hpd_mutex);
-		hdmi_msm_send_event(external_common_state->hpd_state);
-	}
-
-	/*
-	 * Wait for a short time before checking for
-	 * any changes in the connection status
-	 */
-	udelay(100);
-
-	mutex_lock(&external_common_state_hpd_mutex);
-	/* HPD_INT_STATUS[0x0250] */
-	hpd_state = (HDMI_INP(0x0250) & 0x2) >> 1;
-
-	if (external_common_state->hpd_state != hpd_state) {
-		external_common_state->hpd_state = hpd_state;
-		hdmi_msm_state->hpd_state_in_isr = hpd_state;
-		mutex_unlock(&external_common_state_hpd_mutex);
-		hdmi_msm_send_event(hpd_state);
-	} else {
-		mutex_unlock(&external_common_state_hpd_mutex);
-	}
+	hdmi_msm_send_event(external_common_state->hpd_state);
 }
 
 #ifdef CONFIG_FB_MSM_HDMI_MSM_PANEL_CEC_SUPPORT
@@ -965,9 +929,7 @@
 		DEV_INFO("HDCP: AUTH_FAIL_INT received, LINK0_STATUS=0x%08x\n",
 			link_status);
 		if (hdmi_msm_state->full_auth_done) {
-			switch_set_state(&external_common_state->sdev, 0);
-			DEV_INFO("Hdmi state switched to %d: %s\n",
-				external_common_state->sdev.state,  __func__);
+			SWITCH_SET_HDMI_AUDIO(0, 0);
 
 			envp[0] = "HDCP_STATE=FAIL";
 			envp[1] = NULL;
@@ -1051,55 +1013,18 @@
 	/* HDMI_HPD_INT_CTRL[0x0254] */
 	hpd_int_ctrl = HDMI_INP_ND(0x0254);
 	if ((hpd_int_ctrl & (1 << 2)) && (hpd_int_status & (1 << 0))) {
-		boolean cable_detected = (hpd_int_status & 2) >> 1;
-		DEV_DBG("%s: HPD IRQ, Ctrl=%04x, State=%04x\n", __func__,
-				hpd_int_ctrl, hpd_int_status);
+		/*
+		 * Got HPD interrupt. Ack the interrupt and disable any
+		 * further HPD interrupts until we process this interrupt.
+		 */
+		HDMI_OUTP(0x0254, ((hpd_int_ctrl | (BIT(0))) & ~BIT(2)));
 
-		/* Ack the interrupt */
-		HDMI_OUTP(0x0254, (hpd_int_ctrl | (1 << 0)));
-
-		mutex_lock(&external_common_state_hpd_mutex);
-		if (hdmi_msm_state->hpd_state_in_isr == cable_detected) {
-			DEV_INFO("%s: HPD has the same state. Ignoring\n",
-					__func__);
-			mutex_unlock(&external_common_state_hpd_mutex);
-		} else {
-			if (!mod_timer(&hdmi_msm_state->hpd_state_timer,
-						jiffies + HZ/2)) {
-				hdmi_msm_state->hpd_state_in_isr =
-					cable_detected;
-				hdmi_msm_state->hpd_cable_chg_detected = TRUE;
-				DEV_DBG("%s: Scheduled work to handle HPD %s\n",
-						__func__,
-						cable_detected ? "connect"
-						: "disconnect");
-			}
-
-			mutex_unlock(&external_common_state_hpd_mutex);
-			/*
-			 * HDCP Compliance 1A-01:
-			 * The Quantum Data Box 882 triggers two consecutive
-			 * HPD events very close to each other as a part of this
-			 * test which can trigger two parallel HDCP auth threads
-			 * if HDCP authentication is going on and we get ISR
-			 * then stop the authentication , rather than
-			 * reauthenticating it again
-			 */
-			if (hdmi_msm_state->hdcp_activating &&
-					!(hdmi_msm_state->full_auth_done)) {
-				DEV_DBG("%s getting hpd while authenticating\n",
-					    __func__);
-				mutex_lock(&hdcp_auth_state_mutex);
-				hdmi_msm_state->hpd_during_auth = TRUE;
-				mutex_unlock(&hdcp_auth_state_mutex);
-			}
-		}
-
-		/* Set up HPD_CTRL to sense HPD event */
-		HDMI_OUTP(0x0254, 4 | (cable_detected ? 0 : 2));
-		DEV_DBG("%s: Setting HPD_CTRL=%d\n", __func__,
-				HDMI_INP(0x0254));
-
+		external_common_state->hpd_state =
+			(HDMI_INP(0x0250) & BIT(1)) >> 1;
+		DEV_DBG("%s: Queuing work to handle HPD %s event\n", __func__,
+				external_common_state->hpd_state ? "connect" :
+				"disconnect");
+		queue_work(hdmi_work_queue, &hdmi_msm_state->hpd_state_work);
 		return IRQ_HANDLED;
 	}
 
@@ -2468,8 +2393,8 @@
 		/* 0x0110 HDCP_CTRL
 			[8] ENCRYPTION_ENABLE
 			[0] ENABLE */
-		/* encryption_enable | enable  */
-		HDMI_OUTP(0x0110, (1 << 8) | (1 << 0));
+		/* Enable HDCP. Encryption should be enabled after reading R0 */
+		HDMI_OUTP(0x0110, BIT(0));
 
 		/*
 		 * Check to see if a HDCP DDC Failure is indicated in
@@ -2664,6 +2589,9 @@
 			goto error;
 		}
 
+		/* Enable HDCP Encryption */
+		HDMI_OUTP(0x0110, BIT(0) | BIT(8));
+
 		DEV_INFO("HDCP: authentication part I, successful\n");
 		is_part1_done = FALSE;
 		return 0;
@@ -3023,17 +2951,15 @@
 	mutex_unlock(&hdmi_msm_state_mutex);
 
 	mutex_lock(&hdcp_auth_state_mutex);
-	/*
-	 * Initialize this to zero here to make
-	 * sure HPD has not happened yet
-	 */
-	hdmi_msm_state->hpd_during_auth = FALSE;
 	/* This flag prevents other threads from re-authenticating
 	* after we've just authenticated (i.e., finished part3)
 	* We probably need to protect this in a mutex lock */
 	hdmi_msm_state->full_auth_done = FALSE;
 	mutex_unlock(&hdcp_auth_state_mutex);
 
+	/* Disable HDCP before we start part1 */
+	HDMI_OUTP(0x0110, 0x0);
+
 	/* PART I Authentication*/
 	ret = hdcp_authentication_part1();
 	if (ret)
@@ -3082,17 +3008,13 @@
 		envp[1] = NULL;
 		kobject_uevent_env(external_common_state->uevent_kobj,
 		    KOBJ_CHANGE, envp);
+
+		SWITCH_SET_HDMI_AUDIO(1, 0);
 	}
 
-	switch_set_state(&external_common_state->sdev, 1);
-	DEV_INFO("Hdmi state switched to %d: %s\n",
-		external_common_state->sdev.state, __func__);
 	return;
 
 error:
-	mutex_lock(&hdmi_msm_state_mutex);
-	hdmi_msm_state->hdcp_activating = FALSE;
-	mutex_unlock(&hdmi_msm_state_mutex);
 	if (hdmi_msm_state->hpd_during_auth) {
 		DEV_WARN("Calling Deauthentication: HPD occured during "
 			 "authentication  from [%s]\n", __func__);
@@ -3106,9 +3028,9 @@
 			queue_work(hdmi_work_queue,
 			    &hdmi_msm_state->hdcp_reauth_work);
 	}
-	switch_set_state(&external_common_state->sdev, 0);
-	DEV_INFO("Hdmi state switched to %d: %s\n",
-		external_common_state->sdev.state, __func__);
+	mutex_lock(&hdmi_msm_state_mutex);
+	hdmi_msm_state->hdcp_activating = FALSE;
+	mutex_unlock(&hdmi_msm_state_mutex);
 }
 
 static void hdmi_msm_video_setup(int video_format)
@@ -3663,28 +3585,24 @@
 
 static int hdmi_msm_audio_off(void)
 {
-	uint32 audio_pkt_ctrl, audio_cfg;
-	 /* Number of wait iterations */
-	int i = 10;
-	audio_pkt_ctrl = HDMI_INP_ND(0x0020);
-	audio_cfg = HDMI_INP_ND(0x01D0);
+	uint32 audio_cfg;
+	int i, timeout_val = 50;
 
-	/* Checking BIT[0] of AUDIO PACKET CONTROL and */
-	/* AUDIO CONFIGURATION register */
-	while (((audio_pkt_ctrl & 0x00000001) || (audio_cfg & 0x00000001))
-		&& (i--)) {
-		audio_pkt_ctrl = HDMI_INP_ND(0x0020);
-		audio_cfg = HDMI_INP_ND(0x01D0);
-		DEV_DBG("%d times :: HDMI AUDIO PACKET is %08x and "
-		"AUDIO CFG is %08x", i, audio_pkt_ctrl, audio_cfg);
-		msleep(100);
-		if (!i) {
-			DEV_ERR("%s:failed to set BIT[0] AUDIO PACKET"
-			"CONTROL or AUDIO CONFIGURATION REGISTER\n",
-				__func__);
-			return -ETIMEDOUT;
+	for (i = 0; (i < timeout_val) &&
+		((audio_cfg = HDMI_INP_ND(0x01D0)) & BIT(0)); i++) {
+		DEV_DBG("%s: %d times: AUDIO CFG is %08x\n", __func__,
+				i+1, audio_cfg);
+		if (!((i+1) % 10)) {
+			DEV_ERR("%s: audio still on after %d sec. try again\n",
+				__func__, (i+1)/10);
+			SWITCH_SET_HDMI_AUDIO(0, 1);
 		}
+		msleep(100);
 	}
+
+	if (i == timeout_val)
+		DEV_ERR("%s: Error: cannot turn off audio engine\n", __func__);
+
 	hdmi_msm_audio_info_setup(FALSE, 0, 0, 0, FALSE);
 	hdmi_msm_audio_acr_setup(FALSE, 0, 0, 0);
 	DEV_INFO("HDMI Audio: Disabled\n");
@@ -4197,8 +4115,17 @@
 	hdmi_msm_set_mode(TRUE);
 
 	hdmi_msm_video_setup(external_common_state->video_resolution);
-	if (!hdmi_msm_is_dvi_mode())
+	if (!hdmi_msm_is_dvi_mode()) {
 		hdmi_msm_audio_setup();
+
+		/*
+		 * Send the audio switch device notification if HDCP is
+		 * not enabled. Otherwise, the notification would be
+		 * sent after HDCP authentication is successful.
+		 */
+		if (!hdmi_msm_state->hdcp_enable)
+			SWITCH_SET_HDMI_AUDIO(1, 0);
+	}
 	hdmi_msm_avi_info_frame();
 #ifdef CONFIG_FB_MSM_HDMI_3D
 	hdmi_msm_vendor_infoframe_packetsetup();
@@ -4223,36 +4150,6 @@
 	DEV_INFO("HDMI Core: Initialized\n");
 }
 
-static void hdmi_msm_hpd_state_timer(unsigned long data)
-{
-	if (!work_busy(&hdmi_msm_state->hpd_state_work)) {
-		/*
-		 * There is no event currently queued.
-		 * Only queue the work if this event has not already
-		 * been processed.
-		 */
-		if (external_common_state->hpd_state !=
-				hdmi_msm_state->hpd_state_in_isr) {
-			/*
-			 * There is no need to use any synchronization
-			 * construct for safeguarding these state vairables
-			 * here since the only other place these are modified
-			 * is in the HPD work thread, which is known to be not
-			 * pending/running.
-			 */
-			external_common_state->hpd_state =
-				hdmi_msm_state->hpd_state_in_isr;
-			DEV_DBG("%s: Queuing work to handle HPD %s event\n",
-					__func__,
-					external_common_state->hpd_state ?
-					"connect" : "disconnect");
-			queue_work(hdmi_work_queue,
-					&hdmi_msm_state->hpd_state_work);
-			return;
-		}
-	}
-}
-
 static void hdmi_msm_hdcp_timer(unsigned long data)
 {
 	if (!hdmi_msm_state->hdcp_enable) {
@@ -4270,6 +4167,27 @@
 }
 #endif
 
+static void hdmi_msm_hpd_polarity_setup(bool polarity, bool trigger)
+{
+	u32 cable_sense;
+	if (polarity)
+		HDMI_OUTP(0x0254, BIT(2) | BIT(1));
+	else
+		HDMI_OUTP(0x0254, BIT(2));
+
+	cable_sense = (HDMI_INP(0x0250) & BIT(1)) >> 1;
+	DEV_DBG("%s: listen=%s, sense=%s\n", __func__,
+		polarity ? "connect" : "disconnect",
+		cable_sense ? "connect" : "disconnect");
+	if (trigger && (cable_sense == polarity)) {
+		u32 reg_val = HDMI_INP(0x0258);
+
+		/* Toggle HPD circuit to trigger HPD sense */
+		HDMI_OUTP(0x0258, reg_val & ~BIT(28));
+		HDMI_OUTP(0x0258, reg_val | BIT(28));
+	}
+}
+
 static void hdmi_msm_hpd_off(void)
 {
 	int rc = 0;
@@ -4280,7 +4198,6 @@
 	}
 
 	DEV_DBG("%s: (timer, 5V, IRQ off)\n", __func__);
-	del_timer(&hdmi_msm_state->hpd_state_timer);
 	disable_irq(hdmi_msm_state->irq);
 
 	/* Disable HPD interrupt */
@@ -4349,27 +4266,22 @@
 		/* Set up HPD state variables */
 		mutex_lock(&external_common_state_hpd_mutex);
 		external_common_state->hpd_state = 0;
-		hdmi_msm_state->hpd_state_in_isr = 0;
 		mutex_unlock(&external_common_state_hpd_mutex);
 		mutex_lock(&hdmi_msm_state_mutex);
-		hdmi_msm_state->hpd_cable_chg_detected = TRUE;
 		mutex_unlock(&hdmi_msm_state_mutex);
 
-		/* Set up HPD_CTRL to sense HPD event */
-		HDMI_OUTP(0x0254, 0x6);
-		DEV_DBG("%s: Setting HPD_CTRL=%d\n", __func__,
-				HDMI_INP(0x0254));
+		enable_irq(hdmi_msm_state->irq);
 
 		hdmi_msm_state->hpd_initialized = TRUE;
 
-		enable_irq(hdmi_msm_state->irq);
-
 		/* set timeout to 4.1ms (max) for hardware debounce */
 		hpd_ctrl = HDMI_INP(0x0258) | 0x1FFF;
 
-		/* Toggle HPD circuit to trigger HPD sense */
-		HDMI_OUTP(0x0258, ~(1 << 28) & hpd_ctrl);
-		HDMI_OUTP(0x0258, (1 << 28) | hpd_ctrl);
+		/* Turn on HPD HW circuit */
+		HDMI_OUTP(0x0258, hpd_ctrl | BIT(28));
+
+		/* Set up HPD_CTRL to sense HPD event */
+		hdmi_msm_hpd_polarity_setup(HPD_CONNECT_POLARITY, true);
 	}
 
 	DEV_DBG("%s: (IRQ, 5V on)\n", __func__);
@@ -4413,35 +4325,54 @@
 	if (!hdmi_msm_state || !hdmi_msm_state->hdmi_app_clk || !MSM_HDMI_BASE)
 		return -ENODEV;
 
+	if (!hdmi_msm_state->hpd_initialized ||
+		!external_common_state->hpd_state) {
+		DEV_DBG("%s: HPD not initialized/cable not conn. Returning\n",
+				__func__);
+		return 0;
+	}
+
 	DEV_INFO("power: ON (%dx%d %d)\n", mfd->var_xres, mfd->var_yres,
 		mfd->var_pixclock);
 
+	/* Only start transmission with supported resolution */
 	changed = hdmi_common_get_video_format_from_drv_data(mfd);
-	hdmi_msm_audio_info_setup(TRUE, 0, 0, 0, FALSE);
+	if (changed || external_common_state->default_res_supported) {
+		hdmi_msm_audio_info_setup(TRUE, 0, 0, 0, FALSE);
+		mutex_lock(&external_common_state_hpd_mutex);
+		hdmi_msm_state->panel_power_on = TRUE;
+		if (external_common_state->hpd_state &&
+				hdmi_msm_is_power_on()) {
+			DEV_DBG("%s: Turning HDMI on\n", __func__);
+			mutex_unlock(&external_common_state_hpd_mutex);
+			hdmi_msm_turn_on();
 
-	mutex_lock(&external_common_state_hpd_mutex);
-	hdmi_msm_state->panel_power_on = TRUE;
-	if (external_common_state->hpd_state && hdmi_msm_is_power_on()) {
-		DEV_DBG("%s: Turning HDMI on\n", __func__);
-		mutex_unlock(&external_common_state_hpd_mutex);
-		hdmi_msm_turn_on();
-
-		if (hdmi_msm_state->hdcp_enable) {
-			/* Kick off HDCP Authentication */
-			mutex_lock(&hdcp_auth_state_mutex);
-			hdmi_msm_state->reauth = FALSE;
-			hdmi_msm_state->full_auth_done = FALSE;
-			mutex_unlock(&hdcp_auth_state_mutex);
-			mod_timer(&hdmi_msm_state->hdcp_timer, jiffies + HZ/2);
+			if (hdmi_msm_state->hdcp_enable) {
+				/* Kick off HDCP Authentication */
+				mutex_lock(&hdcp_auth_state_mutex);
+				hdmi_msm_state->reauth = FALSE;
+				hdmi_msm_state->full_auth_done = FALSE;
+				mutex_unlock(&hdcp_auth_state_mutex);
+				mod_timer(&hdmi_msm_state->hdcp_timer,
+						jiffies + HZ/2);
+			}
+		} else {
+			mutex_unlock(&external_common_state_hpd_mutex);
 		}
-	} else
-		mutex_unlock(&external_common_state_hpd_mutex);
 
-	hdmi_msm_dump_regs("HDMI-ON: ");
+		hdmi_msm_dump_regs("HDMI-ON: ");
+		DEV_INFO("power=%s DVI=%s\n",
+			hdmi_msm_is_power_on() ? "ON" : "OFF",
+			hdmi_msm_is_dvi_mode() ? "ON" : "OFF");
+	} else {
+		DEV_ERR("%s: Video fmt %d not supp. Returning\n",
+				__func__,
+				external_common_state->video_resolution);
+	}
 
-	DEV_INFO("power=%s DVI= %s\n",
-		hdmi_msm_is_power_on() ? "ON" : "OFF" ,
-		hdmi_msm_is_dvi_mode() ? "ON" : "OFF");
+	/* Enable HPD interrupt and listen to disconnect interrupts */
+	hdmi_msm_hpd_polarity_setup(HPD_DISCONNECT_POLARITY,
+			external_common_state->hpd_state);
 	return 0;
 }
 
@@ -4450,11 +4381,6 @@
 	char *envp[2];
 
 	/* Simulating a HPD event based on MHL event */
-	hdmi_msm_state->hpd_cable_chg_detected = FALSE;
-	/* QDSP OFF preceding the HPD event notification */
-	switch_set_state(&external_common_state->sdev, 0);
-	DEV_INFO("Hdmi state switched to %d: %s\n",
-		 external_common_state->sdev.state,  __func__);
 	if (on) {
 		hdmi_msm_read_edid();
 		hdmi_msm_state->reauth = FALSE ;
@@ -4472,8 +4398,8 @@
 			kobject_uevent_env(external_common_state->uevent_kobj,
 					   KOBJ_CHANGE, envp);
 			switch_set_state(&external_common_state->sdev, 1);
-			DEV_INFO("Hdmi state switched to %d: %s\n",
-				 external_common_state->sdev.state, __func__);
+			DEV_INFO("%s: hdmi state switched to %d\n",
+				 __func__, external_common_state->sdev.state);
 		} else {
 			hdmi_msm_hdcp_enable();
 		}
@@ -4482,8 +4408,8 @@
 		kobject_uevent(external_common_state->uevent_kobj,
 			       KOBJ_OFFLINE);
 		switch_set_state(&external_common_state->sdev, 0);
-		DEV_INFO("Hdmi state switched to %d: %s\n",
-			 external_common_state->sdev.state,  __func__);
+		DEV_INFO("%s: hdmi state switched to %d\n", __func__,
+				external_common_state->sdev.state);
 	}
 }
 EXPORT_SYMBOL(mhl_connect_api);
@@ -4499,21 +4425,48 @@
 	if (!hdmi_msm_state->hdmi_app_clk)
 		return -ENODEV;
 
-	mutex_lock(&hdmi_msm_state_mutex);
-	if (hdmi_msm_state->hdcp_activating) {
-		hdmi_msm_state->panel_power_on = FALSE;
-		mutex_unlock(&hdmi_msm_state_mutex);
-		DEV_INFO("HDCP: activating, returning\n");
+	if (!hdmi_msm_state->panel_power_on) {
+		DEV_DBG("%s: panel not on. returning\n", __func__);
 		return 0;
 	}
-	mutex_unlock(&hdmi_msm_state_mutex);
 
-	DEV_INFO("power: OFF (audio off, Reset Core)\n");
-	hdmi_msm_audio_off();
-	hdcp_deauthenticate();
+	if (hdmi_msm_state->hdcp_enable) {
+		if (hdmi_msm_state->hdcp_activating) {
+			/*
+			 * Let the HDCP work know that we got an HPD
+			 * disconnect so that it can stop the
+			 * reauthentication loop.
+			 */
+			mutex_lock(&hdcp_auth_state_mutex);
+			hdmi_msm_state->hpd_during_auth = TRUE;
+			mutex_unlock(&hdcp_auth_state_mutex);
+		}
+
+		/*
+		 * Cancel any pending reauth attempts.
+		 * If one is ongoing, wait for it to finish
+		 */
+		cancel_work_sync(&hdmi_msm_state->hdcp_reauth_work);
+		cancel_work_sync(&hdmi_msm_state->hdcp_work);
+		del_timer_sync(&hdmi_msm_state->hdcp_timer);
+
+		hdcp_deauthenticate();
+	}
+
+	SWITCH_SET_HDMI_AUDIO(0, 0);
+
+	if (!hdmi_msm_is_dvi_mode())
+		hdmi_msm_audio_off();
+
 	hdmi_msm_powerdown_phy();
 
 	hdmi_msm_state->panel_power_on = FALSE;
+	DEV_INFO("power: OFF (audio off)\n");
+
+	/* Enable HPD interrupt and listen to connect interrupts */
+	hdmi_msm_hpd_polarity_setup(HPD_CONNECT_POLARITY,
+				!external_common_state->hpd_state);
+
 	return 0;
 }
 
@@ -4649,13 +4602,6 @@
 	}
 	disable_irq(hdmi_msm_state->irq);
 
-	init_timer(&hdmi_msm_state->hpd_state_timer);
-	hdmi_msm_state->hpd_state_timer.function =
-		hdmi_msm_hpd_state_timer;
-	hdmi_msm_state->hpd_state_timer.data = (uint32)NULL;
-
-	hdmi_msm_state->hpd_state_timer.expires = 0xffffffffL;
-
 #ifdef CONFIG_FB_MSM_HDMI_MSM_PANEL_CEC_SUPPORT
 	init_timer(&hdmi_msm_state->cec_read_timer);
 	hdmi_msm_state->cec_read_timer.function =
@@ -4689,8 +4635,19 @@
 		external_common_state->sdev.name = "hdmi_as_primary";
 	else
 		external_common_state->sdev.name = "hdmi";
-	if (switch_dev_register(&external_common_state->sdev) < 0)
+	if (switch_dev_register(&external_common_state->sdev) < 0) {
 		DEV_ERR("Hdmi switch registration failed\n");
+		rc = -ENODEV;
+		goto error;
+	}
+
+	external_common_state->audio_sdev.name = "hdmi_audio";
+	if (switch_dev_register(&external_common_state->audio_sdev) < 0) {
+		DEV_ERR("Hdmi audio switch registration failed\n");
+		switch_dev_unregister(&external_common_state->sdev);
+		rc = -ENODEV;
+		goto error;
+	}
 
 	/* Set the default video resolution for MHL-enabled display */
 	if (hdmi_msm_state->is_mhl_enabled) {
@@ -4733,6 +4690,7 @@
 
 	/* Unregister hdmi node from switch driver */
 	switch_dev_unregister(&external_common_state->sdev);
+	switch_dev_unregister(&external_common_state->audio_sdev);
 
 	hdmi_msm_hpd_off();
 	free_irq(hdmi_msm_state->irq, NULL);
@@ -4772,9 +4730,14 @@
 	if (on) {
 		rc = hdmi_msm_hpd_on();
 	} else {
+		external_common_state->hpd_state = 0;
 		hdmi_msm_hpd_off();
+		SWITCH_SET_HDMI_AUDIO(0, 0);
+
 		/* Set HDMI switch node to 0 on HPD feature disable */
 		switch_set_state(&external_common_state->sdev, 0);
+		DEV_INFO("%s: hdmi state switched to %d\n", __func__,
+				external_common_state->sdev.state);
 	}
 
 	return rc;
diff --git a/drivers/video/msm/hdmi_msm.h b/drivers/video/msm/hdmi_msm.h
index 20bd492..17cefdd 100644
--- a/drivers/video/msm/hdmi_msm.h
+++ b/drivers/video/msm/hdmi_msm.h
@@ -53,15 +53,12 @@
 struct hdmi_msm_state_type {
 	boolean panel_power_on;
 	boolean hpd_initialized;
-	boolean hpd_state_in_isr;
 #ifdef CONFIG_SUSPEND
 	boolean pm_suspended;
 #endif
-	boolean hpd_cable_chg_detected;
 	boolean full_auth_done;
 	boolean hpd_during_auth;
 	struct work_struct hpd_state_work;
-	struct timer_list hpd_state_timer;
 	struct completion ddc_sw_done;
 
 	bool hdcp_enable;
diff --git a/drivers/video/msm/mdp.c b/drivers/video/msm/mdp.c
index 0c526fd..a827d6a 100644
--- a/drivers/video/msm/mdp.c
+++ b/drivers/video/msm/mdp.c
@@ -1333,7 +1333,7 @@
 }
 #endif
 
-static ssize_t vsync_show_event(struct device *dev,
+ssize_t mdp_dma_show_event(struct device *dev,
 		struct device_attribute *attr, char *buf)
 {
 	ssize_t ret = 0;
@@ -2244,15 +2244,6 @@
 }
 
 #endif
-static DEVICE_ATTR(vsync_event, S_IRUGO, vsync_show_event, NULL);
-static struct attribute *vsync_fs_attrs[] = {
-	&dev_attr_vsync_event.attr,
-	NULL,
-};
-static struct attribute_group vsync_fs_attr_group = {
-	.attrs = vsync_fs_attrs,
-};
-
 static int mdp_on(struct platform_device *pdev)
 {
 	int ret = 0;
@@ -2287,21 +2278,7 @@
 	if (mdp_rev == MDP_REV_303 && mfd->panel.type == MIPI_CMD_PANEL) {
 
 		vsync_cntrl.dev = mfd->fbi->dev;
-
-		if (!vsync_cntrl.sysfs_created) {
-			ret = sysfs_create_group(&vsync_cntrl.dev->kobj,
-				&vsync_fs_attr_group);
-			if (ret) {
-				pr_err("%s: sysfs creation failed, ret=%d\n",
-					__func__, ret);
-				return ret;
-			}
-
-			kobject_uevent(&vsync_cntrl.dev->kobj, KOBJ_ADD);
-			pr_debug("%s: kobject_uevent(KOBJ_ADD)\n", __func__);
-			vsync_cntrl.sysfs_created = 1;
-		}
-		atomic_set(&vsync_cntrl.suspend, 0);
+		atomic_set(&vsync_cntrl.suspend, 1);
 	}
 
 	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
@@ -2533,7 +2510,6 @@
 #if defined(CONFIG_FB_MSM_MIPI_DSI) && defined(CONFIG_FB_MSM_MDP40)
 	struct mipi_panel_info *mipi;
 #endif
-	unsigned int mdp_r = 0;
 
 	if ((pdev->id == 0) && (pdev->num_resources > 0)) {
 		mdp_init_pdev = pdev;
@@ -2555,14 +2531,6 @@
 		}
 
 		mdp_rev = mdp_pdata->mdp_rev;
-		if (mdp_rev == MDP_REV_42) {
-			mdp_r = inpdw(MDP_BASE + 0x0);
-			mdp_r = ((mdp_r & 0x30000) >> 16);
-			if (mdp_r == 3) {
-				mdp_rev = MDP_REV_43;
-				mdp_pdata->mdp_rev = MDP_REV_43;
-			}
-		}
 
 		mdp_iommu_split_domain = mdp_pdata->mdp_iommu_split_domain;
 
@@ -2615,6 +2583,7 @@
 	/* link to the latest pdev */
 	mfd->pdev = msm_fb_dev;
 	mfd->mdp_rev = mdp_rev;
+	mfd->vsync_init = NULL;
 
 	mfd->ov0_wb_buf = MDP_ALLOC(sizeof(struct mdp_buf_type));
 	mfd->ov1_wb_buf = MDP_ALLOC(sizeof(struct mdp_buf_type));
@@ -2780,7 +2749,8 @@
 	case MIPI_VIDEO_PANEL:
 #ifndef CONFIG_FB_MSM_MDP303
 		mipi = &mfd->panel_info.mipi;
-		mdp4_dsi_vsync_init(0);
+		mfd->vsync_init = mdp4_dsi_vsync_init;
+		mfd->vsync_show = mdp4_dsi_video_show_event;
 		mfd->hw_refresh = TRUE;
 		mfd->dma_fnc = mdp4_dsi_video_overlay;
 		mfd->lut_update = mdp_lut_update_lcdc;
@@ -2804,6 +2774,7 @@
 		mfd->start_histogram = mdp_histogram_start;
 		mfd->stop_histogram = mdp_histogram_stop;
 		mfd->vsync_ctrl = mdp_dma_video_vsync_ctrl;
+		mfd->vsync_show = mdp_dma_video_show_event;
 		if (mfd->panel_info.pdest == DISPLAY_1)
 			mfd->dma = &dma2_data;
 		else {
@@ -2824,7 +2795,8 @@
 #ifndef CONFIG_FB_MSM_MDP303
 		mfd->dma_fnc = mdp4_dsi_cmd_overlay;
 		mipi = &mfd->panel_info.mipi;
-		mdp4_dsi_rdptr_init(0);
+		mfd->vsync_init = mdp4_dsi_rdptr_init;
+		mfd->vsync_show = mdp4_dsi_cmd_show_event;
 		if (mfd->panel_info.pdest == DISPLAY_1) {
 			if_no = PRIMARY_INTF_SEL;
 			mfd->dma = &dma2_data;
@@ -2843,6 +2815,7 @@
 		mfd->start_histogram = mdp_histogram_start;
 		mfd->stop_histogram = mdp_histogram_stop;
 		mfd->vsync_ctrl = mdp_dma_vsync_ctrl;
+		mfd->vsync_show = mdp_dma_show_event;
 		if (mfd->panel_info.pdest == DISPLAY_1)
 			mfd->dma = &dma2_data;
 		else {
@@ -2860,7 +2833,8 @@
 
 #ifdef CONFIG_FB_MSM_DTV
 	case DTV_PANEL:
-		mdp4_dtv_vsync_init(0);
+		mfd->vsync_init = mdp4_dtv_vsync_init;
+		mfd->vsync_show = mdp4_dtv_show_event;
 		pdata->on = mdp4_dtv_on;
 		pdata->off = mdp4_dtv_off;
 		mfd->hw_refresh = TRUE;
@@ -2899,7 +2873,8 @@
 #endif
 
 #ifdef CONFIG_FB_MSM_MDP40
-		mdp4_lcdc_vsync_init(0);
+		mfd->vsync_init = mdp4_lcdc_vsync_init;
+		mfd->vsync_show = mdp4_lcdc_show_event;
 		if (mfd->panel.type == HDMI_PANEL) {
 			mfd->dma = &dma_e_data;
 			mdp4_display_intf_sel(EXTERNAL_INTF_SEL, LCDC_RGB_INTF);
@@ -2910,6 +2885,7 @@
 #else
 		mfd->dma = &dma2_data;
 		mfd->vsync_ctrl = mdp_dma_lcdc_vsync_ctrl;
+		mfd->vsync_show = mdp_dma_lcdc_show_event;
 		spin_lock_irqsave(&mdp_spin_lock, flag);
 		mdp_intr_mask &= ~MDP_DMA_P_DONE;
 		outp32(MDP_INTR_ENABLE, mdp_intr_mask);
@@ -3010,6 +2986,29 @@
 
 	pdev_list[pdev_list_cnt++] = pdev;
 	mdp4_extn_disp = 0;
+
+	if (mfd->vsync_init != NULL) {
+		mfd->vsync_init(0);
+
+		if (!mfd->vsync_sysfs_created) {
+			mfd->dev_attr.attr.name = "vsync_event";
+			mfd->dev_attr.attr.mode = S_IRUGO;
+			mfd->dev_attr.show = mfd->vsync_show;
+			sysfs_attr_init(&mfd->dev_attr.attr);
+
+			rc = sysfs_create_file(&mfd->fbi->dev->kobj,
+							&mfd->dev_attr.attr);
+			if (rc) {
+				pr_err("%s: sysfs creation failed, ret=%d\n",
+					__func__, rc);
+				return rc;
+			}
+
+			kobject_uevent(&mfd->fbi->dev->kobj, KOBJ_ADD);
+			pr_debug("%s: kobject_uevent(KOBJ_ADD)\n", __func__);
+			mfd->vsync_sysfs_created = 1;
+		}
+	}
 	return 0;
 
       mdp_probe_err:
@@ -3119,7 +3118,8 @@
 {
 	mdp_suspend_sub();
 #ifdef CONFIG_FB_MSM_DTV
-	mdp4_dtv_set_black_screen(FALSE);
+	mdp4_solidfill_commit(MDP4_MIXER1);
+	mdp4_dtv_set_black_screen();
 #endif
 	mdp_footswitch_ctrl(FALSE);
 }
diff --git a/drivers/video/msm/mdp.h b/drivers/video/msm/mdp.h
index b4a7f79..0bc2532 100644
--- a/drivers/video/msm/mdp.h
+++ b/drivers/video/msm/mdp.h
@@ -862,6 +862,12 @@
 void mdp_dma_vsync_ctrl(int enable);
 void mdp_dma_video_vsync_ctrl(int enable);
 void mdp_dma_lcdc_vsync_ctrl(int enable);
+ssize_t mdp_dma_show_event(struct device *dev,
+		struct device_attribute *attr, char *buf);
+ssize_t mdp_dma_video_show_event(struct device *dev,
+		struct device_attribute *attr, char *buf);
+ssize_t mdp_dma_lcdc_show_event(struct device *dev,
+		struct device_attribute *attr, char *buf);
 
 #ifdef MDP_HW_VSYNC
 void vsync_clk_prepare_enable(void);
diff --git a/drivers/video/msm/mdp4.h b/drivers/video/msm/mdp4.h
index d9da6f9..6edc9f2 100644
--- a/drivers/video/msm/mdp4.h
+++ b/drivers/video/msm/mdp4.h
@@ -531,7 +531,7 @@
 }
 #endif /* CONFIG_FB_MSM_DTV */
 
-void mdp4_dtv_set_black_screen(bool commit);
+void mdp4_dtv_set_black_screen(void);
 
 int mdp4_overlay_dtv_set(struct msm_fb_data_type *mfd,
 			struct mdp4_overlay_pipe *pipe);
@@ -557,6 +557,14 @@
 void mdp4_dsi_vsync_init(int cndx);
 void mdp4_lcdc_vsync_init(int cndx);
 void mdp4_dtv_vsync_init(int cndx);
+ssize_t mdp4_dsi_cmd_show_event(struct device *dev,
+	struct device_attribute *attr, char *buf);
+ssize_t mdp4_dsi_video_show_event(struct device *dev,
+	struct device_attribute *attr, char *buf);
+ssize_t mdp4_lcdc_show_event(struct device *dev,
+	struct device_attribute *attr, char *buf);
+ssize_t mdp4_dtv_show_event(struct device *dev,
+	struct device_attribute *attr, char *buf);
 void mdp4_overlay_dsi_state_set(int state);
 int mdp4_overlay_dsi_state_get(void);
 void mdp4_overlay_rgb_setup(struct mdp4_overlay_pipe *pipe);
@@ -581,10 +589,11 @@
 int mdp4_overlay_play_wait(struct fb_info *info,
 	struct msmfb_overlay_data *req);
 int mdp4_overlay_play(struct fb_info *info, struct msmfb_overlay_data *req);
-int mdp4_overlay_commit(struct fb_info *info, int mixer);
+int mdp4_overlay_commit(struct fb_info *info);
 struct mdp4_overlay_pipe *mdp4_overlay_pipe_alloc(int ptype, int mixer);
 void mdp4_overlay_dma_commit(int mixer);
 void mdp4_overlay_vsync_commit(struct mdp4_overlay_pipe *pipe);
+void mdp4_solidfill_commit(int mixer);
 void mdp4_mixer_stage_commit(int mixer);
 void mdp4_dsi_cmd_do_update(int cndx, struct mdp4_overlay_pipe *pipe);
 void mdp4_lcdc_pipe_queue(int cndx, struct mdp4_overlay_pipe *pipe);
@@ -980,9 +989,15 @@
 {
 	/* empty */
 }
+static inline int mdp4_wfd_pipe_commit(struct msm_fb_data_type *mfd,
+					int cndx, int wait)
+{
+	return 0;
+}
 #else
 void mdp4_wfd_pipe_queue(int cndx, struct mdp4_overlay_pipe *pipe);
 void mdp4_wfd_init(int cndx);
+int mdp4_wfd_pipe_commit(struct msm_fb_data_type *mfd, int cndx, int wait);
 #endif
 
 #endif /* MDP_H */
diff --git a/drivers/video/msm/mdp4_dtv.c b/drivers/video/msm/mdp4_dtv.c
index bd0ce2f..4b83224 100644
--- a/drivers/video/msm/mdp4_dtv.c
+++ b/drivers/video/msm/mdp4_dtv.c
@@ -38,6 +38,7 @@
 
 static int dtv_off(struct platform_device *pdev);
 static int dtv_on(struct platform_device *pdev);
+static int dtv_off_sub(void);
 
 static struct platform_device *pdev_list[MSM_FB_MAX_DEV_LIST];
 static int pdev_list_cnt;
@@ -45,6 +46,9 @@
 static struct clk *tv_src_clk;
 static struct clk *hdmi_clk;
 static struct clk *mdp_tv_clk;
+static struct platform_device *dtv_pdev;
+static struct workqueue_struct *dtv_work_queue;
+static struct work_struct dtv_off_work;
 
 
 static int mdp4_dtv_runtime_suspend(struct device *dev)
@@ -86,8 +90,48 @@
 static int dtv_off(struct platform_device *pdev)
 {
 	int ret = 0;
+	struct msm_fb_data_type *mfd = NULL;
 
-	ret = panel_next_off(pdev);
+	if (!pdev) {
+		pr_err("%s: FAILED: invalid arg\n", __func__);
+		return -EINVAL;
+	}
+
+	mfd = platform_get_drvdata(pdev);
+	if (!mfd) {
+		pr_err("%s: FAILED: invalid mfd\n", __func__);
+		return -EINVAL;
+	}
+
+	dtv_pdev = pdev;
+	/*
+	 * If this is a suspend operation, handle the device
+	 * power down synchronously.
+	 * Otherwise, queue a work item to handle the power down
+	 * sequence, since we need to wait for the audio engine
+	 * to shut down before turning off the DTV device.
+	 */
+	if (!mfd->suspend.op_suspend) {
+		pr_debug("%s: Queuing work to turn off HDMI core\n", __func__);
+		queue_work(dtv_work_queue, &dtv_off_work);
+	} else {
+		pr_debug("%s: turning off HDMI core\n", __func__);
+		ret = dtv_off_sub();
+	}
+
+	return ret;
+}
+
+static int dtv_off_sub(void)
+{
+	int ret = 0;
+
+	if (!dtv_pdev) {
+		pr_err("%s: FAILED: invalid arg\n", __func__);
+		return -EINVAL;
+	}
+
+	ret = panel_next_off(dtv_pdev);
 
 	pr_info("%s\n", __func__);
 
@@ -112,12 +156,20 @@
 	return ret;
 }
 
+static void dtv_off_work_func(struct work_struct *work)
+{
+	dtv_off_sub();
+}
+
 static int dtv_on(struct platform_device *pdev)
 {
 	int ret = 0;
 	struct msm_fb_data_type *mfd;
 	unsigned long panel_pixclock_freq , pm_qos_rate;
 
+	/* If a power down is already underway, wait for it to finish */
+	flush_work_sync(&dtv_off_work);
+
 	mfd = platform_get_drvdata(pdev);
 	panel_pixclock_freq = mfd->fbi->var.pixclock;
 
@@ -215,6 +267,8 @@
 		return 0;
 	}
 
+	dtv_work_queue = create_singlethread_workqueue("dtv_work");
+	INIT_WORK(&dtv_off_work, dtv_off_work_func);
 	mfd = platform_get_drvdata(pdev);
 
 	if (!mfd)
@@ -302,6 +356,8 @@
 
 static int dtv_remove(struct platform_device *pdev)
 {
+	if (dtv_work_queue)
+		destroy_workqueue(dtv_work_queue);
 #ifdef CONFIG_MSM_BUS_SCALING
 	if (dtv_pdata && dtv_pdata->bus_scale_table &&
 		dtv_bus_scale_handle > 0)
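
The dtv_off() rework above defers the panel power-down to a dedicated worker unless a system suspend is in progress, because the HDMI audio engine has to shut down before the DTV core is turned off; dtv_on() flushes that worker so a power-up can never race a still-running power-down. A minimal sketch of this defer-unless-suspending pattern, with hypothetical names in place of dtv_work_queue/dtv_off_work:

	#include <linux/errno.h>
	#include <linux/workqueue.h>

	static struct workqueue_struct *example_wq;
	static struct work_struct example_off_work;

	static void example_power_down(void)
	{
		/* slow teardown that must wait for the audio engine */
	}

	static void example_off_work_func(struct work_struct *work)
	{
		example_power_down();
	}

	static int example_init(void)
	{
		example_wq = create_singlethread_workqueue("example_off");
		if (!example_wq)
			return -ENOMEM;
		INIT_WORK(&example_off_work, example_off_work_func);
		return 0;
	}

	static void example_off(bool in_suspend)
	{
		if (in_suspend)
			example_power_down();	/* suspend must not return early */
		else
			queue_work(example_wq, &example_off_work);
	}

	static void example_on(void)
	{
		/* wait for any in-flight power-down before powering up */
		flush_work_sync(&example_off_work);
		/* ... bring the interface back up ... */
	}
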
diff --git a/drivers/video/msm/mdp4_overlay.c b/drivers/video/msm/mdp4_overlay.c
index 11952f3..59565c1 100644
--- a/drivers/video/msm/mdp4_overlay.c
+++ b/drivers/video/msm/mdp4_overlay.c
@@ -48,6 +48,7 @@
 	struct mdp4_overlay_pipe *stage[MDP4_MIXER_MAX][MDP4_MIXER_STAGE_MAX];
 	struct mdp4_overlay_pipe *baselayer[MDP4_MIXER_MAX];
 	struct blend_cfg blend[MDP4_MIXER_MAX][MDP4_MIXER_STAGE_MAX];
+	struct mdp4_overlay_pipe sf_plist[MDP4_MIXER_MAX][OVERLAY_PIPE_MAX];
 	uint32 mixer_cfg[MDP4_MIXER_MAX];
 	uint32 flush[MDP4_MIXER_MAX];
 	struct iommu_free_list iommu_free[MDP4_MIXER_MAX];
@@ -121,6 +122,7 @@
 
 static struct ion_client *display_iclient;
 
+static void mdp4_overlay_bg_solidfill(struct blend_cfg *blend);
 
 /*
  * mdp4_overlay_iommu_unmap_freelist()
@@ -778,6 +780,7 @@
 		case MDP_Y_CRCB_H2V1:
 		case MDP_Y_CBCR_H2V1:
 		case MDP_Y_CRCB_H1V2:
+		case MDP_Y_CBCR_H1V2:
 			*luma_off = pipe->src_x +
 				(pipe->src_y * pipe->srcp0_ystride);
 			*chroma_off = pipe->src_x +
@@ -987,6 +990,7 @@
 	case MDP_Y_CRCB_H2V1:
 	case MDP_Y_CBCR_H2V1:
 	case MDP_Y_CRCB_H1V2:
+	case MDP_Y_CBCR_H1V2:
 	case MDP_Y_CRCB_H2V2:
 	case MDP_Y_CBCR_H2V2:
 	case MDP_Y_CBCR_H2V2_TILE:
@@ -1171,6 +1175,7 @@
 	case MDP_Y_CRCB_H2V1:
 	case MDP_Y_CBCR_H2V1:
 	case MDP_Y_CRCB_H1V2:
+	case MDP_Y_CBCR_H1V2:
 	case MDP_Y_CRCB_H2V2:
 	case MDP_Y_CBCR_H2V2:
 	case MDP_Y_CRCB_H1V1:
@@ -1211,6 +1216,10 @@
 			pipe->element1 = C1_B_Cb;
 			pipe->element0 = C2_R_Cr;
 			pipe->chroma_sample = MDP4_CHROMA_H1V2;
+		} else if (pipe->src_format == MDP_Y_CBCR_H1V2) {
+			pipe->element1 = C2_R_Cr;
+			pipe->element0 = C1_B_Cb;
+			pipe->chroma_sample = MDP4_CHROMA_H1V2;
 		} else if (pipe->src_format == MDP_Y_CRCB_H2V2) {
 			pipe->element1 = C1_B_Cb;
 			pipe->element0 = C2_R_Cr;
@@ -1372,6 +1381,7 @@
 	case MDP_Y_CRCB_H2V2:
 	case MDP_Y_CRCB_H2V1:
 	case MDP_Y_CRCB_H1V2:
+	case MDP_Y_CBCR_H1V2:
 	case MDP_Y_CRCB_H1V1:
 	case MDP_Y_CBCR_H1V1:
 	case MDP_YCRCB_H1V1:
@@ -1604,6 +1614,35 @@
 	return cnt;
 }
 
+void mdp4_solidfill_commit(int mixer)
+{
+	struct blend_cfg bcfg;
+	struct mdp4_overlay_pipe *pp = NULL;
+	int i = 0;
+
+	for (i = 0; i < OVERLAY_PIPE_MAX; i++) {
+		pp = &ctrl->sf_plist[mixer][i];
+		if (pp->pipe_ndx && pp->solid_fill) {
+			bcfg.solidfill = 1;
+			bcfg.solidfill_pipe = pp;
+			mdp4_overlay_bg_solidfill(&bcfg);
+			mdp4_overlay_reg_flush(pp, 1);
+			mdp4_mixer_stage_up(pp, 0);
+		}
+	}
+	mdp4_mixer_stage_commit(mixer);
+
+	for (i = 0; i < OVERLAY_PIPE_MAX; i++) {
+		pp = &ctrl->sf_plist[mixer][i];
+		if (pp->pipe_ndx && pp->solid_fill) {
+			mdp4_overlay_reg_flush(pp, 1);
+			mdp4_mixer_stage_down(pp, 0);
+			pp->solid_fill = 0;
+		}
+	}
+	mdp4_mixer_stage_commit(mixer);
+}
+
 void mdp4_mixer_stage_commit(int mixer)
 {
 	struct mdp4_overlay_pipe *pipe;
@@ -1701,7 +1740,8 @@
 			ctrl->stage[mixer][i] = NULL;  /* clear it */
 	}
 
-	if (commit || (mixer > 0 && !hdmi_prim_display))
+	if (commit || ((mixer == 1) && !hdmi_prim_display) ||
+		(mixer == 2))
 		mdp4_mixer_stage_commit(mixer);
 }
 /*
@@ -1945,7 +1985,7 @@
 	struct mdp4_overlay_pipe *d_pipe;
 	struct mdp4_overlay_pipe *s_pipe;
 	struct blend_cfg *blend;
-	int i, off, alpha_drop = 0;
+	int i, off, alpha_drop;
 	int d_alpha, s_alpha;
 	unsigned char *overlay_base;
 	uint32 c0, c1, c2, base_premulti;
@@ -1971,8 +2011,10 @@
 			d_alpha = 0;
 			continue;
 		}
+		alpha_drop = 0;	/* per stage */
 		/* alpha channel is lost on VG pipe when using QSEED or M/N */
 		if (s_pipe->pipe_type == OVERLAY_TYPE_VIDEO &&
+			s_pipe->alpha_enable &&
 			((s_pipe->op_mode & MDP4_OP_SCALEY_EN) ||
 			(s_pipe->op_mode & MDP4_OP_SCALEX_EN)) &&
 			!(s_pipe->op_mode & (MDP4_OP_SCALEX_PIXEL_RPT |
@@ -2085,7 +2127,9 @@
 		outpdw(overlay_base + off + 0x108, blend->fg_alpha);
 		outpdw(overlay_base + off + 0x10c, blend->bg_alpha);
 
-		if (mdp_rev >= MDP_REV_42)
+		if (mdp_rev >= MDP_REV_42 ||
+			ctrl->panel_mode & MDP4_PANEL_MDDI ||
+			 ctrl->panel_mode & MDP4_PANEL_DSI_CMD)
 			outpdw(overlay_base + off + 0x104, blend->op);
 
 		outpdw(overlay_base + (off << 5) + 0x1004, blend->co3_sel);
@@ -3113,9 +3157,6 @@
 								__func__);
 	}
 
-	if (hdmi_prim_display)
-		fill_black_screen(FALSE, pipe->pipe_num, pipe->mixer_num);
-
 	mdp4_overlay_mdp_pipe_req(pipe, mfd);
 
 	mutex_unlock(&mfd->dma->ov_mutex);
@@ -3189,9 +3230,13 @@
 
 	} else {	/* mixer1, DTV, ATV */
 		if (ctrl->panel_mode & MDP4_PANEL_DTV) {
-			if (hdmi_prim_display)
-				fill_black_screen(TRUE, pipe->pipe_num,
-					pipe->mixer_num);
+			if (hdmi_prim_display) {
+				struct mdp4_overlay_pipe *pp;
+				pp = &ctrl->sf_plist[pipe->mixer_num]
+					[pipe->pipe_ndx - 1];
+				*pp = *pipe; /* clone it */
+				pp->solid_fill = 1;
+			}
 			mdp4_overlay_dtv_unset(mfd, pipe);
 		}
 	}
@@ -3396,7 +3441,8 @@
 		pipe->srcp0_ystride = pipe->src_width;
 		if ((pipe->src_format == MDP_Y_CRCB_H1V1) ||
 			(pipe->src_format == MDP_Y_CBCR_H1V1) ||
-			(pipe->src_format == MDP_Y_CRCB_H1V2)) {
+			(pipe->src_format == MDP_Y_CRCB_H1V2) ||
+			(pipe->src_format == MDP_Y_CBCR_H1V2)) {
 			if (pipe->src_width > YUV_444_MAX_WIDTH)
 				pipe->srcp1_ystride = pipe->src_width << 2;
 			else
@@ -3496,8 +3542,9 @@
 	return ret;
 }
 
-int mdp4_overlay_commit(struct fb_info *info, int mixer)
+int mdp4_overlay_commit(struct fb_info *info)
 {
+	int ret = 0;
 	struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par;
 
 	if (mfd == NULL)
@@ -3506,34 +3553,37 @@
 	if (!mfd->panel_power_on) /* suspended */
 		return -EINVAL;
 
-	if (mixer >= MDP4_MIXER_MAX)
-		return -EPERM;
-
 	mutex_lock(&mfd->dma->ov_mutex);
 
 	mdp4_overlay_mdp_perf_upd(mfd, 1);
 
-	if (mixer == MDP4_MIXER0) {
-		if (ctrl->panel_mode & MDP4_PANEL_DSI_CMD) {
-			/* cndx = 0 */
-			mdp4_dsi_cmd_pipe_commit(0, 1);
-		} else if (ctrl->panel_mode & MDP4_PANEL_DSI_VIDEO) {
-			/* cndx = 0 */
-			mdp4_dsi_video_pipe_commit(0, 1);
-		} else if (ctrl->panel_mode & MDP4_PANEL_LCDC) {
-			/* cndx = 0 */
-			mdp4_lcdc_pipe_commit(0, 1);
-		}
-	} else if (mixer == MDP4_MIXER1) {
-		if (ctrl->panel_mode & MDP4_PANEL_DTV)
-			mdp4_dtv_pipe_commit(0, 1);
+	switch (mfd->panel.type) {
+	case MIPI_CMD_PANEL:
+		mdp4_dsi_cmd_pipe_commit(0, 1);
+		break;
+	case MIPI_VIDEO_PANEL:
+		mdp4_dsi_video_pipe_commit(0, 1);
+		break;
+	case LCDC_PANEL:
+		mdp4_lcdc_pipe_commit(0, 1);
+		break;
+	case DTV_PANEL:
+		mdp4_dtv_pipe_commit(0, 1);
+		break;
+	case WRITEBACK_PANEL:
+		mdp4_wfd_pipe_commit(mfd, 0, 1);
+		break;
+	default:
+		pr_err("Panel not supported for commit\n");
+		ret = -EINVAL;
+		break;
 	}
 
 	mdp4_overlay_mdp_perf_upd(mfd, 0);
 
 	mutex_unlock(&mfd->dma->ov_mutex);
 
-	return 0;
+	return ret;
 }
 
 struct msm_iommu_ctx {
diff --git a/drivers/video/msm/mdp4_overlay_dsi_cmd.c b/drivers/video/msm/mdp4_overlay_dsi_cmd.c
index 0015403..9cb2b34 100644
--- a/drivers/video/msm/mdp4_overlay_dsi_cmd.c
+++ b/drivers/video/msm/mdp4_overlay_dsi_cmd.c
@@ -662,7 +662,7 @@
 	mutex_unlock(&vctrl->update_lock);
 }
 
-static ssize_t vsync_show_event(struct device *dev,
+ssize_t mdp4_dsi_cmd_show_event(struct device *dev,
 		struct device_attribute *attr, char *buf)
 {
 	int cndx;
@@ -683,9 +683,12 @@
 	vctrl->wait_vsync_cnt++;
 	spin_unlock_irqrestore(&vctrl->spin_lock, flags);
 
-	ret = wait_for_completion_interruptible(&vctrl->vsync_comp);
-	if (ret)
-		return ret;
+	ret = wait_for_completion_interruptible_timeout(&vctrl->vsync_comp,
+		msecs_to_jiffies(VSYNC_PERIOD * 4));
+	if (ret <= 0) {
+		vctrl->wait_vsync_cnt = 0;
+		return -EBUSY;
+	}
 
 	spin_lock_irqsave(&vctrl->spin_lock, flags);
 	vsync_tick = ktime_to_ns(vctrl->vsync_time);
@@ -718,6 +721,7 @@
 	init_completion(&vctrl->dmap_comp);
 	init_completion(&vctrl->vsync_comp);
 	spin_lock_init(&vctrl->spin_lock);
+	atomic_set(&vctrl->suspend, 1);
 	INIT_WORK(&vctrl->clk_work, clk_ctrl_work);
 }
 
@@ -986,14 +990,6 @@
 	mdp4_dsi_cmd_do_blt(mfd, req->enable);
 }
 
-static DEVICE_ATTR(vsync_event, S_IRUGO, vsync_show_event, NULL);
-static struct attribute *vsync_fs_attrs[] = {
-	&dev_attr_vsync_event.attr,
-	NULL,
-};
-static struct attribute_group vsync_fs_attr_group = {
-	.attrs = vsync_fs_attrs,
-};
 int mdp4_dsi_cmd_on(struct platform_device *pdev)
 {
 	int ret = 0;
@@ -1018,22 +1014,7 @@
 
 	atomic_set(&vctrl->suspend, 0);
 
-	if (!vctrl->sysfs_created) {
-		ret = sysfs_create_group(&vctrl->dev->kobj,
-			&vsync_fs_attr_group);
-		if (ret) {
-			pr_err("%s: sysfs group creation failed, ret=%d\n",
-				__func__, ret);
-			return ret;
-		}
-
-		kobject_uevent(&vctrl->dev->kobj, KOBJ_ADD);
-		pr_debug("%s: kobject_uevent(KOBJ_ADD)\n", __func__);
-		vctrl->sysfs_created = 1;
-	}
-
 	pr_debug("%s-:\n", __func__);
-
 	return ret;
 }
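
Several vsync sysfs read handlers in this patch (DSI command and video, LCDC, DTV) replace the unbounded wait_for_completion_interruptible() with a timed wait that bails out with -EBUSY after roughly four frame periods and clears the pending-waiter count, so a stalled pipeline cannot block the reader forever. A condensed sketch of that wait, with hypothetical names for the completion and counter:

	#include <linux/completion.h>
	#include <linux/errno.h>
	#include <linux/jiffies.h>

	#define EXAMPLE_VSYNC_PERIOD_MS	17	/* roughly one 60 Hz frame */

	static int example_wait_for_vsync(struct completion *vsync_comp,
					  int *wait_vsync_cnt)
	{
		long ret;

		ret = wait_for_completion_interruptible_timeout(vsync_comp,
				msecs_to_jiffies(EXAMPLE_VSYNC_PERIOD_MS * 4));
		if (ret <= 0) {
			/* 0 = timeout, <0 = interrupted: give up this wait */
			*wait_vsync_cnt = 0;
			return -EBUSY;
		}

		return 0;
	}
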
 
diff --git a/drivers/video/msm/mdp4_overlay_dsi_video.c b/drivers/video/msm/mdp4_overlay_dsi_video.c
index 501c4e6..239d9f5 100644
--- a/drivers/video/msm/mdp4_overlay_dsi_video.c
+++ b/drivers/video/msm/mdp4_overlay_dsi_video.c
@@ -168,6 +168,8 @@
 	pipe = vctrl->base_pipe;
 	mixer = pipe->mixer_num;
 
+	mdp_update_pm(vctrl->mfd, vctrl->vsync_time);
+
 	if (vp->update_cnt == 0) {
 		mutex_unlock(&vctrl->update_lock);
 		return cnt;
@@ -374,7 +376,7 @@
 	wait_for_completion(&vctrl->ov_comp);
 }
 
-static ssize_t vsync_show_event(struct device *dev,
+ssize_t mdp4_dsi_video_show_event(struct device *dev,
 	struct device_attribute *attr, char *buf)
 {
 	int cndx;
@@ -395,9 +397,12 @@
 		INIT_COMPLETION(vctrl->vsync_comp);
 	vctrl->wait_vsync_cnt++;
 	spin_unlock_irqrestore(&vctrl->spin_lock, flags);
-	ret = wait_for_completion_interruptible(&vctrl->vsync_comp);
-	if (ret)
-		return ret;
+	ret = wait_for_completion_interruptible_timeout(&vctrl->vsync_comp,
+		msecs_to_jiffies(VSYNC_PERIOD * 4));
+	if (ret <= 0) {
+		vctrl->wait_vsync_cnt = 0;
+		return -EBUSY;
+	}
 
 	spin_lock_irqsave(&vctrl->spin_lock, flags);
 	vsync_tick = ktime_to_ns(vctrl->vsync_time);
@@ -417,7 +422,7 @@
 		return;
 	}
 
-	pr_info("%s: ndx=%d\n", __func__, cndx);
+	pr_debug("%s: ndx=%d\n", __func__, cndx);
 
 	vctrl = &vsync_ctrl_db[cndx];
 	if (vctrl->inited)
@@ -429,7 +434,7 @@
 	init_completion(&vctrl->vsync_comp);
 	init_completion(&vctrl->dmap_comp);
 	init_completion(&vctrl->ov_comp);
-	atomic_set(&vctrl->suspend, 0);
+	atomic_set(&vctrl->suspend, 1);
 	atomic_set(&vctrl->vsync_resume, 1);
 	spin_lock_init(&vctrl->spin_lock);
 }
@@ -447,16 +452,6 @@
 	vctrl->base_pipe = pipe;
 }
 
-static DEVICE_ATTR(vsync_event, S_IRUGO, vsync_show_event, NULL);
-
-static struct attribute *vsync_fs_attrs[] = {
-	&dev_attr_vsync_event.attr,
-	NULL,
-};
-
-static struct attribute_group vsync_fs_attr_group = {
-	.attrs = vsync_fs_attrs,
-};
 int mdp4_dsi_video_on(struct platform_device *pdev)
 {
 	int dsi_width;
@@ -673,20 +668,6 @@
 
 	mdp_histogram_ctrl_all(TRUE);
 
-	if (!vctrl->sysfs_created) {
-		ret = sysfs_create_group(&vctrl->dev->kobj,
-			&vsync_fs_attr_group);
-		if (ret) {
-			pr_err("%s: sysfs group creation failed, ret=%d\n",
-				__func__, ret);
-			return ret;
-		}
-
-		kobject_uevent(&vctrl->dev->kobj, KOBJ_ADD);
-		pr_debug("%s: kobject_uevent(KOBJ_ADD)\n", __func__);
-		vctrl->sysfs_created = 1;
-	}
-
 	return ret;
 }
 
@@ -1131,7 +1112,6 @@
 		mdp4_dsi_video_pipe_queue(0, pipe);
 	}
 
-	mdp_update_pm(mfd, vsync_ctrl_db[0].vsync_time);
 	mdp4_overlay_mdp_perf_upd(mfd, 1);
 
 	cnt = mdp4_dsi_video_pipe_commit(cndx, 0);
diff --git a/drivers/video/msm/mdp4_overlay_dtv.c b/drivers/video/msm/mdp4_overlay_dtv.c
index 2d48781..1de5d6e 100644
--- a/drivers/video/msm/mdp4_overlay_dtv.c
+++ b/drivers/video/msm/mdp4_overlay_dtv.c
@@ -72,6 +72,7 @@
 	struct completion dmae_comp;
 	struct completion vsync_comp;
 	spinlock_t spin_lock;
+	struct msm_fb_data_type *mfd;
 	struct mdp4_overlay_pipe *base_pipe;
 	struct vsync_update vlist[2];
 	int vsync_irq_enabled;
@@ -180,6 +181,8 @@
 	mixer = pipe->mixer_num;
 	mdp4_overlay_iommu_unmap_freelist(mixer);
 
+	mdp_update_pm(vctrl->mfd, vctrl->vsync_time);
+
 	if (vp->update_cnt == 0) {
 		mutex_unlock(&vctrl->update_lock);
 		return 0;
@@ -310,7 +313,7 @@
 	wait_for_completion(&vctrl->dmae_comp);
 }
 
-static ssize_t vsync_show_event(struct device *dev,
+ssize_t mdp4_dtv_show_event(struct device *dev,
 		struct device_attribute *attr, char *buf)
 {
 	int cndx;
@@ -337,9 +340,12 @@
 	vctrl->wait_vsync_cnt++;
 	spin_unlock_irqrestore(&vctrl->spin_lock, flags);
 
-	ret = wait_for_completion_interruptible(&vctrl->vsync_comp);
-	if (ret)
-		return ret;
+	ret = wait_for_completion_interruptible_timeout(&vctrl->vsync_comp,
+		msecs_to_jiffies(VSYNC_PERIOD * 4));
+	if (ret <= 0) {
+		vctrl->wait_vsync_cnt = 0;
+		return -EBUSY;
+	}
 
 	spin_lock_irqsave(&vctrl->spin_lock, flags);
 	vg1fd = vctrl->vg1fd;
@@ -382,7 +388,7 @@
 	init_completion(&vctrl->vsync_comp);
 	init_completion(&vctrl->ov_comp);
 	init_completion(&vctrl->dmae_comp);
-	atomic_set(&vctrl->suspend, 0);
+	atomic_set(&vctrl->suspend, 1);
 	atomic_set(&vctrl->vsync_resume, 1);
 	spin_lock_init(&vctrl->spin_lock);
 }
@@ -439,6 +445,9 @@
 	int hsync_end_x;
 	struct fb_info *fbi;
 	struct fb_var_screeninfo *var;
+	struct vsycn_ctrl *vctrl;
+
+	vctrl = &vsync_ctrl_db[0];
 
 	if (!mfd)
 		return -ENODEV;
@@ -449,6 +458,8 @@
 	fbi = mfd->fbi;
 	var = &fbi->var;
 
+	vctrl->mfd = mfd;
+
 	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
 	if (hdmi_prim_display) {
 		if (is_mdp4_hw_reset()) {
@@ -561,15 +572,6 @@
 	return 0;
 }
 
-static DEVICE_ATTR(vsync_event, S_IRUGO, vsync_show_event, NULL);
-static struct attribute *vsync_fs_attrs[] = {
-	&dev_attr_vsync_event.attr,
-	NULL,
-};
-static struct attribute_group vsync_fs_attr_group = {
-	.attrs = vsync_fs_attrs,
-};
-
 int mdp4_dtv_on(struct platform_device *pdev)
 {
 	struct msm_fb_data_type *mfd;
@@ -609,21 +611,6 @@
 		pr_warn("%s: panel_next_on failed", __func__);
 
 	atomic_set(&vctrl->suspend, 0);
-
-	if (!vctrl->sysfs_created) {
-		ret = sysfs_create_group(&vctrl->dev->kobj,
-			&vsync_fs_attr_group);
-		if (ret) {
-			pr_err("%s: sysfs group creation failed, ret=%d\n",
-				__func__, ret);
-			return ret;
-		}
-
-		kobject_uevent(&vctrl->dev->kobj, KOBJ_ADD);
-		pr_debug("%s: kobject_uevent(KOBJ_ADD)\n", __func__);
-		vctrl->sysfs_created = 1;
-	}
-
 	if (mfd->avtimer_phy && (vctrl->avtimer == NULL)) {
 		vctrl->avtimer = (uint32 *)ioremap(mfd->avtimer_phy, 8);
 		if (vctrl->avtimer == NULL)
@@ -1004,22 +991,27 @@
 	spin_unlock(&vctrl->spin_lock);
 }
 
-void mdp4_dtv_set_black_screen(bool commit)
+void mdp4_dtv_set_black_screen(void)
 {
 	char *rgb_base;
 	/*Black color*/
 	uint32 color = 0x00000000;
 	uint32 temp_src_format;
-	int cndx = 0;
+	int commit = 1, cndx = 0;
+	int pipe_num = OVERLAY_PIPE_RGB1;
 	struct vsycn_ctrl *vctrl;
 
 	vctrl = &vsync_ctrl_db[cndx];
-	if (vctrl->base_pipe == NULL || !hdmi_prim_display) {
-		pr_debug("dtv_pipe is not configured yet\n");
+	if (!hdmi_prim_display)
 		return;
-	}
+
+	if (vctrl->base_pipe == NULL)
+		commit = 0;
+	else
+		pipe_num = vctrl->base_pipe->pipe_num;
+
 	rgb_base = MDP_BASE;
-	rgb_base += (MDP4_RGB_OFF * (vctrl->base_pipe->pipe_num + 2));
+	rgb_base += (MDP4_RGB_OFF * (pipe_num + 2));
 
 	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
 
@@ -1034,13 +1026,12 @@
 		mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
 
 		mdp4_overlay_reg_flush(vctrl->base_pipe, 1);
-
 		mdp4_mixer_stage_up(vctrl->base_pipe, 0);
 		mdp4_mixer_stage_commit(vctrl->base_pipe->mixer_num);
 	} else {
 		/* MDP_OVERLAY_REG_FLUSH for pipe*/
 		MDP_OUTP(MDP_BASE + 0x18000,
-			BIT(vctrl->base_pipe->pipe_num + 2) | BIT(MDP4_MIXER1));
+			BIT(pipe_num + 2) | BIT(MDP4_MIXER1));
 		mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
 	}
 }
@@ -1153,7 +1144,6 @@
 		pipe->srcp0_addr = (uint32)mfd->ibuf.buf;
 		mdp4_dtv_pipe_queue(0, pipe);
 	}
-	mdp_update_pm(mfd, vsync_ctrl_db[0].vsync_time);
 
 	if (hdmi_prim_display)
 		wait = 1;
diff --git a/drivers/video/msm/mdp4_overlay_lcdc.c b/drivers/video/msm/mdp4_overlay_lcdc.c
index df5c262..a7058ce 100644
--- a/drivers/video/msm/mdp4_overlay_lcdc.c
+++ b/drivers/video/msm/mdp4_overlay_lcdc.c
@@ -172,6 +172,8 @@
 	pipe = vctrl->base_pipe;
 	mixer = pipe->mixer_num;
 
+	mdp_update_pm(vctrl->mfd, vctrl->vsync_time);
+
 	if (vp->update_cnt == 0) {
 		mutex_unlock(&vctrl->update_lock);
 		return 0;
@@ -359,7 +361,7 @@
 	wait_for_completion(&vctrl->ov_comp);
 }
 
-static ssize_t vsync_show_event(struct device *dev,
+ssize_t mdp4_lcdc_show_event(struct device *dev,
 		struct device_attribute *attr, char *buf)
 {
 	int cndx;
@@ -380,9 +382,12 @@
 		INIT_COMPLETION(vctrl->vsync_comp);
 	vctrl->wait_vsync_cnt++;
 	spin_unlock_irqrestore(&vctrl->spin_lock, flags);
-	ret = wait_for_completion_interruptible(&vctrl->vsync_comp);
-	if (ret)
-		return ret;
+	ret = wait_for_completion_interruptible_timeout(&vctrl->vsync_comp,
+		msecs_to_jiffies(VSYNC_PERIOD * 4));
+	if (ret <= 0) {
+		vctrl->wait_vsync_cnt = 0;
+		return -EBUSY;
+	}
 
 	spin_lock_irqsave(&vctrl->spin_lock, flags);
 	vsync_tick = ktime_to_ns(vctrl->vsync_time);
@@ -414,7 +419,7 @@
 	init_completion(&vctrl->vsync_comp);
 	init_completion(&vctrl->dmap_comp);
 	init_completion(&vctrl->ov_comp);
-	atomic_set(&vctrl->suspend, 0);
+	atomic_set(&vctrl->suspend, 1);
 	atomic_set(&vctrl->vsync_resume, 1);
 	spin_lock_init(&vctrl->spin_lock);
 }
@@ -432,15 +437,6 @@
 	vctrl->base_pipe = pipe;
 }
 
-static DEVICE_ATTR(vsync_event, S_IRUGO, vsync_show_event, NULL);
-static struct attribute *vsync_fs_attrs[] = {
-	&dev_attr_vsync_event.attr,
-	NULL,
-};
-static struct attribute_group vsync_fs_attr_group = {
-	.attrs = vsync_fs_attrs,
-};
-
 int mdp4_lcdc_on(struct platform_device *pdev)
 {
 	int lcdc_width;
@@ -655,20 +651,6 @@
 
 	mdp_histogram_ctrl_all(TRUE);
 
-	if (!vctrl->sysfs_created) {
-		ret = sysfs_create_group(&vctrl->dev->kobj,
-			&vsync_fs_attr_group);
-		if (ret) {
-			pr_err("%s: sysfs group creation failed, ret=%d\n",
-				__func__, ret);
-			return ret;
-		}
-
-		kobject_uevent(&vctrl->dev->kobj, KOBJ_ADD);
-		pr_debug("%s: kobject_uevent(KOBJ_ADD)\n", __func__);
-		vctrl->sysfs_created = 1;
-	}
-
 	return ret;
 }
 
@@ -989,7 +971,6 @@
 
 		mdp4_lcdc_pipe_queue(0, pipe);
 	}
-	mdp_update_pm(mfd, vsync_ctrl_db[0].vsync_time);
 
 	mdp4_overlay_mdp_perf_upd(mfd, 1);
 
diff --git a/drivers/video/msm/mdp4_overlay_writeback.c b/drivers/video/msm/mdp4_overlay_writeback.c
index 18c6635..aa50d94 100644
--- a/drivers/video/msm/mdp4_overlay_writeback.c
+++ b/drivers/video/msm/mdp4_overlay_writeback.c
@@ -88,6 +88,10 @@
 }
 
 static int mdp4_overlay_writeback_update(struct msm_fb_data_type *mfd);
+static void mdp4_wfd_queue_wakeup(struct msm_fb_data_type *mfd,
+		struct msmfb_writeback_data_list *node);
+static void mdp4_wfd_dequeue_update(struct msm_fb_data_type *mfd,
+		struct msmfb_writeback_data_list **wfdnode);
 
 int mdp4_overlay_writeback_on(struct platform_device *pdev)
 {
@@ -171,6 +175,8 @@
 	struct vsycn_ctrl *vctrl;
 	struct mdp4_overlay_pipe *pipe;
 	int ret = 0;
+	int undx;
+	struct vsync_update *vp;
 
 	pr_debug("%s+:\n", __func__);
 
@@ -189,6 +195,16 @@
 	mdp4_overlay_pipe_free(pipe);
 	vctrl->base_pipe = NULL;
 
+	undx =  vctrl->update_ndx;
+	vp = &vctrl->vlist[undx];
+	if (vp->update_cnt) {
+		/*
+		 * the pipe's iommu mapping is freed at the next overlay
+		 * play and the iommu_drop statistic is incremented by one
+		 */
+		vp->update_cnt = 0;     /* empty queue */
+	}
+
 	ret = panel_next_off(pdev);
 
 	mdp_clk_ctrl(1);
@@ -253,6 +269,12 @@
 	mdp4_mixer_stage_up(pipe, 0);
 
 	mdp4_overlayproc_cfg(pipe);
+
+	if (hdmi_prim_display)
+		outpdw(MDP_BASE + 0x100F4, 0x01);
+	else
+		outpdw(MDP_BASE + 0x100F4, 0x02);
+
 	/* MDP cmd block disable */
 	mdp_clk_ctrl(0);
 
@@ -299,7 +321,8 @@
 
 static void mdp4_wfd_wait4ov(int cndx);
 
-int mdp4_wfd_pipe_commit(void)
+int mdp4_wfd_pipe_commit(struct msm_fb_data_type *mfd,
+			int cndx, int wait)
 {
 	int  i, undx;
 	int mixer = 0;
@@ -309,8 +332,9 @@
 	struct mdp4_overlay_pipe *real_pipe;
 	unsigned long flags;
 	int cnt = 0;
+	struct msmfb_writeback_data_list *node = NULL;
 
-	vctrl = &vsync_ctrl_db[0];
+	vctrl = &vsync_ctrl_db[cndx];
 
 	mutex_lock(&vctrl->update_lock);
 	undx =  vctrl->update_ndx;
@@ -328,6 +352,8 @@
 	vp->update_cnt = 0;     /* reset */
 	mutex_unlock(&vctrl->update_lock);
 
+	mdp4_wfd_dequeue_update(mfd, &node);
+
 	/* free previous committed iommu back to pool */
 	mdp4_overlay_iommu_unmap_freelist(mixer);
 
@@ -365,6 +391,11 @@
 
 	mdp4_stat.overlay_commit[pipe->mixer_num]++;
 
+	if (wait)
+		mdp4_wfd_wait4ov(cndx);
+
+	mdp4_wfd_queue_wakeup(mfd, node);
+
 	return cnt;
 }
 
@@ -426,7 +457,6 @@
 
 void mdp4_writeback_overlay(struct msm_fb_data_type *mfd)
 {
-	struct msmfb_writeback_data_list *node = NULL;
 	struct vsycn_ctrl *vctrl;
 	struct mdp4_overlay_pipe *pipe;
 
@@ -438,36 +468,7 @@
 	vctrl = &vsync_ctrl_db[0];
 	pipe = vctrl->base_pipe;
 
-	mutex_lock(&mfd->unregister_mutex);
-	mutex_lock(&mfd->writeback_mutex);
-	if (!list_empty(&mfd->writeback_free_queue)
-		&& mfd->writeback_state != WB_STOPING
-		&& mfd->writeback_state != WB_STOP) {
-		node = list_first_entry(&mfd->writeback_free_queue,
-				struct msmfb_writeback_data_list, active_entry);
-	}
-	if (node) {
-		list_del(&(node->active_entry));
-		node->state = IN_BUSY_QUEUE;
-		mfd->writeback_active_cnt++;
-	}
-	mutex_unlock(&mfd->writeback_mutex);
-
-	pipe->ov_blt_addr = (ulong) (node ? node->addr : NULL);
-
-	if (!pipe->ov_blt_addr) {
-		pr_err("%s: no writeback buffer 0x%x, %p\n", __func__,
-			(unsigned int)pipe->ov_blt_addr, node);
-		mutex_unlock(&mfd->unregister_mutex);
-		return;
-	}
-
 	mutex_lock(&mfd->dma->ov_mutex);
-	if (pipe && !pipe->ov_blt_addr) {
-		pr_err("%s: no writeback buffer 0x%x\n", __func__,
-				(unsigned int)pipe->ov_blt_addr);
-		goto fail_no_blt_addr;
-	}
 
 	if (pipe->pipe_type == OVERLAY_TYPE_RGB)
 		mdp4_wfd_pipe_queue(0, pipe);
@@ -475,26 +476,15 @@
 	mdp4_overlay_mdp_perf_upd(mfd, 1);
 
 	mdp_clk_ctrl(1);
-	mdp4_overlay_writeback_update(mfd);
 
-	mdp4_wfd_pipe_commit();
+	mdp4_wfd_pipe_commit(mfd, 0, 1);
 
 	mdp4_overlay_mdp_perf_upd(mfd, 0);
 
-	mdp4_wfd_wait4ov(0);
 	mdp_clk_ctrl(0);
 
-	mutex_lock(&mfd->writeback_mutex);
-	list_add_tail(&node->active_entry, &mfd->writeback_busy_queue);
-	mfd->writeback_active_cnt--;
-	mutex_unlock(&mfd->writeback_mutex);
-	wake_up(&mfd->wait_q);
-fail_no_blt_addr:
-	/*NOTE: This api was removed
-	  mdp4_overlay_resource_release();*/
 	mutex_unlock(&mfd->dma->ov_mutex);
-	mutex_unlock(&mfd->unregister_mutex);
-	pr_debug("%s:-\n", __func__);
 }
 
 static int mdp4_overlay_writeback_register_buffer(
@@ -745,3 +735,68 @@
 	mutex_unlock(&mfd->unregister_mutex);
 	return rc;
 }
+
+static void mdp4_wfd_dequeue_update(struct msm_fb_data_type *mfd,
+			struct msmfb_writeback_data_list **wfdnode)
+{
+	struct vsycn_ctrl *vctrl;
+	struct mdp4_overlay_pipe *pipe;
+	struct msmfb_writeback_data_list *node = NULL;
+
+	if (mfd && !mfd->panel_power_on)
+		return;
+
+	pr_debug("%s:+ mfd=%x\n", __func__, (int)mfd);
+
+	vctrl = &vsync_ctrl_db[0];
+	pipe = vctrl->base_pipe;
+
+	mutex_lock(&mfd->unregister_mutex);
+	mutex_lock(&mfd->writeback_mutex);
+	if (!list_empty(&mfd->writeback_free_queue)
+		&& mfd->writeback_state != WB_STOPING
+		&& mfd->writeback_state != WB_STOP) {
+		node = list_first_entry(&mfd->writeback_free_queue,
+				struct msmfb_writeback_data_list, active_entry);
+	}
+	if (node) {
+		list_del(&(node->active_entry));
+		node->state = IN_BUSY_QUEUE;
+		mfd->writeback_active_cnt++;
+	}
+	mutex_unlock(&mfd->writeback_mutex);
+
+	pipe->ov_blt_addr = (ulong) (node ? node->addr : NULL);
+
+	if (!pipe->ov_blt_addr) {
+		pr_err("%s: no writeback buffer 0x%x, %p\n", __func__,
+			(unsigned int)pipe->ov_blt_addr, node);
+		mutex_unlock(&mfd->unregister_mutex);
+		return;
+	}
+
+	mdp4_overlay_writeback_update(mfd);
+
+	*wfdnode = node;
+
+	mutex_unlock(&mfd->unregister_mutex);
+}
+
+static void mdp4_wfd_queue_wakeup(struct msm_fb_data_type *mfd,
+			struct msmfb_writeback_data_list *node)
+{
+	if (mfd && !mfd->panel_power_on)
+		return;
+
+	if (node == NULL)
+		return;
+
+	pr_debug("%s: mfd=%x node: %p", __func__, (int)mfd, node);
+
+	mutex_lock(&mfd->writeback_mutex);
+	list_add_tail(&node->active_entry, &mfd->writeback_busy_queue);
+	mfd->writeback_active_cnt--;
+	mutex_unlock(&mfd->writeback_mutex);
+	wake_up(&mfd->wait_q);
+}
diff --git a/drivers/video/msm/mdp_dma_dsi_video.c b/drivers/video/msm/mdp_dma_dsi_video.c
index e2fb8ba..cfbff9a 100644
--- a/drivers/video/msm/mdp_dma_dsi_video.c
+++ b/drivers/video/msm/mdp_dma_dsi_video.c
@@ -34,7 +34,7 @@
 static int first_pixel_start_x;
 static int first_pixel_start_y;
 
-static ssize_t vsync_show_event(struct device *dev,
+ssize_t mdp_dma_video_show_event(struct device *dev,
 		struct device_attribute *attr, char *buf)
 {
 	ssize_t ret = 0;
@@ -52,15 +52,6 @@
 	return ret;
 }
 
-static DEVICE_ATTR(vsync_event, S_IRUGO, vsync_show_event, NULL);
-static struct attribute *vsync_fs_attrs[] = {
-	&dev_attr_vsync_event.attr,
-	NULL,
-};
-static struct attribute_group vsync_fs_attr_group = {
-	.attrs = vsync_fs_attrs,
-};
-
 int mdp_dsi_video_on(struct platform_device *pdev)
 {
 	int dsi_width;
@@ -254,20 +245,6 @@
 	/* MDP cmd block disable */
 	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
 
-	if (!vsync_cntrl.sysfs_created) {
-		ret = sysfs_create_group(&vsync_cntrl.dev->kobj,
-			&vsync_fs_attr_group);
-		if (ret) {
-			pr_err("%s: sysfs creation failed, ret=%d\n",
-				__func__, ret);
-			return ret;
-		}
-
-		kobject_uevent(&vsync_cntrl.dev->kobj, KOBJ_ADD);
-		pr_debug("%s: kobject_uevent(KOBJ_ADD)\n", __func__);
-		vsync_cntrl.sysfs_created = 1;
-	}
-
 	return ret;
 }
 
diff --git a/drivers/video/msm/mdp_dma_lcdc.c b/drivers/video/msm/mdp_dma_lcdc.c
index fbfe35f..c51a99a 100644
--- a/drivers/video/msm/mdp_dma_lcdc.c
+++ b/drivers/video/msm/mdp_dma_lcdc.c
@@ -51,7 +51,7 @@
 int first_pixel_start_x;
 int first_pixel_start_y;
 
-static ssize_t vsync_show_event(struct device *dev,
+ssize_t mdp_dma_lcdc_show_event(struct device *dev,
 		struct device_attribute *attr, char *buf)
 {
 	ssize_t ret = 0;
@@ -69,15 +69,6 @@
 	return ret;
 }
 
-static DEVICE_ATTR(vsync_event, S_IRUGO, vsync_show_event, NULL);
-static struct attribute *vsync_fs_attrs[] = {
-	&dev_attr_vsync_event.attr,
-	NULL,
-};
-static struct attribute_group vsync_fs_attr_group = {
-	.attrs = vsync_fs_attrs,
-};
-
 int mdp_lcdc_on(struct platform_device *pdev)
 {
 	int lcdc_width;
@@ -320,20 +311,6 @@
 	/* MDP cmd block disable */
 	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
 
-	if (!vsync_cntrl.sysfs_created) {
-		ret = sysfs_create_group(&vsync_cntrl.dev->kobj,
-			&vsync_fs_attr_group);
-		if (ret) {
-			pr_err("%s: sysfs creation failed, ret=%d\n",
-				__func__, ret);
-			return ret;
-		}
-
-		kobject_uevent(&vsync_cntrl.dev->kobj, KOBJ_ADD);
-		pr_debug("%s: kobject_uevent(KOBJ_ADD)\n", __func__);
-		vsync_cntrl.sysfs_created = 1;
-	}
-
 	return ret;
 }
 
diff --git a/drivers/video/msm/mdss/Kconfig b/drivers/video/msm/mdss/Kconfig
index 424455f..56eb90c 100644
--- a/drivers/video/msm/mdss/Kconfig
+++ b/drivers/video/msm/mdss/Kconfig
@@ -11,3 +11,12 @@
 	---help---
 	The MDSS HDMI Panel provides support for transmitting TMDS signals of
 	MDSS frame buffer data to connected hdmi compliant TVs, monitors etc.
+
+config FB_MSM_MDSS_HDMI_MHL_8334
+	depends on FB_MSM_MDSS_HDMI_PANEL
+	bool "MHL SII8334 support"
+	default n
+	---help---
+	  Support HDMI to MHL conversion.
+	  MHL (Mobile High-Definition Link) technology
+	  uses a USB connector to output HDMI content.
diff --git a/drivers/video/msm/mdss/Makefile b/drivers/video/msm/mdss/Makefile
index b4bd31e..88a7c45 100644
--- a/drivers/video/msm/mdss/Makefile
+++ b/drivers/video/msm/mdss/Makefile
@@ -18,5 +18,6 @@
 obj-$(CONFIG_FB_MSM_MDSS_HDMI_PANEL) += mdss_hdmi_tx.o
 obj-$(CONFIG_FB_MSM_MDSS_HDMI_PANEL) += mdss_hdmi_util.o
 obj-$(CONFIG_FB_MSM_MDSS_HDMI_PANEL) += mdss_hdmi_edid.o
+obj-$(CONFIG_FB_MSM_MDSS_HDMI_MHL_8334) += mhl_sii8334.o
 
 obj-$(CONFIG_FB_MSM_MDSS_WRITEBACK) += mdss_wb.o
diff --git a/drivers/video/msm/mdss/mdss.h b/drivers/video/msm/mdss/mdss.h
index 3c60c2b..d041125 100644
--- a/drivers/video/msm/mdss/mdss.h
+++ b/drivers/video/msm/mdss/mdss.h
@@ -21,6 +21,8 @@
 #include <linux/types.h>
 #include <linux/workqueue.h>
 
+#include <mach/iommu_domains.h>
+
 #define MDSS_REG_WRITE(addr, val) writel_relaxed(val, mdss_res->mdp_base + addr)
 #define MDSS_REG_READ(addr) readl_relaxed(mdss_res->mdp_base + addr)
 
@@ -34,6 +36,21 @@
 	MDSS_MAX_CLK
 };
 
+enum mdss_iommu_domain_type {
+	MDSS_IOMMU_DOMAIN_SECURE,
+	MDSS_IOMMU_DOMAIN_UNSECURE,
+	MDSS_IOMMU_MAX_DOMAIN
+};
+
+struct mdss_iommu_map_type {
+	char *client_name;
+	char *ctx_name;
+	struct device *ctx;
+	struct msm_iova_partition partitions[1];
+	int npartitions;
+	int domain_idx;
+};
+
 struct mdss_data_type {
 	u32 rev;
 	u32 mdp_rev;
@@ -72,8 +89,8 @@
 	u32 *mixer_type_map;
 
 	struct ion_client *iclient;
-	int iommu_domain;
 	int iommu_attached;
+	struct mdss_iommu_map_type *iommu_map;
 
 	struct early_suspend early_suspend;
 };
@@ -112,14 +129,14 @@
 	return mdss_res->iommu_attached;
 }
 
-static inline int mdss_get_iommu_domain(void)
+static inline int mdss_get_iommu_domain(u32 type)
 {
+	if (type >= MDSS_IOMMU_MAX_DOMAIN)
+		return -EINVAL;
+
 	if (!mdss_res)
 		return -ENODEV;
 
-	return mdss_res->iommu_domain;
+	return mdss_res->iommu_map[type].domain_idx;
 }
-
-int mdss_iommu_attach(void);
-int mdss_iommu_dettach(void);
 #endif /* MDSS_H */
diff --git a/drivers/video/msm/mdss/mdss_dsi.c b/drivers/video/msm/mdss/mdss_dsi.c
index 8f4f4d5..980ed46 100644
--- a/drivers/video/msm/mdss/mdss_dsi.c
+++ b/drivers/video/msm/mdss/mdss_dsi.c
@@ -327,6 +327,26 @@
 	return ret;
 }
 
+static int mdss_dsi_event_handler(struct mdss_panel_data *pdata,
+				  int event, void *arg)
+{
+	int rc = 0;
+
+	pr_debug("%s: event=%d\n", __func__, event);
+	switch (event) {
+	case MDSS_EVENT_UNBLANK:
+		rc = mdss_dsi_on(pdata);
+		break;
+	case MDSS_EVENT_BLANK:
+		rc = mdss_dsi_ctrl_unprepare(pdata);
+		break;
+	case MDSS_EVENT_TIMEGEN_OFF:
+		rc = mdss_dsi_off(pdata);
+		break;
+	}
+	return rc;
+}
+
 static int mdss_dsi_resource_initialized;
 
 static int __devinit mdss_dsi_probe(struct platform_device *pdev)
@@ -476,9 +496,7 @@
 	if (!ctrl_pdata)
 		return -ENOMEM;
 
-	(ctrl_pdata->panel_data).on = mdss_dsi_on;
-	(ctrl_pdata->panel_data).off = mdss_dsi_off;
-	(ctrl_pdata->panel_data).intf_unprepare = mdss_dsi_ctrl_unprepare;
+	ctrl_pdata->panel_data.event_handler = mdss_dsi_event_handler;
 	memcpy(&((ctrl_pdata->panel_data).panel_info),
 				&(panel_data->panel_info),
 				       sizeof(struct mdss_panel_info));
diff --git a/drivers/video/msm/mdss/mdss_dsi_host.c b/drivers/video/msm/mdss/mdss_dsi_host.c
index 125644e..8c3b1a8 100644
--- a/drivers/video/msm/mdss/mdss_dsi_host.c
+++ b/drivers/video/msm/mdss/mdss_dsi_host.c
@@ -1199,6 +1199,7 @@
 {
 	int len;
 	int i;
+	int domain = MDSS_IOMMU_DOMAIN_UNSECURE;
 	char *bp;
 	unsigned long size, addr;
 	struct mdss_dsi_ctrl_pdata *ctrl_pdata = NULL;
@@ -1229,7 +1230,7 @@
 
 	if (is_mdss_iommu_attached()) {
 		int ret = msm_iommu_map_contig_buffer(tp->dmap,
-					mdss_get_iommu_domain(), 0,
+					mdss_get_iommu_domain(domain), 0,
 					size, SZ_4K, 0, &(addr));
 		if (IS_ERR_VALUE(ret)) {
 			pr_err("unable to map dma memory to iommu(%d)\n", ret);
@@ -1251,8 +1252,8 @@
 	wait_for_completion(&dsi_dma_comp);
 
 	if (is_mdss_iommu_attached())
-		msm_iommu_unmap_contig_buffer(addr, mdss_get_iommu_domain(),
-					      0, size);
+		msm_iommu_unmap_contig_buffer(addr,
+			mdss_get_iommu_domain(domain), 0, size);
 
 	dma_unmap_single(&dsi_dev, tp->dmap, size, DMA_TO_DEVICE);
 	tp->dmap = 0;
diff --git a/drivers/video/msm/mdss/mdss_edp.c b/drivers/video/msm/mdss/mdss_edp.c
index b35be75..1cf3101 100644
--- a/drivers/video/msm/mdss/mdss_edp.c
+++ b/drivers/video/msm/mdss/mdss_edp.c
@@ -153,7 +153,7 @@
 	if (ret) {
 		pr_err("%s: Request reset gpio_panel_en failed, ret=%d\n",
 				__func__, ret);
-		goto gpio_free;
+		return ret;
 	}
 
 	ret = gpio_direction_output(edp_drv->gpio_panel_en, 1);
@@ -163,8 +163,6 @@
 		goto gpio_free;
 	}
 
-	gpio_set_value(edp_drv->gpio_panel_en, 1);
-
 	return 0;
 
 gpio_free:
@@ -212,13 +210,11 @@
 	if (ret) {
 		pr_err("%s: Request reset gpio_panel_pwm failed, ret=%d\n",
 				__func__, ret);
-		goto edp_free_gpio_pwm;
+		goto edp_free_pwm;
 	}
 
 	return 0;
 
-edp_free_gpio_pwm:
-	gpio_free(edp_drv->gpio_panel_pwm);
 edp_free_pwm:
 	pwm_free(edp_drv->bl_pwm);
 	return -ENODEV;
@@ -323,6 +319,7 @@
 	mdss_edp_config_sw_div(edp_drv->edp_base);
 	mdss_edp_config_static_mdiv(edp_drv->edp_base);
 	mdss_edp_enable(edp_drv->edp_base, 1);
+	gpio_set_value(edp_drv->gpio_panel_en, 1);
 
 	return 0;
 }
@@ -340,6 +337,7 @@
 		return -EINVAL;
 	}
 
+	gpio_set_value(edp_drv->gpio_panel_en, 0);
 	pwm_disable(edp_drv->bl_pwm);
 	mdss_edp_enable(edp_drv->edp_base, 0);
 	mdss_edp_unconfig_clk(edp_drv->edp_base, edp_drv->mmss_cc_base);
@@ -355,6 +353,23 @@
 	return ret;
 }
 
+static int mdss_edp_event_handler(struct mdss_panel_data *pdata,
+				  int event, void *arg)
+{
+	int rc = 0;
+
+	pr_debug("%s: event=%d\n", __func__, event);
+	switch (event) {
+	case MDSS_EVENT_UNBLANK:
+		rc = mdss_edp_on(pdata);
+		break;
+	case MDSS_EVENT_TIMEGEN_OFF:
+		rc = mdss_edp_off(pdata);
+		break;
+	}
+	return rc;
+}
+
 /*
  * Converts from EDID struct to mdss_panel_info
  */
@@ -415,8 +430,7 @@
 	edp_drv->panel_data.panel_info.bl_min = 1;
 	edp_drv->panel_data.panel_info.bl_max = 255;
 
-	edp_drv->panel_data.on = mdss_edp_on;
-	edp_drv->panel_data.off = mdss_edp_off;
+	edp_drv->panel_data.event_handler = mdss_edp_event_handler;
 	edp_drv->panel_data.set_backlight = mdss_edp_set_backlight;
 
 	ret = mdss_register_panel(&edp_drv->panel_data);
diff --git a/drivers/video/msm/mdss/mdss_fb.c b/drivers/video/msm/mdss/mdss_fb.c
index b711fd9..4ec4046 100644
--- a/drivers/video/msm/mdss/mdss_fb.c
+++ b/drivers/video/msm/mdss/mdss_fb.c
@@ -325,6 +325,24 @@
 	return 0;
 }
 
+static inline int mdss_fb_send_panel_event(struct msm_fb_data_type *mfd, int e)
+{
+	struct mdss_panel_data *pdata;
+
+	pdata = dev_get_platdata(&mfd->pdev->dev);
+	if (!pdata) {
+		pr_err("no panel connected\n");
+		return -ENODEV;
+	}
+
+	pr_debug("sending event=%d for fb%d\n", e, mfd->index);
+
+	if (pdata->event_handler)
+		return pdata->event_handler(pdata, e, NULL);
+
+	return 0;
+}
+
 static int mdss_fb_suspend_sub(struct msm_fb_data_type *mfd)
 {
 	int ret = 0;
@@ -334,6 +352,12 @@
 
 	pr_debug("mdss_fb suspend index=%d\n", mfd->index);
 
+	ret = mdss_fb_send_panel_event(mfd, MDSS_EVENT_SUSPEND);
+	if (ret) {
+		pr_warn("unable to suspend fb%d (%d)\n", mfd->index, ret);
+		return ret;
+	}
+
 	mfd->suspend.op_enable = mfd->op_enable;
 	mfd->suspend.panel_power_on = mfd->panel_power_on;
 
@@ -359,6 +383,12 @@
 
 	pr_debug("mdss_fb resume index=%d\n", mfd->index);
 
+	ret = mdss_fb_send_panel_event(mfd, MDSS_EVENT_RESUME);
+	if (ret) {
+		pr_warn("unable to resume fb%d (%d)\n", mfd->index, ret);
+		return ret;
+	}
+
 	/* resume state var recover */
 	mfd->op_enable = mfd->suspend.op_enable;
 
@@ -691,6 +721,7 @@
 	size *= mfd->fb_page;
 
 	if (mfd->index == 0) {
+		int dom;
 		virt = allocate_contiguous_memory(size, MEMTYPE_EBI1, SZ_1M, 0);
 		if (!virt) {
 			pr_err("unable to alloc fbmem size=%u\n", size);
@@ -698,9 +729,9 @@
 		}
 		phys = memory_pool_node_paddr(virt);
 		if (is_mdss_iommu_attached()) {
-			msm_iommu_map_contig_buffer(phys,
-				mdss_get_iommu_domain(), 0, size, SZ_4K, 0,
-				&(mfd->iova));
+			dom = mdss_get_iommu_domain(MDSS_IOMMU_DOMAIN_UNSECURE);
+			msm_iommu_map_contig_buffer(phys, dom, 0, size, SZ_4K,
+						    0, &(mfd->iova));
 		}
 		pr_info("allocating %u bytes at %p (%lx phys) for fb %d\n",
 			size, virt, phys, mfd->index);
diff --git a/drivers/video/msm/mdss/mdss_fb.h b/drivers/video/msm/mdss/mdss_fb.h
index 78f2b9a..b760388 100644
--- a/drivers/video/msm/mdss/mdss_fb.h
+++ b/drivers/video/msm/mdss/mdss_fb.h
@@ -93,6 +93,7 @@
 	u32 bl_scale;
 	u32 bl_min_lvl;
 	struct mutex lock;
+	struct mutex ov_lock;
 
 	struct platform_device *pdev;
 
diff --git a/drivers/video/msm/mdss/mdss_hdmi_tx.c b/drivers/video/msm/mdss/mdss_hdmi_tx.c
index d932bc9..539cd49 100644
--- a/drivers/video/msm/mdss/mdss_hdmi_tx.c
+++ b/drivers/video/msm/mdss/mdss_hdmi_tx.c
@@ -66,6 +66,21 @@
 	.irq_handler = hdmi_tx_isr,
 };
 
+struct dss_gpio hpd_gpio_config[] = {
+	{0, 1, COMPATIBLE_NAME "-hpd"},
+	{0, 1, COMPATIBLE_NAME "-ddc-clk"},
+	{0, 1, COMPATIBLE_NAME "-ddc-data"},
+	{0, 1, COMPATIBLE_NAME "-mux-en"},
+	{0, 0, COMPATIBLE_NAME "-mux-sel"}
+};
+
+struct dss_gpio core_gpio_config[] = {
+};
+
+struct dss_gpio cec_gpio_config[] = {
+	{0, 1, COMPATIBLE_NAME "-cec"}
+};
+
 const char *hdmi_pm_name(enum hdmi_tx_power_module_type module)
 {
 	switch (module) {
@@ -497,6 +512,9 @@
 		DEV_INFO("%s: Hdmi state switch to %d\n", __func__,
 			hdmi_ctrl->sdev.state);
 	}
+
+	if (!completion_done(&hdmi_ctrl->hpd_done))
+		complete_all(&hdmi_ctrl->hpd_done);
 } /* hdmi_tx_hpd_int_work */
 
 static int hdmi_tx_check_capability(struct dss_io_data *io)
@@ -1764,7 +1782,6 @@
 	mutex_lock(&hdmi_ctrl->mutex);
 	hdmi_ctrl->panel_power_on = true;
 
-	/* todo: check hdmi_tx_is_controller_on when hpd is on */
 	if (hdmi_ctrl->hpd_state) {
 		DEV_DBG("%s: Turning HDMI on\n", __func__);
 		mutex_unlock(&hdmi_ctrl->mutex);
@@ -1984,9 +2001,13 @@
 	hdmi_ctrl->ddc_ctrl.io = &pdata->io[HDMI_TX_CORE_IO];
 	init_completion(&hdmi_ctrl->ddc_ctrl.ddc_sw_done);
 
+	hdmi_ctrl->panel_power_on = false;
+	hdmi_ctrl->panel_suspend = false;
+
 	hdmi_ctrl->hpd_state = false;
 	hdmi_ctrl->hpd_initialized = false;
 	hdmi_ctrl->hpd_off_pending = false;
+	init_completion(&hdmi_ctrl->hpd_done);
 	INIT_WORK(&hdmi_ctrl->hpd_int_work, hdmi_tx_hpd_int_work);
 
 	INIT_WORK(&hdmi_ctrl->power_off_work, hdmi_tx_power_off_work);
@@ -2020,6 +2041,101 @@
 	return rc;
 } /* hdmi_tx_dev_init */
 
+static int hdmi_tx_panel_event_handler(struct mdss_panel_data *panel_data,
+	int event, void *arg)
+{
+	int rc = 0;
+	struct hdmi_tx_ctrl *hdmi_ctrl =
+		hdmi_tx_get_drvdata_from_panel_data(panel_data);
+
+	if (!hdmi_ctrl) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return -EINVAL;
+	}
+
+	DEV_DBG("%s: event = %d suspend=%d, hpd_feature=%d\n", __func__,
+		event, hdmi_ctrl->panel_suspend, hdmi_ctrl->hpd_feature_on);
+
+	switch (event) {
+	case MDSS_EVENT_RESUME:
+		if (hdmi_ctrl->hpd_feature_on) {
+			INIT_COMPLETION(hdmi_ctrl->hpd_done);
+
+			rc = hdmi_tx_hpd_on(hdmi_ctrl);
+			if (rc)
+				DEV_ERR("%s: hdmi_tx_hpd_on failed. rc=%d\n",
+					__func__, rc);
+		}
+		break;
+
+	case MDSS_EVENT_RESET:
+		if (hdmi_ctrl->panel_suspend) {
+			u32 timeout;
+			hdmi_ctrl->panel_suspend = false;
+
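+			/* give HPD up to 100ms (HZ/10) to report cable state */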
+			timeout = wait_for_completion_interruptible_timeout(
+				&hdmi_ctrl->hpd_done, HZ/10);
+			if (!timeout & !hdmi_ctrl->hpd_state) {
+				DEV_INFO("%s: cable removed during suspend\n",
+					__func__);
+
+				kobject_uevent(hdmi_ctrl->kobj, KOBJ_OFFLINE);
+				switch_set_state(&hdmi_ctrl->sdev, 0);
+
+				rc = -EPERM;
+			} else {
+				DEV_DBG("%s: cable present after resume\n",
+					__func__);
+			}
+		}
+		break;
+
+	case MDSS_EVENT_UNBLANK:
+		rc = hdmi_tx_power_on(panel_data);
+		if (rc)
+			DEV_ERR("%s: hdmi_tx_power_on failed. rc=%d\n",
+				__func__, rc);
+		break;
+
+	case MDSS_EVENT_TIMEGEN_ON:
+		break;
+
+	case MDSS_EVENT_SUSPEND:
+		if (!hdmi_ctrl->panel_power_on) {
+			if (hdmi_ctrl->hpd_feature_on)
+				hdmi_tx_hpd_off(hdmi_ctrl);
+			else
+				DEV_ERR("%s: invalid state\n", __func__);
+
+			hdmi_ctrl->panel_suspend = false;
+		} else {
+			hdmi_ctrl->hpd_off_pending = true;
+			hdmi_ctrl->panel_suspend = true;
+		}
+		break;
+
+	case MDSS_EVENT_BLANK:
+		if (hdmi_ctrl->panel_power_on) {
+			rc = hdmi_tx_power_off(panel_data);
+			if (rc)
+				DEV_ERR("%s: hdmi_tx_power_off failed.rc=%d\n",
+					__func__, rc);
+
+		} else {
+			DEV_DBG("%s: hdmi is already powered off\n", __func__);
+		}
+		break;
+
+	case MDSS_EVENT_TIMEGEN_OFF:
+		/* If a power off is already underway, wait for it to finish */
+		if (hdmi_ctrl->panel_suspend)
+			flush_work_sync(&hdmi_ctrl->power_off_work);
+		break;
+	}
+
+	return rc;
+} /* hdmi_tx_panel_event_handler */
+
 static int hdmi_tx_register_panel(struct hdmi_tx_ctrl *hdmi_ctrl)
 {
 	int rc = 0;
@@ -2029,8 +2145,7 @@
 		return -EINVAL;
 	}
 
-	hdmi_ctrl->panel_data.on = hdmi_tx_power_on;
-	hdmi_ctrl->panel_data.off = hdmi_tx_power_off;
+	hdmi_ctrl->panel_data.event_handler = hdmi_tx_panel_event_handler;
 
 	hdmi_ctrl->video_resolution = DEFAULT_VIDEO_RESOLUTION;
 	rc = hdmi_tx_init_panel_info(hdmi_ctrl->video_resolution,
@@ -2433,29 +2548,32 @@
 static int hdmi_tx_get_dt_gpio_data(struct device *dev,
 	struct dss_module_power *mp, u32 module_type)
 {
-	int i, j, rc = 0;
-	int dt_gpio_total = 0, mod_gpio_total = 0;
-	u32 ndx_mask = 0;
-	const char *mod_name = NULL;
+	int i, j;
+	int mp_gpio_cnt = 0, gpio_list_size = 0;
+	struct dss_gpio *gpio_list = NULL;
 	struct device_node *of_node = NULL;
-	char prop_name[32];
-	snprintf(prop_name, 32, "%s-%s", COMPATIBLE_NAME, "gpio-names");
+
+	DEV_DBG("%s: module: '%s'\n", __func__, hdmi_tx_pm_name(module_type));
 
 	if (!dev || !mp) {
 		DEV_ERR("%s: invalid input\n", __func__);
-		rc = -EINVAL;
-		goto error;
+		return -EINVAL;
 	}
 
+	of_node = dev->of_node;
+
 	switch (module_type) {
 	case HDMI_TX_HPD_PM:
-		mod_name = "hpd";
+		gpio_list_size = ARRAY_SIZE(hpd_gpio_config);
+		gpio_list = hpd_gpio_config;
 		break;
 	case HDMI_TX_CORE_PM:
-		mod_name = "core";
+		gpio_list_size = ARRAY_SIZE(core_gpio_config);
+		gpio_list = core_gpio_config;
 		break;
 	case HDMI_TX_CEC_PM:
-		mod_name = "cec";
+		gpio_list_size = ARRAY_SIZE(cec_gpio_config);
+		gpio_list = cec_gpio_config;
 		break;
 	default:
 		DEV_ERR("%s: invalid module type=%d\n", __func__,
@@ -2463,90 +2581,49 @@
 		return -EINVAL;
 	}
 
-	DEV_DBG("%s: module: '%s'\n", __func__, hdmi_tx_pm_name(module_type));
+	for (i = 0; i < gpio_list_size; i++)
+		if (of_find_property(of_node, gpio_list[i].gpio_name, NULL))
+			mp_gpio_cnt++;
 
-	of_node = dev->of_node;
-
-	dt_gpio_total = of_gpio_count(of_node);
-	if (dt_gpio_total < 0) {
-		DEV_ERR("%s: gpio not found. rc=%d\n", __func__,
-			dt_gpio_total);
-		rc = dt_gpio_total;
-		goto error;
-	}
-
-	/* count how many gpio for particular hdmi module */
-	for (i = 0; i < dt_gpio_total; i++) {
-		const char *st = NULL;
-
-		rc = of_property_read_string_index(of_node,
-			prop_name, i, &st);
-		if (rc) {
-			DEV_ERR("%s: error reading name. i=%d, rc=%d\n",
-				__func__, i, rc);
-			goto error;
-		}
-
-		if (strnstr(st, mod_name, strlen(st))) {
-			ndx_mask |= BIT(i);
-			mod_gpio_total++;
-		}
-	}
-
-	if (mod_gpio_total > 0) {
-		mp->num_gpio = mod_gpio_total;
-		mp->gpio_config = devm_kzalloc(dev, sizeof(struct dss_gpio) *
-			mod_gpio_total, GFP_KERNEL);
-		if (!mp->gpio_config) {
-			DEV_ERR("%s: can't alloc '%s' gpio mem\n", __func__,
-				hdmi_tx_pm_name(module_type));
-			goto error;
-		}
-	} else {
+	if (!mp_gpio_cnt) {
 		DEV_DBG("%s: no gpio\n", __func__);
 		return 0;
 	}
 
+	DEV_DBG("%s: mp_gpio_cnt = %d\n", __func__, mp_gpio_cnt);
+	mp->num_gpio = mp_gpio_cnt;
 
-	for (i = 0, j = 0; (i < dt_gpio_total) && (j < mod_gpio_total); i++) {
-		const char *st = NULL;
+	mp->gpio_config = devm_kzalloc(dev, sizeof(struct dss_gpio) *
+		mp_gpio_cnt, GFP_KERNEL);
+	if (!mp->gpio_config) {
+		DEV_ERR("%s: can't alloc '%s' gpio mem\n", __func__,
+			hdmi_tx_pm_name(module_type));
 
-		if (!(ndx_mask & BIT(0))) {
-			ndx_mask >>= 1;
+		mp->num_gpio = 0;
+		return -ENOMEM;
+	}
+
+	for (i = 0, j = 0; i < gpio_list_size; i++) {
+		int gpio = of_get_named_gpio(of_node,
+			gpio_list[i].gpio_name, 0);
+		if (gpio < 0) {
+			DEV_DBG("%s: no gpio named %s\n", __func__,
+				gpio_list[i].gpio_name);
 			continue;
 		}
+		memcpy(&mp->gpio_config[j], &gpio_list[i],
+			sizeof(struct dss_gpio));
 
-		/* gpio-name */
-		rc = of_property_read_string_index(of_node,
-			prop_name, i, &st);
-		if (rc) {
-			DEV_ERR("%s: error reading name. i=%d, rc=%d\n",
-				__func__, i, rc);
-			goto error;
-		}
-		snprintf(mp->gpio_config[j].gpio_name, 32, "%s", st);
+		mp->gpio_config[j].gpio = (unsigned)gpio;
 
-		/* gpio-number */
-		mp->gpio_config[j].gpio = of_get_gpio(of_node, i);
-
-		DEV_DBG("%s: gpio num=%d, name=%s\n", __func__,
-			mp->gpio_config[j].gpio,
-			mp->gpio_config[j].gpio_name);
-
-		ndx_mask >>= 1;
+		DEV_DBG("%s: gpio num=%d, name=%s, value=%d\n",
+			__func__, mp->gpio_config[j].gpio,
+			mp->gpio_config[j].gpio_name,
+			mp->gpio_config[j].value);
 		j++;
 	}
 
-	return rc;
-
-error:
-	if (mp->gpio_config) {
-		devm_kfree(dev, mp->gpio_config);
-		mp->gpio_config = NULL;
-	}
-	mp->num_gpio = 0;
-
-	return rc;
+	return 0;
 } /* hdmi_tx_get_dt_gpio_data */
 
 static void hdmi_tx_put_dt_data(struct device *dev,
diff --git a/drivers/video/msm/mdss/mdss_hdmi_tx.h b/drivers/video/msm/mdss/mdss_hdmi_tx.h
index ce19355..2d431b7 100644
--- a/drivers/video/msm/mdss/mdss_hdmi_tx.h
+++ b/drivers/video/msm/mdss/mdss_hdmi_tx.h
@@ -51,11 +51,13 @@
 
 	uint32_t video_resolution;
 	u32 panel_power_on;
+	u32 panel_suspend;
 
-	u32 hpd_initialized;
 	u32 hpd_state;
 	u32 hpd_off_pending;
 	u32 hpd_feature_on;
+	u32 hpd_initialized;
+	struct completion hpd_done;
 	struct work_struct hpd_int_work;
 
 	struct work_struct power_off_work;
diff --git a/drivers/video/msm/mdss/mdss_io_util.c b/drivers/video/msm/mdss/mdss_io_util.c
index 0a14056..2bf2d74 100644
--- a/drivers/video/msm/mdss/mdss_io_util.c
+++ b/drivers/video/msm/mdss/mdss_io_util.c
@@ -15,6 +15,7 @@
 #include <linux/io.h>
 #include "mdss_io_util.h"
 
+#define MAX_I2C_CMDS  16
 void dss_reg_w(struct dss_io_data *io, u32 offset, u32 value, u32 debug)
 {
 	u32 in_val;
@@ -248,6 +249,10 @@
 	int i = 0, rc = 0;
 	if (enable) {
 		for (i = 0; i < num_gpio; i++) {
+			DEV_DBG("%pS->%s: %s enable\n",
+				__builtin_return_address(0), __func__,
+				in_gpio[i].gpio_name);
+
 			rc = gpio_request(in_gpio[i].gpio,
 				in_gpio[i].gpio_name);
 			if (rc < 0) {
@@ -256,10 +261,16 @@
 					in_gpio[i].gpio_name);
 				goto disable_gpio;
 			}
+			gpio_set_value(in_gpio[i].gpio, in_gpio[i].value);
 		}
 	} else {
-		for (i = num_gpio-1; i >= 0; i--)
+		for (i = num_gpio-1; i >= 0; i--) {
+			DEV_DBG("%pS->%s: %s disable\n",
+				__builtin_return_address(0), __func__,
+				in_gpio[i].gpio_name);
+
 			gpio_free(in_gpio[i].gpio);
+		}
 	}
 	return rc;
 
@@ -382,3 +393,59 @@
 
 	return rc;
 } /* msm_dss_enable_clk */
+
+
+int mdss_i2c_byte_read(struct i2c_client *client, uint8_t slave_addr,
+			uint8_t reg_offset, uint8_t *read_buf)
+{
+	struct i2c_msg msgs[2];
+	int ret = -1;
+
+	pr_debug("%s: reading from slave_addr=[%x] and offset=[%x]\n",
+		 __func__, slave_addr, reg_offset);
+
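+	/* i2c_msg expects a 7-bit address; drop the R/W bit */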
+	msgs[0].addr = slave_addr >> 1;
+	msgs[0].flags = 0;
+	msgs[0].buf = &reg_offset;
+	msgs[0].len = 1;
+
+	msgs[1].addr = slave_addr >> 1;
+	msgs[1].flags = I2C_M_RD;
+	msgs[1].buf = read_buf;
+	msgs[1].len = 1;
+
+	ret = i2c_transfer(client->adapter, msgs, 2);
+	if (ret < 1) {
+		pr_err("%s: I2C READ FAILED=[%d]\n", __func__, ret);
+		return -EACCES;
+	}
+	pr_debug("%s: i2c buf is [%x]\n", __func__, *read_buf);
+	return 0;
+}
+
+int mdss_i2c_byte_write(struct i2c_client *client, uint8_t slave_addr,
+			uint8_t reg_offset, uint8_t *value)
+{
+	struct i2c_msg msgs[1];
+	uint8_t data[2];
+	int status = -EACCES;
+
+	pr_debug("%s: writing from slave_addr=[%x] and offset=[%x]\n",
+		 __func__, slave_addr, reg_offset);
+
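+	/* single write message: register offset byte followed by the data byte */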
+	data[0] = reg_offset;
+	data[1] = *value;
+
+	msgs[0].addr = slave_addr >> 1;
+	msgs[0].flags = 0;
+	msgs[0].len = 2;
+	msgs[0].buf = data;
+
+	status = i2c_transfer(client->adapter, msgs, 1);
+	if (status < 1) {
+		pr_err("I2C WRITE FAILED=[%d]\n", status);
+		return -EACCES;
+	}
+	pr_debug("%s: I2C write status=%x\n", __func__, status);
+	return status;
+}
diff --git a/drivers/video/msm/mdss/mdss_io_util.h b/drivers/video/msm/mdss/mdss_io_util.h
index 51e9e54..85826f7 100644
--- a/drivers/video/msm/mdss/mdss_io_util.h
+++ b/drivers/video/msm/mdss/mdss_io_util.h
@@ -16,6 +16,8 @@
 #include <linux/gpio.h>
 #include <linux/platform_device.h>
 #include <linux/regulator/consumer.h>
+#include <linux/i2c.h>
+#include <linux/types.h>
 
 #ifdef DEBUG
 #define DEV_DBG(fmt, args...)   pr_err(fmt, ##args)
@@ -56,6 +58,7 @@
 
 struct dss_gpio {
 	unsigned gpio;
+	unsigned value;
 	char gpio_name[32];
 };
 
@@ -97,4 +100,9 @@
 int msm_dss_clk_set_rate(struct dss_clk *clk_arry, int num_clk);
 int msm_dss_enable_clk(struct dss_clk *clk_arry, int num_clk, int enable);
 
+int mdss_i2c_byte_read(struct i2c_client *client, uint8_t slave_addr,
+		       uint8_t reg_offset, uint8_t *read_buf);
+int mdss_i2c_byte_write(struct i2c_client *client, uint8_t slave_addr,
+			uint8_t reg_offset, uint8_t *value);
+
 #endif /* __MDSS_IO_UTIL_H__ */
diff --git a/drivers/video/msm/mdss/mdss_mdp.c b/drivers/video/msm/mdss/mdss_mdp.c
index 11b0831..bcb3aee 100644
--- a/drivers/video/msm/mdss/mdss_mdp.c
+++ b/drivers/video/msm/mdss/mdss_mdp.c
@@ -95,28 +95,29 @@
 	.name = "mdss_mdp",
 };
 
-struct msm_iova_partition mdp_iommu_partitions[] = {
-	{
-		.start = SZ_128K,
-		.size = SZ_2G - SZ_128K,
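+/* MDP IOVA layout: non-secure below SZ_1G, secure in the 1GB above it */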
+struct mdss_iommu_map_type mdss_iommu_map[MDSS_IOMMU_MAX_DOMAIN] = {
+	[MDSS_IOMMU_DOMAIN_UNSECURE] = {
+		.client_name = "mdp_ns",
+		.ctx_name = "mdp_0",
+		.partitions = {
+			{
+				.start = SZ_128K,
+				.size = SZ_1G - SZ_128K,
+			},
+		},
+		.npartitions = 1,
 	},
-};
-struct msm_iova_layout mdp_iommu_layout = {
-	.client_name = "mdss_mdp",
-	.partitions = mdp_iommu_partitions,
-	.npartitions = ARRAY_SIZE(mdp_iommu_partitions),
-};
-
-struct {
-	char *name;
-	struct device *ctx;
-} mdp_iommu_ctx[] = {
-	{
-		.name = "mdp_0",
+	[MDSS_IOMMU_DOMAIN_SECURE] = {
+		.client_name = "mdp_secure",
+		.ctx_name = "mdp_1",
+		.partitions = {
+			{
+				.start = SZ_1G,
+				.size = SZ_1G,
+			},
+		},
+		.npartitions = 1,
 	},
-	{
-		.name = "mdp_1",
-	}
 };
 
 struct mdss_hw mdss_mdp_hw = {
@@ -670,85 +671,103 @@
 	return 0;
 }
 
-int mdss_iommu_attach(void)
+int mdss_iommu_attach(struct mdss_data_type *mdata)
 {
 	struct iommu_domain *domain;
-	int i, domain_idx;
+	struct mdss_iommu_map_type *iomap;
+	int i;
 
-	if (mdss_res->iommu_attached) {
+	if (mdata->iommu_attached) {
 		pr_warn("mdp iommu already attached\n");
 		return 0;
 	}
 
-	domain_idx = mdss_get_iommu_domain();
-	domain = msm_get_iommu_domain(domain_idx);
-	if (!domain) {
-		pr_err("unable to get iommu domain(%d)\n", domain_idx);
-		return -EINVAL;
+	for (i = 0; i < MDSS_IOMMU_MAX_DOMAIN; i++) {
+		iomap = mdata->iommu_map + i;
+
+		domain = msm_get_iommu_domain(iomap->domain_idx);
+		if (!domain) {
+			WARN(1, "could not attach iommu client %s to ctx %s\n",
+				iomap->client_name, iomap->ctx_name);
+			continue;
+		}
+		iommu_attach_device(domain, iomap->ctx);
 	}
 
-	for (i = 0; i < ARRAY_SIZE(mdp_iommu_ctx); i++) {
-		if (iommu_attach_device(domain, mdp_iommu_ctx[i].ctx)) {
-			WARN(1, "could not attach iommu domain %d to ctx %s\n",
-				domain_idx, mdp_iommu_ctx[i].name);
-			return -EINVAL;
-		}
-	}
-	mdss_res->iommu_attached = true;
+	mdata->iommu_attached = true;
 
 	return 0;
 }
 
-int mdss_iommu_dettach(void)
+int mdss_iommu_dettach(struct mdss_data_type *mdata)
 {
 	struct iommu_domain *domain;
-	int i, domain_idx;
+	struct mdss_iommu_map_type *iomap;
+	int i;
 
-	if (!mdss_res->iommu_attached) {
+	if (!mdata->iommu_attached) {
 		pr_warn("mdp iommu already dettached\n");
 		return 0;
 	}
 
-	domain_idx = mdss_get_iommu_domain();
-	domain = msm_get_iommu_domain(domain_idx);
-	if (!domain) {
-		pr_err("unable to get iommu domain(%d)\n", domain_idx);
-		return -EINVAL;
+	for (i = 0; i < MDSS_IOMMU_MAX_DOMAIN; i++) {
+		iomap = mdata->iommu_map + i;
+
+		domain = msm_get_iommu_domain(iomap->domain_idx);
+		if (!domain) {
+			pr_err("unable to get iommu domain(%d)\n",
+				iomap->domain_idx);
+			continue;
+		}
+		iommu_detach_device(domain, iomap->ctx);
 	}
 
-	for (i = 0; i < ARRAY_SIZE(mdp_iommu_ctx); i++)
-		iommu_detach_device(domain, mdp_iommu_ctx[i].ctx);
-	mdss_res->iommu_attached = false;
+	mdata->iommu_attached = false;
 
 	return 0;
 }
 
-int mdss_iommu_init(void)
+int mdss_iommu_init(struct mdss_data_type *mdata)
 {
+	struct msm_iova_layout layout;
 	struct iommu_domain *domain;
-	int domain_idx, i;
+	struct mdss_iommu_map_type *iomap;
+	int i;
 
-	domain_idx = msm_register_domain(&mdp_iommu_layout);
-	if (IS_ERR_VALUE(domain_idx))
-		return -EINVAL;
-
-	domain = msm_get_iommu_domain(domain_idx);
-	if (!domain) {
-		pr_err("unable to get iommu domain(%d)\n", domain_idx);
-		return -EINVAL;
+	if (mdata->iommu_map) {
+		pr_warn("iommu already initialized\n");
+		return 0;
 	}
 
-	iommu_set_fault_handler(domain, mdss_iommu_fault_handler);
+	for (i = 0; i < MDSS_IOMMU_MAX_DOMAIN; i++) {
+		iomap = &mdss_iommu_map[i];
 
-	for (i = 0; i < ARRAY_SIZE(mdp_iommu_ctx); i++) {
-		mdp_iommu_ctx[i].ctx = msm_iommu_get_ctx(mdp_iommu_ctx[i].name);
-		if (!mdp_iommu_ctx[i].ctx) {
+		layout.client_name = iomap->client_name;
+		layout.partitions = iomap->partitions;
+		layout.npartitions = iomap->npartitions;
+		layout.is_secure = (i == MDSS_IOMMU_DOMAIN_SECURE);
+
+		iomap->domain_idx = msm_register_domain(&layout);
+		if (IS_ERR_VALUE(iomap->domain_idx))
+			return -EINVAL;
+
+		domain = msm_get_iommu_domain(iomap->domain_idx);
+		if (!domain) {
+			pr_err("unable to get iommu domain(%d)\n",
+				iomap->domain_idx);
+			return -EINVAL;
+		}
+		iommu_set_fault_handler(domain, mdss_iommu_fault_handler);
+
+		iomap->ctx = msm_iommu_get_ctx(iomap->ctx_name);
+		if (!iomap->ctx) {
 			pr_warn("unable to get iommu ctx(%s)\n",
-					mdp_iommu_ctx[i].name);
+				iomap->ctx_name);
 			return -EINVAL;
 		}
 	}
-	mdss_res->iommu_domain = domain_idx;
+
+	mdata->iommu_map = mdss_iommu_map;
 
 	return 0;
 }
@@ -815,9 +834,9 @@
 		mdata->iclient = NULL;
 	}
 
-	rc = mdss_iommu_init();
+	rc = mdss_iommu_init(mdata);
 	if (!IS_ERR_VALUE(rc))
-		mdss_iommu_attach();
+		mdss_iommu_attach(mdata);
 
 	rc = mdss_hw_init(mdata);
 
@@ -934,11 +953,11 @@
 	if (on && !mdata->fs_ena) {
 		pr_debug("Enable MDP FS\n");
 		regulator_enable(mdata->fs);
-		mdss_iommu_attach();
+		mdss_iommu_attach(mdata);
 		mdata->fs_ena = true;
 	} else if (!on && mdata->fs_ena) {
 		pr_debug("Disable MDP FS\n");
-		mdss_iommu_dettach();
+		mdss_iommu_dettach(mdata);
 		regulator_disable(mdata->fs);
 		mdata->fs_ena = false;
 	}
diff --git a/drivers/video/msm/mdss/mdss_mdp.h b/drivers/video/msm/mdss/mdss_mdp.h
index 808babb..2e92591 100644
--- a/drivers/video/msm/mdss/mdss_mdp.h
+++ b/drivers/video/msm/mdss/mdss_mdp.h
@@ -229,6 +229,7 @@
 	u8 mixer_stage;
 	u8 is_fg;
 	u8 alpha;
+	u8 overfetch_disable;
 	u32 transp;
 
 	struct msm_fb_data_type *mfd;
@@ -283,13 +284,13 @@
 void mdss_mdp_clk_ctrl(int enable, int isr);
 
 int mdss_mdp_overlay_init(struct msm_fb_data_type *mfd);
-int mdss_mdp_overlay_release_all(struct msm_fb_data_type *mfd);
 int mdss_mdp_overlay_vsync_ctrl(struct msm_fb_data_type *mfd, int en);
 int mdss_mdp_video_start(struct mdss_mdp_ctl *ctl);
 int mdss_mdp_writeback_start(struct mdss_mdp_ctl *ctl);
 
 int mdss_mdp_ctl_on(struct msm_fb_data_type *mfd);
 int mdss_mdp_ctl_off(struct msm_fb_data_type *mfd);
+int mdss_mdp_ctl_intf_event(struct mdss_mdp_ctl *ctl, int event, void *arg);
 
 struct mdss_mdp_mixer *mdss_mdp_wb_mixer_alloc(int rotator);
 int mdss_mdp_wb_mixer_destroy(struct mdss_mdp_mixer *mixer);
diff --git a/drivers/video/msm/mdss/mdss_mdp_ctl.c b/drivers/video/msm/mdss/mdss_mdp_ctl.c
index b5f6ddf..00f5874 100644
--- a/drivers/video/msm/mdss/mdss_mdp_ctl.c
+++ b/drivers/video/msm/mdss/mdss_mdp_ctl.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -68,6 +68,11 @@
 		bus_ab_quota = bus_ab_quota << MDSS_MDP_BUS_FACTOR_SHIFT;
 		bus_ib_quota = MDSS_MDP_BUS_FUDGE_FACTOR(bus_ib_quota);
 		bus_ib_quota <<= MDSS_MDP_BUS_FACTOR_SHIFT;
+
+		if ((bus_ib_quota == 0) && (clk_rate > 0)) {
+			/* allocate min bw for panel cmds if mdp is active */
+			bus_ib_quota = SZ_16M;
+		}
 		mdss_mdp_bus_scale_set_quota(bus_ab_quota, bus_ib_quota);
 	}
 	if (flags & MDSS_MDP_PERF_UPDATE_CLK) {
@@ -531,9 +536,28 @@
 	return 0;
 }
 
-int mdss_mdp_ctl_on(struct msm_fb_data_type *mfd)
+int mdss_mdp_ctl_intf_event(struct mdss_mdp_ctl *ctl, int event, void *arg)
 {
 	struct mdss_panel_data *pdata;
+	if (!ctl || !ctl->mfd)
+		return -ENODEV;
+
+	pdata = dev_get_platdata(&ctl->mfd->pdev->dev);
+	if (!pdata) {
+		pr_err("no panel connected\n");
+		return -ENODEV;
+	}
+
+	pr_debug("sending ctl=%d event=%d\n", ctl->num, event);
+
+	if (pdata->event_handler)
+		return pdata->event_handler(pdata, event, arg);
+
+	return 0;
+}
+
+int mdss_mdp_ctl_on(struct msm_fb_data_type *mfd)
+{
 	struct mdss_mdp_ctl *ctl;
 	struct mdss_mdp_mixer *mixer;
 	u32 outsize, temp, off;
@@ -545,12 +569,6 @@
 	if (mfd->key != MFD_KEY)
 		return -EINVAL;
 
-	pdata = dev_get_platdata(&mfd->pdev->dev);
-	if (!pdata) {
-		pr_err("no panel connected\n");
-		return -ENODEV;
-	}
-
 	if (mdss_mdp_ctl_init(mfd)) {
 		pr_err("unable to initialize ctl\n");
 		return -ENODEV;
@@ -568,6 +586,12 @@
 	ctl->power_on = true;
 
 	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON, false);
+	ret = mdss_mdp_ctl_intf_event(ctl, MDSS_EVENT_RESET, NULL);
+	if (ret) {
+		pr_err("panel power on failed ctl=%d\n", ctl->num);
+		goto start_fail;
+	}
+
 	if (ctl->start_fnc)
 		ret = ctl->start_fnc(ctl);
 	else
@@ -579,17 +603,6 @@
 		goto start_fail;
 	}
 
-	/* request bus bandwidth for panel commands */
-	ctl->clk_rate = MDP_CLK_DEFAULT_RATE;
-	ctl->bus_ib_quota = SZ_1M;
-	mdss_mdp_ctl_perf_commit(MDSS_MDP_PERF_UPDATE_ALL);
-
-	ret = pdata->on(pdata);
-	if (ret) {
-		pr_err("panel power on failed ctl=%d\n", ctl->num);
-		goto panel_fail;
-	}
-
 	pr_debug("ctl_num=%d\n", ctl->num);
 
 	mixer = ctl->mixer_left;
@@ -617,23 +630,18 @@
 		MDSS_MDP_REG_WRITE(off + MDSS_MDP_REG_LM_OUT_SIZE, outsize);
 		mdss_mdp_ctl_write(ctl, MDSS_MDP_REG_CTL_PACK_3D, 0);
 	}
-panel_fail:
-	if (ret && ctl->stop_fnc)
-		ctl->stop_fnc(ctl);
+
 start_fail:
 	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false);
 	mutex_unlock(&ctl->lock);
-	if (ret) {
+	if (ret)
 		mdss_mdp_ctl_destroy(mfd);
-		mdss_mdp_ctl_perf_commit(MDSS_MDP_PERF_UPDATE_ALL);
-	}
 
 	return ret;
 }
 
 int mdss_mdp_ctl_off(struct msm_fb_data_type *mfd)
 {
-	struct mdss_panel_data *pdata;
 	struct mdss_mdp_ctl *ctl;
 	int ret = 0;
 
@@ -648,12 +656,6 @@
 		return -ENODEV;
 	}
 
-	pdata = dev_get_platdata(&mfd->pdev->dev);
-	if (!pdata) {
-		pr_err("no panel connected\n");
-		return -ENODEV;
-	}
-
 	ctl = mfd->ctl;
 
 	if (!ctl->power_on) {
@@ -663,43 +665,33 @@
 
 	pr_debug("ctl_num=%d\n", mfd->ctl->num);
 
-	mdss_mdp_overlay_release_all(mfd);
-
-	/* request bus bandwidth for panel commands */
-	ctl->bus_ib_quota = SZ_1M;
-	mdss_mdp_ctl_perf_commit(MDSS_MDP_PERF_UPDATE_ALL);
-
 	mutex_lock(&ctl->lock);
-	ctl->power_on = false;
 
 	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON, false);
 
-	if (pdata->intf_unprepare)
-		ret = pdata->intf_unprepare(pdata);
-
-	if (ret)
-		pr_err("%s: intf_unprepare failed\n", __func__);
-
 	if (ctl->stop_fnc)
 		ret = ctl->stop_fnc(ctl);
 	else
 		pr_warn("no stop func for ctl=%d\n", ctl->num);
 
-	if (ret)
+	if (ret) {
 		pr_warn("error powering off intf ctl=%d\n", ctl->num);
-
-	ret = pdata->off(pdata);
+	} else {
+		ctl->power_on = false;
+		ctl->play_cnt = 0;
+		ctl->clk_rate = 0;
+		mdss_mdp_ctl_perf_commit(MDSS_MDP_PERF_UPDATE_ALL);
+	}
 
 	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false);
 
-	ctl->play_cnt = 0;
-
-	mdss_mdp_ctl_perf_commit(MDSS_MDP_PERF_UPDATE_ALL);
-
 	mutex_unlock(&ctl->lock);
 
-	if (!mfd->ref_cnt)
+	if (!ret && !mfd->ref_cnt) {
+		ret = mdss_mdp_ctl_intf_event(ctl, MDSS_EVENT_CLOSE, NULL);
+		WARN(ret, "unable to close intf %d\n", ctl->intf_num);
 		mdss_mdp_ctl_destroy(mfd);
+	}
 
 	return ret;
 }
@@ -707,7 +699,7 @@
 static int mdss_mdp_mixer_setup(struct mdss_mdp_ctl *ctl,
 				struct mdss_mdp_mixer *mixer)
 {
-	struct mdss_mdp_pipe *pipe, *bgpipe = NULL;
+	struct mdss_mdp_pipe *pipe;
 	u32 off, blend_op, blend_stage;
 	u32 mixercfg = 0, blend_color_out = 0, bgalpha = 0;
 	int stage;
@@ -717,26 +709,24 @@
 
 	pr_debug("setup mixer=%d\n", mixer->num);
 
-	for (stage = MDSS_MDP_STAGE_BASE; stage < MDSS_MDP_MAX_STAGE; stage++) {
+	pipe = mixer->stage_pipe[MDSS_MDP_STAGE_BASE];
+	if (pipe == NULL) {
+		mixercfg = MDSS_MDP_LM_BORDER_COLOR;
+	} else {
+		mixercfg = 1 << (3 * pipe->num);
+		if (pipe->src_fmt->alpha_enable)
+			bgalpha = 1;
+	}
+
+	for (stage = MDSS_MDP_STAGE_0; stage < MDSS_MDP_MAX_STAGE; stage++) {
 		pipe = mixer->stage_pipe[stage];
-		if (pipe == NULL) {
-			if (stage == MDSS_MDP_STAGE_BASE)
-				mixercfg |= MDSS_MDP_LM_BORDER_COLOR;
+		if (pipe == NULL)
 			continue;
-		}
 
 		if (stage != pipe->mixer_stage) {
 			mixer->stage_pipe[stage] = NULL;
 			continue;
 		}
-		mixercfg |= stage << (3 * pipe->num);
-
-		if (stage == MDSS_MDP_STAGE_BASE) {
-			bgpipe = pipe;
-			if (pipe->src_fmt->alpha_enable)
-				bgalpha = 1;
-			continue;
-		}
 
 		blend_stage = stage - MDSS_MDP_STAGE_0;
 		off = MDSS_MDP_REG_LM_OFFSET(mixer->num) +
@@ -744,10 +734,8 @@
 
 		if (pipe->is_fg) {
 			bgalpha = 0;
-			if (bgpipe) {
-				mixercfg &= ~(0x7 << (3 * bgpipe->num));
-				mixercfg |= MDSS_MDP_LM_BORDER_COLOR;
-			}
+			mixercfg = MDSS_MDP_LM_BORDER_COLOR;
+
 			blend_op = (MDSS_MDP_BLEND_FG_ALPHA_FG_CONST |
 				    MDSS_MDP_BLEND_BG_ALPHA_BG_CONST);
 			/* keep fg alpha */
@@ -778,6 +766,8 @@
 					stage);
 		}
 
+		mixercfg |= stage << (3 * pipe->num);
+
 		MDSS_MDP_REG_WRITE(off + MDSS_MDP_REG_LM_OP_MODE, blend_op);
 		MDSS_MDP_REG_WRITE(off + MDSS_MDP_REG_LM_BLEND_FG_ALPHA,
 				   pipe->alpha);
@@ -928,13 +918,16 @@
 		return -ENODEV;
 	}
 
-	if (!ctl->power_on)
-		return 0;
-
 	pr_debug("commit ctl=%d play_cnt=%d\n", ctl->num, ctl->play_cnt);
 
-	if (mutex_lock_interruptible(&ctl->lock))
-		return -EINTR;
+	ret = mutex_lock_interruptible(&ctl->lock);
+	if (ret)
+		return ret;
+
+	if (!ctl->power_on) {
+		mutex_unlock(&ctl->lock);
+		return 0;
+	}
 
 	mixer1_changed = (ctl->mixer_left && ctl->mixer_left->params_changed);
 	mixer2_changed = (ctl->mixer_right && ctl->mixer_right->params_changed);
@@ -994,7 +987,7 @@
 	mutex_lock(&mdss_mdp_ctl_lock);
 	for (i = 0; i < MDSS_MDP_MAX_CTL; i++) {
 		ctl = &mdss_mdp_ctl_list[i];
-		if ((ctl->power_on) &&
+		if ((ctl->power_on) && (ctl->mfd) &&
 			(ctl->mfd->index == fb_num)) {
 			if (ctl->mixer_left) {
 				mixer_id[mixer_cnt] = ctl->mixer_left->num;
diff --git a/drivers/video/msm/mdss/mdss_mdp_hwio.h b/drivers/video/msm/mdss/mdss_mdp_hwio.h
index 1da30b8..b6ac126 100644
--- a/drivers/video/msm/mdss/mdss_mdp_hwio.h
+++ b/drivers/video/msm/mdss/mdss_mdp_hwio.h
@@ -166,6 +166,7 @@
 #define MDSS_MDP_REG_SSPP_SRC_CONSTANT_COLOR		0x03C
 #define MDSS_MDP_REG_SSPP_FETCH_CONFIG			0x048
 #define MDSS_MDP_REG_SSPP_VC1_RANGE			0x04C
+#define MDSS_MDP_REG_SSPP_SRC_ADDR_SW_STATUS		0x070
 
 #define MDSS_MDP_REG_SSPP_CURRENT_SRC0_ADDR		0x0A4
 #define MDSS_MDP_REG_SSPP_CURRENT_SRC1_ADDR		0x0A8
diff --git a/drivers/video/msm/mdss/mdss_mdp_intf_video.c b/drivers/video/msm/mdss/mdss_mdp_intf_video.c
index 4d3fbf0..9508846 100644
--- a/drivers/video/msm/mdss/mdss_mdp_intf_video.c
+++ b/drivers/video/msm/mdss/mdss_mdp_intf_video.c
@@ -201,7 +201,7 @@
 static int mdss_mdp_video_stop(struct mdss_mdp_ctl *ctl)
 {
 	struct mdss_mdp_video_ctx *ctx;
-	int off;
+	int rc, off;
 
 	pr_debug("stop ctl=%d\n", ctl->num);
 
@@ -211,16 +211,27 @@
 		return -ENODEV;
 	}
 
-	if (ctx->vsync_handler)
-		mdss_mdp_video_set_vsync_handler(ctl, NULL);
-
 	if (ctx->timegen_en) {
+		rc = mdss_mdp_ctl_intf_event(ctl, MDSS_EVENT_BLANK, NULL);
+		if (rc == -EBUSY) {
+			pr_debug("intf #%d busy don't turn off\n",
+				 ctl->intf_num);
+			return rc;
+		}
+		WARN(rc, "intf %d blank error (%d)\n", ctl->intf_num, rc);
+
 		off = MDSS_MDP_REG_INTF_OFFSET(ctl->intf_num);
 		MDSS_MDP_REG_WRITE(off + MDSS_MDP_REG_INTF_TIMING_ENGINE_EN, 0);
 		mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false);
 		ctx->timegen_en = false;
+
+		rc = mdss_mdp_ctl_intf_event(ctl, MDSS_EVENT_TIMEGEN_OFF, NULL);
+		WARN(rc, "intf %d timegen off error (%d)\n", ctl->intf_num, rc);
 	}
 
+	if (ctx->vsync_handler)
+		mdss_mdp_video_set_vsync_handler(ctl, NULL);
+
 	mdss_mdp_set_intr_callback(MDSS_MDP_IRQ_INTF_VSYNC, ctl->intf_num,
 				   NULL, NULL);
 	mdss_mdp_set_intr_callback(MDSS_MDP_IRQ_PING_PONG_COMP, ctx->pp_num,
@@ -288,6 +299,7 @@
 static int mdss_mdp_video_display(struct mdss_mdp_ctl *ctl, void *arg)
 {
 	struct mdss_mdp_video_ctx *ctx;
+	int rc;
 
 	pr_debug("kickoff ctl=%d\n", ctl->num);
 
@@ -306,15 +318,23 @@
 	if (!ctx->timegen_en) {
 		int off = MDSS_MDP_REG_INTF_OFFSET(ctl->intf_num);
 
+		rc = mdss_mdp_ctl_intf_event(ctl, MDSS_EVENT_UNBLANK, NULL);
+		WARN(rc, "intf %d unblank error (%d)\n", ctl->intf_num, rc);
+
 		pr_debug("enabling timing gen for intf=%d\n", ctl->intf_num);
 
 		mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON, false);
 		MDSS_MDP_REG_WRITE(off + MDSS_MDP_REG_INTF_TIMING_ENGINE_EN, 1);
-		ctx->timegen_en = true;
 		wmb();
 	}
 
 	wait_for_completion(&ctx->vsync_comp);
+
+	if (!ctx->timegen_en) {
+		ctx->timegen_en = true;
+		rc = mdss_mdp_ctl_intf_event(ctl, MDSS_EVENT_TIMEGEN_ON, NULL);
+		WARN(rc, "intf %d timegen on error (%d)\n", ctl->intf_num, rc);
+	}
 	if (!ctx->vsync_handler)
 		mdss_mdp_irq_disable(MDSS_MDP_IRQ_INTF_VSYNC, ctl->intf_num);
 	mutex_unlock(&ctx->vsync_lock);
diff --git a/drivers/video/msm/mdss/mdss_mdp_intf_writeback.c b/drivers/video/msm/mdss/mdss_mdp_intf_writeback.c
index c9acc65..a1f1bcc 100644
--- a/drivers/video/msm/mdss/mdss_mdp_intf_writeback.c
+++ b/drivers/video/msm/mdss/mdss_mdp_intf_writeback.c
@@ -245,7 +245,7 @@
 
 	ctx->format = rot->format;
 
-	ctx->rot90 = !!(rot->rotations & MDP_ROT_90);
+	ctx->rot90 = !!(rot->flags & MDP_ROT_90);
 	if (ctx->rot90) {
 		ctx->opmode |= BIT(5); /* ROT 90 */
 		swap(ctx->width, ctx->height);
diff --git a/drivers/video/msm/mdss/mdss_mdp_overlay.c b/drivers/video/msm/mdss/mdss_mdp_overlay.c
index e4621d4..f537c39 100644
--- a/drivers/video/msm/mdss/mdss_mdp_overlay.c
+++ b/drivers/video/msm/mdss/mdss_mdp_overlay.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -29,6 +29,8 @@
 #define CHECK_BOUNDS(offset, size, max_size) \
 	(((size) > (max_size)) || ((offset) > ((max_size) - (size))))
 
+static atomic_t ov_active_panels = ATOMIC_INIT(0);
+
 static int mdss_mdp_overlay_get(struct msm_fb_data_type *mfd,
 				struct mdp_overlay *req)
 {
@@ -196,7 +198,9 @@
 		return -EINVAL;
 	}
 
-	rot->rotations = req->flags & (MDP_ROT_90 | MDP_FLIP_LR | MDP_FLIP_UD);
+	/* keep only flags of interest to rotator */
+	rot->flags = req->flags & (MDP_ROT_90 | MDP_FLIP_LR | MDP_FLIP_UD |
+				   MDP_SECURE_OVERLAY_SESSION);
 
 	rot->format = fmt->format;
 	rot->img_width = req->src.width;
@@ -207,7 +211,7 @@
 	rot->src_rect.h = req->src_rect.h;
 
 	if (req->flags & MDP_DEINTERLACE) {
-		rot->rotations |= MDP_DEINTERLACE;
+		rot->flags |= MDP_DEINTERLACE;
 		rot->src_rect.h /= 2;
 	}
 
@@ -318,6 +322,7 @@
 	pipe->is_fg = req->is_fg;
 	pipe->alpha = req->alpha;
 	pipe->transp = req->transp_mask;
+	pipe->overfetch_disable = fmt->is_yuv;
 
 	pipe->req_data = *req;
 
@@ -355,8 +360,14 @@
 {
 	int ret;
 
-	if (!mfd->panel_power_on)
+	ret = mutex_lock_interruptible(&mfd->ov_lock);
+	if (ret)
+		return ret;
+
+	if (!mfd->panel_power_on) {
+		mutex_unlock(&mfd->ov_lock);
 		return -EPERM;
+	}
 
 	if (req->flags & MDSS_MDP_ROT_ONLY) {
 		ret = mdss_mdp_overlay_rotator_setup(mfd, req);
@@ -371,18 +382,22 @@
 		req->z_order -= MDSS_MDP_STAGE_0;
 	}
 
+	mutex_unlock(&mfd->ov_lock);
+
 	return ret;
 }
 
 static inline int mdss_mdp_overlay_get_buf(struct msm_fb_data_type *mfd,
 					   struct mdss_mdp_data *data,
 					   struct msmfb_data *planes,
-					   int num_planes)
+					   int num_planes,
+					   u32 flags)
 {
 	int i;
 
 	memset(data, 0, sizeof(*data));
 	for (i = 0; i < num_planes; i++) {
+		data->p[i].flags = flags;
 		mdss_mdp_get_img(&planes[i], &data->p[i]);
 		if (data->p[0].len == 0)
 			break;
@@ -410,35 +425,18 @@
 	return 0;
 }
 
-static int mdss_mdp_overlay_kickoff(struct mdss_mdp_ctl *ctl)
+static int mdss_mdp_overlay_cleanup(struct msm_fb_data_type *mfd)
 {
 	struct mdss_mdp_pipe *pipe, *tmp;
-	struct msm_fb_data_type *mfd = ctl->mfd;
-	int i, ret;
+	LIST_HEAD(destroy_pipes);
+	int i;
 
-	if (mfd->kickoff_fnc)
-		ret = mfd->kickoff_fnc(ctl);
-	else
-		ret = mdss_mdp_display_commit(ctl, NULL);
-	if (IS_ERR_VALUE(ret))
-		return ret;
-
-	complete(&mfd->update.comp);
-	mutex_lock(&mfd->no_update.lock);
-	if (mfd->no_update.timer.function)
-		del_timer(&(mfd->no_update.timer));
-
-	mfd->no_update.timer.expires = jiffies + (2 * HZ);
-	add_timer(&mfd->no_update.timer);
-	mutex_unlock(&mfd->no_update.lock);
-
+	mutex_lock(&mfd->ov_lock);
 	mutex_lock(&mfd->lock);
 	list_for_each_entry_safe(pipe, tmp, &mfd->pipes_cleanup, cleanup_list) {
-		list_del(&pipe->cleanup_list);
+		list_move(&pipe->cleanup_list, &destroy_pipes);
 		for (i = 0; i < ARRAY_SIZE(pipe->buffers); i++)
 			mdss_mdp_overlay_free_buf(&pipe->buffers[i]);
-
-		mdss_mdp_pipe_destroy(pipe);
 	}
 
 	if (!list_empty(&mfd->pipes_used)) {
@@ -457,36 +455,44 @@
 		}
 	}
 	mutex_unlock(&mfd->lock);
+	list_for_each_entry_safe(pipe, tmp, &destroy_pipes, cleanup_list)
+		mdss_mdp_pipe_destroy(pipe);
+	mutex_unlock(&mfd->ov_lock);
+
+	return 0;
+}
+
+static int mdss_mdp_overlay_kickoff(struct mdss_mdp_ctl *ctl)
+{
+	struct msm_fb_data_type *mfd = ctl->mfd;
+	int ret;
+
+	if (mfd->kickoff_fnc)
+		ret = mfd->kickoff_fnc(ctl);
+	else
+		ret = mdss_mdp_display_commit(ctl, NULL);
+	if (IS_ERR_VALUE(ret))
+		return ret;
+
+	complete(&mfd->update.comp);
+	mutex_lock(&mfd->no_update.lock);
+	if (mfd->no_update.timer.function)
+		del_timer(&(mfd->no_update.timer));
+
+	mfd->no_update.timer.expires = jiffies + (2 * HZ);
+	add_timer(&mfd->no_update.timer);
+	mutex_unlock(&mfd->no_update.lock);
+
+	ret = mdss_mdp_overlay_cleanup(mfd);
 
 	return ret;
 }
 
-static int mdss_mdp_overlay_unset(struct msm_fb_data_type *mfd, int ndx)
+static int mdss_mdp_overlay_release(struct msm_fb_data_type *mfd, int ndx)
 {
 	struct mdss_mdp_pipe *pipe;
-	int i, ret = 0;
 	u32 pipe_ndx, unset_ndx = 0;
-
-	if (!mfd || !mfd->ctl)
-		return -ENODEV;
-
-	pr_debug("unset ndx=%x\n", ndx);
-
-	if (ndx & MDSS_MDP_ROT_SESSION_MASK) {
-		struct mdss_mdp_rotator_session *rot;
-		rot = mdss_mdp_rotator_session_get(ndx);
-		if (rot) {
-			mdss_mdp_rotator_finish(rot);
-		} else {
-			pr_warn("unknown session id=%x\n", ndx);
-			ret = -ENODEV;
-		}
-
-		return ret;
-	}
-
-	if (!mfd->ctl->power_on)
-		return 0;
+	int i;
 
 	for (i = 0; unset_ndx != ndx && i < MDSS_MDP_MAX_SSPP; i++) {
 		pipe_ndx = BIT(i);
@@ -504,37 +510,59 @@
 			mdss_mdp_mixer_pipe_unstage(pipe);
 		}
 	}
+	return 0;
+}
+
+static int mdss_mdp_overlay_unset(struct msm_fb_data_type *mfd, int ndx)
+{
+	int ret = 0;
+
+	if (!mfd || !mfd->ctl)
+		return -ENODEV;
+
+	ret = mutex_lock_interruptible(&mfd->ov_lock);
+	if (ret)
+		return ret;
+
+	if (!mfd->panel_power_on) {
+		mutex_unlock(&mfd->ov_lock);
+		return -EPERM;
+	}
+
+	pr_debug("unset ndx=%x\n", ndx);
+
+	if (ndx & MDSS_MDP_ROT_SESSION_MASK)
+		ret = mdss_mdp_rotator_release(ndx);
+	else
+		ret = mdss_mdp_overlay_release(mfd, ndx);
+
+	mutex_unlock(&mfd->ov_lock);
 
 	return ret;
 }
 
-int mdss_mdp_overlay_release_all(struct msm_fb_data_type *mfd)
+static int mdss_mdp_overlay_release_all(struct msm_fb_data_type *mfd)
 {
 	struct mdss_mdp_pipe *pipe;
 	u32 unset_ndx = 0;
 	int cnt = 0;
 
+	mutex_lock(&mfd->ov_lock);
 	mutex_lock(&mfd->lock);
-	if (!list_empty(&mfd->pipes_used)) {
-		list_for_each_entry(pipe, &mfd->pipes_used, used_list) {
-			if (pipe->ndx & MDSS_MDP_ROT_SESSION_MASK) {
-				struct mdss_mdp_rotator_session *rot;
-				rot = mdss_mdp_rotator_session_get(pipe->ndx);
-				if (rot)
-					mdss_mdp_rotator_finish(rot);
-			} else {
-				unset_ndx |= pipe->ndx;
-				cnt++;
-			}
-		}
+	list_for_each_entry(pipe, &mfd->pipes_used, used_list) {
+		unset_ndx |= pipe->ndx;
+		cnt++;
 	}
 	mutex_unlock(&mfd->lock);
 
 	if (unset_ndx) {
 		pr_debug("%d pipes need cleanup (%x)\n", cnt, unset_ndx);
-		mdss_mdp_overlay_unset(mfd, unset_ndx);
-		mdss_mdp_overlay_kickoff(mfd->ctl);
+		mdss_mdp_overlay_release(mfd, unset_ndx);
 	}
+	mutex_unlock(&mfd->ov_lock);
+
+	if (cnt)
+		mdss_mdp_overlay_kickoff(mfd->ctl);
 
 	return 0;
 }
@@ -560,26 +588,28 @@
 	struct mdss_mdp_rotator_session *rot;
 	struct mdss_mdp_data src_data, dst_data;
 	int ret;
+	u32 flgs;
 
-	ret = mdss_mdp_overlay_get_buf(mfd, &src_data, &req->data, 1);
+	rot = mdss_mdp_rotator_session_get(req->id);
+	if (!rot) {
+		pr_err("invalid session id=%x\n", req->id);
+		return -ENOENT;
+	}
+
+	flgs = rot->flags & MDP_SECURE_OVERLAY_SESSION;
+
+	ret = mdss_mdp_overlay_get_buf(mfd, &src_data, &req->data, 1, flgs);
 	if (ret) {
 		pr_err("src_data pmem error\n");
 		goto rotate_done;
 	}
 
-	ret = mdss_mdp_overlay_get_buf(mfd, &dst_data, &req->dst_data, 1);
+	ret = mdss_mdp_overlay_get_buf(mfd, &dst_data, &req->dst_data, 1, flgs);
 	if (ret) {
 		pr_err("dst_data pmem error\n");
 		goto rotate_done;
 	}
 
-	rot = mdss_mdp_rotator_session_get(req->id);
-	if (!rot) {
-		pr_err("invalid session id=%x\n", req->id);
-		ret = -ENODEV;
-		goto rotate_done;
-	}
-
 	ret = mdss_mdp_rotator_queue(rot, &src_data, &dst_data);
 	if (ret) {
 		pr_err("rotator queue error session id=%x\n", req->id);
@@ -600,6 +630,7 @@
 	struct mdss_mdp_pipe *pipe;
 	struct mdss_mdp_data *src_data;
 	int ret, buf_ndx;
+	u32 flags;
 
 	pipe = mdss_mdp_pipe_get_locked(req->id);
 	if (pipe == NULL) {
@@ -609,11 +640,13 @@
 
 	pr_debug("ov queue pnum=%d\n", pipe->num);
 
+	flags = (pipe->flags & MDP_SECURE_OVERLAY_SESSION);
+
 	buf_ndx = (pipe->play_cnt + 1) & 1; /* next buffer */
 	src_data = &pipe->buffers[buf_ndx];
 	mdss_mdp_overlay_free_buf(src_data);
 
-	ret = mdss_mdp_overlay_get_buf(mfd, src_data, &req->data, 1);
+	ret = mdss_mdp_overlay_get_buf(mfd, src_data, &req->data, 1, flags);
 	if (IS_ERR_VALUE(ret)) {
 		pr_err("src_data pmem error\n");
 	} else {
@@ -624,9 +657,6 @@
 	ctl = pipe->mixer->ctl;
 	mdss_mdp_pipe_unlock(pipe);
 
-	if ((ret == 0) && (mfd->panel_info.type == WRITEBACK_PANEL))
-		ret = mdss_mdp_overlay_kickoff(ctl);
-
 	return ret;
 }
 
@@ -637,14 +667,29 @@
 
 	pr_debug("play req id=%x\n", req->id);
 
-	if (!mfd->panel_power_on)
-		return -EPERM;
+	ret = mutex_lock_interruptible(&mfd->ov_lock);
+	if (ret)
+		return ret;
 
-	if (req->id & MDSS_MDP_ROT_SESSION_MASK)
+	if (!mfd->panel_power_on) {
+		mutex_unlock(&mfd->ov_lock);
+		return -EPERM;
+	}
+
+	if (req->id & MDSS_MDP_ROT_SESSION_MASK) {
 		ret = mdss_mdp_overlay_rotate(mfd, req);
-	else
+	} else {
 		ret = mdss_mdp_overlay_queue(mfd, req);
 
+		if ((ret == 0) && (mfd->panel_info.type == WRITEBACK_PANEL)) {
+			mutex_unlock(&mfd->ov_lock);
+			ret = mdss_mdp_overlay_kickoff(mfd->ctl);
+			return ret;
+		}
+	}
+
+	mutex_unlock(&mfd->ov_lock);
+
 	return ret;
 }
 
@@ -718,10 +763,7 @@
 	u32 offset;
 	int bpp, ret;
 
-	if (!mfd)
-		return;
-
-	if (!mfd->ctl || !mfd->panel_power_on)
+	if (!mfd || !mfd->ctl)
 		return;
 
 	fbi = mfd->fbi;
@@ -731,6 +773,14 @@
 		return;
 	}
 
+	if (mutex_lock_interruptible(&mfd->ov_lock))
+		return;
+
+	if (!mfd->panel_power_on) {
+		mutex_unlock(&mfd->ov_lock);
+		return;
+	}
+
 	memset(&data, 0, sizeof(data));
 
 	bpp = fbi->var.bits_per_pixel / 8;
@@ -781,6 +831,7 @@
 			return;
 		}
 	}
+	mutex_unlock(&mfd->ov_lock);
 
 	if (fbi->var.activate & FB_ACTIVATE_VBL)
 		mdss_mdp_overlay_kickoff(mfd->ctl);
@@ -852,9 +903,9 @@
 		}
 
 		ret = msm_iommu_map_contig_buffer(mfd->cursor_buf_phys,
-						mdss_get_iommu_domain(), 0,
-						MDSS_MDP_CURSOR_SIZE, SZ_4K,
-						0, &(mfd->cursor_buf_iova));
+			mdss_get_iommu_domain(MDSS_IOMMU_DOMAIN_UNSECURE),
+			0, MDSS_MDP_CURSOR_SIZE, SZ_4K, 0,
+			&(mfd->cursor_buf_iova));
 		if (IS_ERR_VALUE(ret)) {
 			dma_free_coherent(NULL, MDSS_MDP_CURSOR_SIZE,
 					  mfd->cursor_buf,
@@ -1075,10 +1126,36 @@
 	return ret;
 }
 
+static int mdss_mdp_overlay_on(struct msm_fb_data_type *mfd)
+{
+	int rc;
+
+	rc = mdss_mdp_ctl_on(mfd);
+	if (rc == 0)
+		atomic_inc(&ov_active_panels);
+
+	return rc;
+}
+
+static int mdss_mdp_overlay_off(struct msm_fb_data_type *mfd)
+{
+	int rc;
+
+	mdss_mdp_overlay_release_all(mfd);
+
+	rc = mdss_mdp_ctl_off(mfd);
+	if (rc == 0) {
+		if (atomic_dec_return(&ov_active_panels) == 0)
+			mdss_mdp_rotator_release_all();
+	}
+
+	return rc;
+}
+
 int mdss_mdp_overlay_init(struct msm_fb_data_type *mfd)
 {
-	mfd->on_fnc = mdss_mdp_ctl_on;
-	mfd->off_fnc = mdss_mdp_ctl_off;
+	mfd->on_fnc = mdss_mdp_overlay_on;
+	mfd->off_fnc = mdss_mdp_overlay_off;
 	mfd->hw_refresh = true;
 	mfd->do_histogram = NULL;
 	mfd->overlay_play_enable = true;
@@ -1091,6 +1168,7 @@
 
 	INIT_LIST_HEAD(&mfd->pipes_used);
 	INIT_LIST_HEAD(&mfd->pipes_cleanup);
+	mutex_init(&mfd->ov_lock);
 
 	return 0;
 }
diff --git a/drivers/video/msm/mdss/mdss_mdp_pipe.c b/drivers/video/msm/mdss/mdss_mdp_pipe.c
index c28bcfd..459cf14 100644
--- a/drivers/video/msm/mdss/mdss_mdp_pipe.c
+++ b/drivers/video/msm/mdss/mdss_mdp_pipe.c
@@ -268,9 +268,7 @@
 			atomic_read(&pipe->ref_cnt));
 
 	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON, false);
-	mutex_lock(&mdss_mdp_sspp_lock);
 	mdss_mdp_pipe_free(pipe);
-	mutex_unlock(&mdss_mdp_sspp_lock);
 	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false);
 
 	return 0;
@@ -502,6 +500,11 @@
 	ystride1 =  (pipe->src_planes.ystride[2]) |
 		    (pipe->src_planes.ystride[3] << 16);
 
+	if (pipe->overfetch_disable) {
+		img_size = src_size;
+		src_xy = 0;
+	}
+
 	mdss_mdp_pipe_write(pipe, MDSS_MDP_REG_SSPP_SRC_IMG_SIZE, img_size);
 	mdss_mdp_pipe_write(pipe, MDSS_MDP_REG_SSPP_SRC_SIZE, src_size);
 	mdss_mdp_pipe_write(pipe, MDSS_MDP_REG_SSPP_SRC_XY, src_xy);
@@ -517,9 +520,13 @@
 {
 	struct mdss_mdp_format_params *fmt;
 	u32 opmode, chroma_samp, unpack, src_format;
+	u32 secure = 0;
 
 	fmt = pipe->src_fmt;
 
+	if (pipe->flags & MDP_SECURE_OVERLAY_SESSION)
+		secure = 0xF;
+
 	opmode = pipe->bwc_mode;
 	if (pipe->flags & MDP_FLIP_LR)
 		opmode |= MDSS_MDP_OP_FLIP_LR;
@@ -566,6 +573,7 @@
 	mdss_mdp_pipe_write(pipe, MDSS_MDP_REG_SSPP_SRC_FORMAT, src_format);
 	mdss_mdp_pipe_write(pipe, MDSS_MDP_REG_SSPP_SRC_UNPACK_PATTERN, unpack);
 	mdss_mdp_pipe_write(pipe, MDSS_MDP_REG_SSPP_SRC_OP_MODE, opmode);
+	mdss_mdp_pipe_write(pipe, MDSS_MDP_REG_SSPP_SRC_ADDR_SW_STATUS, secure);
 
 	return 0;
 }
@@ -607,6 +615,28 @@
 	return 0;
 }
 
+static void mdss_mdp_addr_add_offset(struct mdss_mdp_pipe *pipe,
+				    struct mdss_mdp_data *data)
+{
+	data->p[0].addr += pipe->src.x +
+		(pipe->src.y * pipe->src_planes.ystride[0]);
+	if (data->num_planes > 1) {
+		u8 hmap[] = { 1, 2, 1, 2 };
+		u8 vmap[] = { 1, 1, 2, 2 };
+		u16 xoff = pipe->src.x / hmap[pipe->src_fmt->chroma_sample];
+		u16 yoff = pipe->src.y / vmap[pipe->src_fmt->chroma_sample];
+
+		if (data->num_planes == 2) /* pseudo planar */
+			xoff *= 2;
+		data->p[1].addr += xoff + (yoff * pipe->src_planes.ystride[1]);
+
+		if (data->num_planes > 2) { /* planar */
+			data->p[2].addr += xoff +
+				(yoff * pipe->src_planes.ystride[2]);
+		}
+	}
+}
+
 static int mdss_mdp_src_addr_setup(struct mdss_mdp_pipe *pipe,
 				   struct mdss_mdp_data *data)
 {
@@ -622,6 +652,9 @@
 	if (ret)
 		return ret;
 
+	if (pipe->overfetch_disable)
+		mdss_mdp_addr_add_offset(pipe, data);
+
 	/* planar format expects YCbCr, swap chroma planes if YCrCb */
 	if (!is_rot && (pipe->src_fmt->fetch_planes == MDSS_MDP_PLANE_PLANAR) &&
 	    (pipe->src_fmt->element[0] == C2_R_Cr))
diff --git a/drivers/video/msm/mdss/mdss_mdp_rotator.c b/drivers/video/msm/mdss/mdss_mdp_rotator.c
index dc1cb0d..1e58269 100644
--- a/drivers/video/msm/mdss/mdss_mdp_rotator.c
+++ b/drivers/video/msm/mdss/mdss_mdp_rotator.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -187,12 +187,17 @@
 {
 	struct mdss_mdp_pipe *rot_pipe;
 	struct mdss_mdp_ctl *ctl;
-	int ret;
+	int ret, need_wait = false;
 
-	if (!rot)
+	ret = mutex_lock_interruptible(&rotator_lock);
+	if (ret)
+		return ret;
+
+	if (!rot || !rot->ref_cnt) {
+		mutex_unlock(&rotator_lock);
 		return -ENODEV;
+	}
 
-	mutex_lock(&rotator_lock);
 	ret = mdss_mdp_rotator_pipe_dequeue(rot);
 	if (ret) {
 		pr_err("unable to acquire rotator\n");
@@ -207,7 +212,7 @@
 
 	if (rot->params_changed) {
 		rot->params_changed = 0;
-		rot_pipe->flags = rot->rotations;
+		rot_pipe->flags = rot->flags;
 		rot_pipe->src_fmt = mdss_mdp_get_format_params(rot->format);
 		rot_pipe->img_width = rot->img_width;
 		rot_pipe->img_height = rot->img_height;
@@ -225,16 +230,18 @@
 
 	ret = mdss_mdp_rotator_kickoff(ctl, rot, dst_data);
 
+	if (ret == 0 && !rot->no_wait)
+		need_wait = true;
 done:
 	mutex_unlock(&rotator_lock);
 
-	if (!rot->no_wait)
+	if (need_wait)
 		mdss_mdp_rotator_busy_wait(rot);
 
 	return ret;
 }
 
-int mdss_mdp_rotator_finish(struct mdss_mdp_rotator_session *rot)
+static int mdss_mdp_rotator_finish(struct mdss_mdp_rotator_session *rot)
 {
 	struct mdss_mdp_pipe *rot_pipe;
 
@@ -243,7 +250,6 @@
 
 	pr_debug("finish rot id=%x\n", rot->session_id);
 
-	mutex_lock(&rotator_lock);
 	rot_pipe = rot->pipe;
 	if (rot_pipe) {
 		mdss_mdp_rotator_busy_wait(rot);
@@ -255,7 +261,44 @@
 		mdss_mdp_pipe_destroy(rot_pipe);
 		mdss_mdp_wb_mixer_destroy(mixer);
 	}
+
+	return 0;
+}
+
+int mdss_mdp_rotator_release(u32 ndx)
+{
+	struct mdss_mdp_rotator_session *rot;
+	mutex_lock(&rotator_lock);
+	rot = mdss_mdp_rotator_session_get(ndx);
+	if (rot) {
+		mdss_mdp_rotator_finish(rot);
+	} else {
+		pr_warn("unknown session id=%x\n", ndx);
+		mutex_unlock(&rotator_lock);
+		return -ENOENT;
+	}
 	mutex_unlock(&rotator_lock);
 
 	return 0;
 }
+
+int mdss_mdp_rotator_release_all(void)
+{
+	struct mdss_mdp_rotator_session *rot;
+	int i, cnt;
+
+	mutex_lock(&rotator_lock);
+	for (i = 0, cnt = 0; i < MAX_ROTATOR_SESSIONS; i++) {
+		rot = &rotator_session[i];
+		if (rot->ref_cnt) {
+			mdss_mdp_rotator_finish(rot);
+			cnt++;
+		}
+	}
+	mutex_unlock(&rotator_lock);
+
+	if (cnt)
+		pr_debug("cleaned up %d rotator sessions\n", cnt);
+
+	return 0;
+}
diff --git a/drivers/video/msm/mdss/mdss_mdp_rotator.h b/drivers/video/msm/mdss/mdss_mdp_rotator.h
index eb5b47a..70ef6bf 100644
--- a/drivers/video/msm/mdss/mdss_mdp_rotator.h
+++ b/drivers/video/msm/mdss/mdss_mdp_rotator.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -25,7 +25,7 @@
 	u32 params_changed;
 
 	u32 format;
-	u32 rotations;
+	u32 flags;
 
 	u16 img_width;
 	u16 img_height;
@@ -48,7 +48,8 @@
 int mdss_mdp_rotator_queue(struct mdss_mdp_rotator_session *rot,
 			   struct mdss_mdp_data *src_data,
 			   struct mdss_mdp_data *dst_data);
-int mdss_mdp_rotator_finish(struct mdss_mdp_rotator_session *rot);
-int mdss_mdp_rotator_ctl_busy_wait(struct mdss_mdp_ctl *ctl);
+
+int mdss_mdp_rotator_release(u32 ndx);
+int mdss_mdp_rotator_release_all(void);
 
 #endif /* MDSS_MDP_ROTATOR_H */
diff --git a/drivers/video/msm/mdss/mdss_mdp_util.c b/drivers/video/msm/mdss/mdss_mdp_util.c
index 95c92fc..9f2df85 100644
--- a/drivers/video/msm/mdss/mdss_mdp_util.c
+++ b/drivers/video/msm/mdss/mdss_mdp_util.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -21,6 +21,7 @@
 #include <linux/msm_kgsl.h>
 #include <linux/spinlock.h>
 #include <linux/types.h>
+#include <media/msm_media_info.h>
 
 #include <mach/iommu_domains.h>
 
@@ -210,6 +211,13 @@
 		ps->num_planes = 1;
 		ps->plane_size[0] = w * h * bpp;
 		ps->ystride[0] = w * bpp;
+	} else if (format == MDP_Y_CBCR_H2V2_VENUS) {
+		int cf = COLOR_FMT_NV12;
+		ps->num_planes = 2;
+		ps->ystride[0] = VENUS_Y_STRIDE(cf, w);
+		ps->ystride[1] = VENUS_UV_STRIDE(cf, w);
+		ps->plane_size[0] = VENUS_Y_SCANLINES(cf, h) * ps->ystride[0];
+		ps->plane_size[1] = VENUS_UV_SCANLINES(cf, h) * ps->ystride[1];
 	} else {
 		u8 hmap[] = { 1, 2, 1, 2 };
 		u8 vmap[] = { 1, 1, 2, 2 };
@@ -223,10 +231,6 @@
 			stride_align = 16;
 			height_align = 1;
 			break;
-		case MDP_Y_CBCR_H2V2_VENUS:
-			stride_align = 32;
-			height_align = 32;
-			break;
 		default:
 			stride_align = 1;
 			height_align = 1;
@@ -312,9 +316,20 @@
 	} else if (!IS_ERR_OR_NULL(data->srcp_ihdl)) {
 		pr_debug("ion hdl=%p buf=0x%x\n", data->srcp_ihdl, data->addr);
 
-		if (is_mdss_iommu_attached())
+		if (is_mdss_iommu_attached()) {
+			int domain;
+			if (data->flags & MDP_SECURE_OVERLAY_SESSION)
+				domain = MDSS_IOMMU_DOMAIN_SECURE;
+			else
+				domain = MDSS_IOMMU_DOMAIN_UNSECURE;
 			ion_unmap_iommu(iclient, data->srcp_ihdl,
-					mdss_get_iommu_domain(), 0);
+					mdss_get_iommu_domain(domain), 0);
+
+			if (domain == MDSS_IOMMU_DOMAIN_SECURE) {
+				msm_ion_unsecure_buffer(iclient,
+					data->srcp_ihdl);
+			}
+		}
 
 		ion_free(iclient, data->srcp_ihdl);
 		data->srcp_ihdl = NULL;
@@ -335,7 +350,7 @@
 
 	start = (unsigned long *) &data->addr;
 	len = (unsigned long *) &data->len;
-	data->flags = img->flags;
+	data->flags |= img->flags;
 	data->p_need = 0;
 
 	if (img->flags & MDP_BLIT_SRC_GEM) {
@@ -370,8 +385,24 @@
 		}
 
 		if (is_mdss_iommu_attached()) {
+			int domain;
+			if (data->flags & MDP_SECURE_OVERLAY_SESSION) {
+				domain = MDSS_IOMMU_DOMAIN_SECURE;
+				ret = msm_ion_secure_buffer(iclient,
+					data->srcp_ihdl, 0x2,
+					ION_UNSECURE_DELAYED);
+				if (IS_ERR_VALUE(ret)) {
+					ion_free(iclient, data->srcp_ihdl);
+					pr_err("failed to secure handle (%d)\n",
+						ret);
+					return ret;
+				}
+			} else {
+				domain = MDSS_IOMMU_DOMAIN_UNSECURE;
+			}
+
 			ret = ion_map_iommu(iclient, data->srcp_ihdl,
-					    mdss_get_iommu_domain(),
+					    mdss_get_iommu_domain(domain),
 					    0, SZ_4K, 0, start, len, 0,
 					    ION_IOMMU_UNMAP_DELAYED);
 		} else {
diff --git a/drivers/video/msm/mdss/mdss_mdp_wb.c b/drivers/video/msm/mdss/mdss_mdp_wb.c
index b18efbe..b74523b 100644
--- a/drivers/video/msm/mdss/mdss_mdp_wb.c
+++ b/drivers/video/msm/mdss/mdss_mdp_wb.c
@@ -96,8 +96,9 @@
 		ion_phys(iclient, ihdl, &mdss_wb_mem, &img_size);
 
 		if (is_mdss_iommu_attached()) {
+			int domain = MDSS_IOMMU_DOMAIN_UNSECURE;
 			rc = ion_map_iommu(iclient, ihdl,
-					   mdss_get_iommu_domain(),
+					   mdss_get_iommu_domain(domain),
 					   0, SZ_4K, 0,
 					   (unsigned long *) &img->addr,
 					   (unsigned long *) &img->len,
@@ -569,6 +570,6 @@
 
 int msm_fb_get_iommu_domain(void)
 {
-	return mdss_get_iommu_domain();
+	return mdss_get_iommu_domain(MDSS_IOMMU_DOMAIN_UNSECURE);
 }
 EXPORT_SYMBOL(msm_fb_get_iommu_domain);
diff --git a/drivers/video/msm/mdss/mdss_panel.h b/drivers/video/msm/mdss/mdss_panel.h
index 5cdfe34..28d7051 100644
--- a/drivers/video/msm/mdss/mdss_panel.h
+++ b/drivers/video/msm/mdss/mdss_panel.h
@@ -55,6 +55,17 @@
 	MAX_PHYS_TARGET_NUM,
 };
 
+enum mdss_intf_events {
+	MDSS_EVENT_RESET,
+	MDSS_EVENT_UNBLANK,
+	MDSS_EVENT_TIMEGEN_ON,
+	MDSS_EVENT_BLANK,
+	MDSS_EVENT_TIMEGEN_OFF,
+	MDSS_EVENT_CLOSE,
+	MDSS_EVENT_SUSPEND,
+	MDSS_EVENT_RESUME,
+};
+
 /* panel info type */
 struct lcd_panel_info {
 	u32 vsync_enable;
@@ -178,14 +189,11 @@
 
 struct mdss_panel_data {
 	struct mdss_panel_info panel_info;
-	void (*set_backlight) (struct mdss_panel_data *pdata,
-							u32 bl_level);
-	int (*intf_unprepare) (struct mdss_panel_data *pdata);
+	void (*set_backlight) (struct mdss_panel_data *pdata, u32 bl_level);
 	unsigned char *mmss_cc_base;
 
 	/* function entry chain */
-	int (*on) (struct mdss_panel_data *pdata);
-	int (*off) (struct mdss_panel_data *pdata);
+	int (*event_handler) (struct mdss_panel_data *pdata, int e, void *arg);
 };
 
 int mdss_register_panel(struct mdss_panel_data *pdata);
diff --git a/drivers/video/msm/mdss/mdss_wb.c b/drivers/video/msm/mdss/mdss_wb.c
index d4c924f..c3dc06b 100644
--- a/drivers/video/msm/mdss/mdss_wb.c
+++ b/drivers/video/msm/mdss/mdss_wb.c
@@ -25,15 +25,10 @@
 
 #include "mdss_panel.h"
 
-static int mdss_wb_on(struct mdss_panel_data *pdata)
+static int mdss_wb_event_handler(struct mdss_panel_data *pdata,
+				 int event, void *arg)
 {
-	pr_debug("%s\n", __func__);
-	return 0;
-}
-
-static int mdss_wb_off(struct mdss_panel_data *pdata)
-{
-	pr_debug("%s\n", __func__);
+	pr_debug("%s: event=%d\n", __func__, event);
 	return 0;
 }
 
@@ -75,8 +70,7 @@
 	pdata->panel_info.pdest = DISPLAY_3;
 	pdata->panel_info.out_format = MDP_Y_CBCR_H2V2_VENUS;
 
-	pdata->on = mdss_wb_on;
-	pdata->off = mdss_wb_off;
+	pdata->event_handler = mdss_wb_event_handler;
 	pdev->dev.platform_data = pdata;
 
 	rc = mdss_register_panel(pdata);
diff --git a/drivers/video/msm/mdss/mhl_sii8334.c b/drivers/video/msm/mdss/mhl_sii8334.c
new file mode 100644
index 0000000..6a63964
--- /dev/null
+++ b/drivers/video/msm/mdss/mhl_sii8334.c
@@ -0,0 +1,1184 @@
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of_address.h>
+#include <linux/of_gpio.h>
+#include <linux/types.h>
+#include <linux/mhl_8334.h>
+
+#include "mdss_fb.h"
+#include "mdss_hdmi_tx.h"
+#include "mdss_hdmi_edid.h"
+#include "mdss.h"
+#include "mdss_panel.h"
+#include "mdss_io_util.h"
+
+#define MHL_DRIVER_NAME "sii8334"
+#define COMPATIBLE_NAME "qcom,mhl-sii8334"
+
+#define pr_debug_intr(...) pr_debug("\n")
+
+enum mhl_gpio_type {
+	MHL_TX_RESET_GPIO,
+	MHL_TX_INTR_GPIO,
+	MHL_TX_PMIC_PWR_GPIO,
+	MHL_TX_MAX_GPIO,
+};
+
+enum mhl_vreg_type {
+	MHL_TX_3V_VREG,
+	MHL_TX_MAX_VREG,
+};
+
+struct mhl_tx_platform_data {
+	/* Data filled from device tree nodes */
+	struct dss_gpio *gpios[MHL_TX_MAX_GPIO];
+	struct dss_vreg *vregs[MHL_TX_MAX_VREG];
+	int irq;
+};
+
+struct mhl_tx_ctrl {
+	struct platform_device *pdev;
+	struct mhl_tx_platform_data *pdata;
+	struct i2c_client *i2c_handle;
+	uint8_t cur_state;
+	uint8_t chip_rev_id;
+	int mhl_mode;
+};
+
+
+uint8_t slave_addrs[MAX_PAGES] = {
+	DEV_PAGE_TPI_0,
+	DEV_PAGE_TX_L0_0,
+	DEV_PAGE_TX_L1_0,
+	DEV_PAGE_TX_2_0,
+	DEV_PAGE_TX_3_0,
+	DEV_PAGE_CBUS,
+	DEV_PAGE_DDC_EDID,
+	DEV_PAGE_DDC_SEGM,
+};
+
+static irqreturn_t mhl_tx_isr(int irq, void *dev_id);
+static void switch_mode(struct mhl_tx_ctrl *mhl_ctrl,
+			enum mhl_st_type to_mode);
+static void mhl_drive_hpd(struct mhl_tx_ctrl *mhl_ctrl,
+			  uint8_t to_state);
+
+static int mhl_i2c_reg_read(struct i2c_client *client,
+			    uint8_t slave_addr_index, uint8_t reg_offset)
+{
+	int rc = -1;
+	uint8_t buffer = 0;
+
+	rc = mdss_i2c_byte_read(client, slave_addrs[slave_addr_index],
+				reg_offset, &buffer);
+	if (rc) {
+		pr_err("%s: slave=%x, off=%x\n",
+		       __func__, slave_addrs[slave_addr_index], reg_offset);
+		return rc;
+	}
+	return buffer;
+}
+
+
+static int mhl_i2c_reg_write(struct i2c_client *client,
+			     uint8_t slave_addr_index, uint8_t reg_offset,
+			     uint8_t value)
+{
+	return mdss_i2c_byte_write(client, slave_addrs[slave_addr_index],
+				 reg_offset, &value);
+}
+
+static void mhl_i2c_reg_modify(struct i2c_client *client,
+			       uint8_t slave_addr_index, uint8_t reg_offset,
+			       uint8_t mask, uint8_t val)
+{
+	uint8_t temp;
+
+	temp = mhl_i2c_reg_read(client, slave_addr_index, reg_offset);
+	temp &= (~mask);
+	temp |= (mask & val);
+	mhl_i2c_reg_write(client, slave_addr_index, reg_offset, temp);
+}
+
+
+static int mhl_tx_get_dt_data(struct device *dev,
+	struct mhl_tx_platform_data *pdata)
+{
+	int i, rc = 0;
+	struct device_node *of_node = NULL;
+	struct dss_gpio *temp_gpio = NULL;
+	i = 0;
+
+	if (!dev || !pdata) {
+		pr_err("%s: invalid input\n", __func__);
+		return -EINVAL;
+	}
+
+	of_node = dev->of_node;
+	if (!of_node) {
+		pr_err("%s: invalid of_node\n", __func__);
+		goto error;
+	}
+
+	pr_debug("%s: id=%d\n", __func__, dev->id);
+
+	/* GPIOs */
+	temp_gpio = NULL;
+	temp_gpio = devm_kzalloc(dev, sizeof(struct dss_gpio), GFP_KERNEL);
+	pr_debug("%s: gpios allocd\n", __func__);
+	if (!(temp_gpio)) {
+		pr_err("%s: can't alloc %d gpio mem\n", __func__, i);
+		goto error;
+	}
+	/* RESET */
+	temp_gpio->gpio = of_get_named_gpio(of_node, "mhl-rst-gpio", 0);
+	snprintf(temp_gpio->gpio_name, 32, "%s", "mhl-rst-gpio");
+	pr_debug("%s: rst gpio=[%d]\n", __func__,
+		 temp_gpio->gpio);
+	pdata->gpios[MHL_TX_RESET_GPIO] = temp_gpio;
+
+	/* PWR */
+	temp_gpio = NULL;
+	temp_gpio = devm_kzalloc(dev, sizeof(struct dss_gpio), GFP_KERNEL);
+	pr_debug("%s: gpios allocd\n", __func__);
+	if (!(temp_gpio)) {
+		pr_err("%s: can't alloc %d gpio mem\n", __func__, i);
+		goto error;
+	}
+	temp_gpio->gpio = of_get_named_gpio(of_node, "mhl-pwr-gpio", 0);
+	snprintf(temp_gpio->gpio_name, 32, "%s", "mhl-pwr-gpio");
+	pr_debug("%s: pmic gpio=[%d]\n", __func__,
+		 temp_gpio->gpio);
+	pdata->gpios[MHL_TX_PMIC_PWR_GPIO] = temp_gpio;
+
+	/* INTR */
+	temp_gpio = NULL;
+	temp_gpio = devm_kzalloc(dev, sizeof(struct dss_gpio), GFP_KERNEL);
+	pr_debug("%s: gpios allocd\n", __func__);
+	if (!(temp_gpio)) {
+		pr_err("%s: can't alloc %d gpio mem\n", __func__, i);
+		goto error;
+	}
+	temp_gpio->gpio = of_get_named_gpio(of_node, "mhl-intr-gpio", 0);
+	snprintf(temp_gpio->gpio_name, 32, "%s", "mhl-intr-gpio");
+	pr_debug("%s: intr gpio=[%d]\n", __func__,
+		 temp_gpio->gpio);
+	pdata->gpios[MHL_TX_INTR_GPIO] = temp_gpio;
+
+	return 0;
+error:
+	pr_err("%s: ret due to err\n", __func__);
+	for (i = 0; i < MHL_TX_MAX_GPIO; i++)
+		if (pdata->gpios[i])
+			devm_kfree(dev, pdata->gpios[i]);
+	return rc;
+} /* mhl_tx_get_dt_data */
+
+static int mhl_sii_reset_pin(struct mhl_tx_ctrl *mhl_ctrl, int on)
+{
+	gpio_set_value(mhl_ctrl->pdata->gpios[MHL_TX_RESET_GPIO]->gpio,
+		       on);
+	return 0;
+}
+
+static void cbus_reset(struct i2c_client *client)
+{
+	uint8_t i;
+
+	/*
+	 * REG_SRST
+	 */
+	MHL_SII_REG_NAME_MOD(REG_SRST, BIT3, BIT3);
+	msleep(20);
+	MHL_SII_REG_NAME_MOD(REG_SRST, BIT3, 0x00);
+	/*
+	 * REG_INTR1 and REG_INTR4
+	 */
+	MHL_SII_REG_NAME_WR(REG_INTR1_MASK, BIT6);
+	MHL_SII_REG_NAME_WR(REG_INTR4_MASK,
+		BIT0 | BIT2 | BIT3 | BIT4 | BIT5 | BIT6);
+
+	MHL_SII_REG_NAME_WR(REG_INTR5_MASK, 0x00);
+
+	/* Unmask CBUS1 Intrs */
+	MHL_SII_CBUS_WR(0x0009,
+		BIT2 | BIT3 | BIT4 | BIT5 | BIT6);
+
+	/* Unmask CBUS2 Intrs */
+	MHL_SII_CBUS_WR(0x001F, BIT2 | BIT3);
+
+	for (i = 0; i < 4; i++) {
+		/*
+		 * Enable WRITE_STAT interrupt for writes to
+		 * all 4 MSC Status registers.
+		 */
+		MHL_SII_CBUS_WR((0xE0 + i), 0xFF);
+
+		/*
+		 * Enable SET_INT interrupt for writes to
+		 * all 4 MSC Interrupt registers.
+		 */
+		MHL_SII_CBUS_WR((0xF0 + i), 0xFF);
+	}
+	return;
+}
+
+static void init_cbus_regs(struct i2c_client *client)
+{
+	uint8_t		regval;
+
+	/* Increase DDC translation layer timer*/
+	MHL_SII_CBUS_WR(0x0007, 0xF2);
+	/* Drive High Time */
+	MHL_SII_CBUS_WR(0x0036, 0x03);
+	/* Use programmed timing */
+	MHL_SII_CBUS_WR(0x0039, 0x30);
+	/* CBUS Drive Strength */
+	MHL_SII_CBUS_WR(0x0040, 0x03);
+	/*
+	 * Write initial default settings
+	 * to devcap regs: default settings
+	 */
+	MHL_SII_CBUS_WR(0x0080 |
+			  DEVCAP_OFFSET_DEV_STATE, DEVCAP_VAL_DEV_STATE);
+	MHL_SII_CBUS_WR(0x0080 |
+			  DEVCAP_OFFSET_MHL_VERSION, DEVCAP_VAL_MHL_VERSION);
+	MHL_SII_CBUS_WR(0x0080 |
+			  DEVCAP_OFFSET_DEV_CAT, DEVCAP_VAL_DEV_CAT);
+	MHL_SII_CBUS_WR(0x0080 |
+			  DEVCAP_OFFSET_ADOPTER_ID_H, DEVCAP_VAL_ADOPTER_ID_H);
+	MHL_SII_CBUS_WR(0x0080 |
+			  DEVCAP_OFFSET_ADOPTER_ID_L, DEVCAP_VAL_ADOPTER_ID_L);
+	MHL_SII_CBUS_WR(0x0080 | DEVCAP_OFFSET_VID_LINK_MODE,
+			  DEVCAP_VAL_VID_LINK_MODE);
+	MHL_SII_CBUS_WR(0x0080 |
+			  DEVCAP_OFFSET_AUD_LINK_MODE,
+			  DEVCAP_VAL_AUD_LINK_MODE);
+	MHL_SII_CBUS_WR(0x0080 |
+			  DEVCAP_OFFSET_VIDEO_TYPE, DEVCAP_VAL_VIDEO_TYPE);
+	MHL_SII_CBUS_WR(0x0080 |
+			  DEVCAP_OFFSET_LOG_DEV_MAP, DEVCAP_VAL_LOG_DEV_MAP);
+	MHL_SII_CBUS_WR(0x0080 |
+			  DEVCAP_OFFSET_BANDWIDTH, DEVCAP_VAL_BANDWIDTH);
+	MHL_SII_CBUS_WR(0x0080 |
+			  DEVCAP_OFFSET_FEATURE_FLAG, DEVCAP_VAL_FEATURE_FLAG);
+	MHL_SII_CBUS_WR(0x0080 |
+			  DEVCAP_OFFSET_DEVICE_ID_H, DEVCAP_VAL_DEVICE_ID_H);
+	MHL_SII_CBUS_WR(0x0080 |
+			  DEVCAP_OFFSET_DEVICE_ID_L, DEVCAP_VAL_DEVICE_ID_L);
+	MHL_SII_CBUS_WR(0x0080 |
+			  DEVCAP_OFFSET_SCRATCHPAD_SIZE,
+			  DEVCAP_VAL_SCRATCHPAD_SIZE);
+	MHL_SII_CBUS_WR(0x0080 |
+			  DEVCAP_OFFSET_INT_STAT_SIZE,
+			  DEVCAP_VAL_INT_STAT_SIZE);
+	MHL_SII_CBUS_WR(0x0080 |
+			  DEVCAP_OFFSET_RESERVED, DEVCAP_VAL_RESERVED);
+
+	/* Make bits 2,3 (initiator timeout) to 1,1
+	 * for register CBUS_LINK_CONTROL_2
+	 * REG_CBUS_LINK_CONTROL_2
+	 */
+	regval = MHL_SII_CBUS_RD(0x0031);
+	regval = (regval | 0x0C);
+	/* REG_CBUS_LINK_CONTROL_2 */
+	MHL_SII_CBUS_WR(0x0031, regval);
+	 /* REG_MSC_TIMEOUT_LIMIT */
+	MHL_SII_CBUS_WR(0x0022, 0x0F);
+	/* REG_CBUS_LINK_CONTROL_1 */
+	MHL_SII_CBUS_WR(0x0030, 0x01);
+	/* disallow vendor specific commands */
+	MHL_SII_CBUS_MOD(0x002E, BIT4, BIT4);
+}
+
+/*
+ * Configure the initial reg settings
+ */
+static void mhl_init_reg_settings(struct i2c_client *client, bool mhl_disc_en)
+{
+	/*
+	 * ============================================
+	 * POWER UP
+	 * ============================================
+	 */
+
+	/* Power up 1.2V core */
+	MHL_SII_PAGE1_WR(0x003D, 0x3F);
+	/*
+	 * Wait for the source power to be enabled
+	 * before enabling pll clocks.
+	 */
+	msleep(50);
+	/* Enable Tx PLL Clock */
+	MHL_SII_PAGE2_WR(0x0011, 0x01);
+	/* Enable Tx Clock Path and Equalizer */
+	MHL_SII_PAGE2_WR(0x0012, 0x11);
+	/* Tx Source Termination ON */
+	MHL_SII_REG_NAME_WR(REG_MHLTX_CTL1, 0x10);
+	/* Enable 1X MHL Clock output */
+	MHL_SII_REG_NAME_WR(REG_MHLTX_CTL6, 0xAC);
+	/* Tx Differential Driver Config */
+	MHL_SII_REG_NAME_WR(REG_MHLTX_CTL2, 0x3C);
+	MHL_SII_REG_NAME_WR(REG_MHLTX_CTL4, 0xD9);
+	/* PLL Bandwidth Control */
+	MHL_SII_REG_NAME_WR(REG_MHLTX_CTL8, 0x02);
+	/*
+	 * ============================================
+	 * Analog PLL Control
+	 * ============================================
+	 */
+	/* Enable Rx PLL clock */
+	MHL_SII_REG_NAME_WR(REG_TMDS_CCTRL,  0x00);
+	MHL_SII_PAGE0_WR(0x00F8, 0x0C);
+	MHL_SII_PAGE0_WR(0x0085, 0x02);
+	MHL_SII_PAGE2_WR(0x0000, 0x00);
+	MHL_SII_PAGE2_WR(0x0013, 0x60);
+	/* PLL Cal ref sel */
+	MHL_SII_PAGE2_WR(0x0017, 0x03);
+	/* VCO Cal */
+	MHL_SII_PAGE2_WR(0x001A, 0x20);
+	/* Auto EQ */
+	MHL_SII_PAGE2_WR(0x0022, 0xE0);
+	MHL_SII_PAGE2_WR(0x0023, 0xC0);
+	MHL_SII_PAGE2_WR(0x0024, 0xA0);
+	MHL_SII_PAGE2_WR(0x0025, 0x80);
+	MHL_SII_PAGE2_WR(0x0026, 0x60);
+	MHL_SII_PAGE2_WR(0x0027, 0x40);
+	MHL_SII_PAGE2_WR(0x0028, 0x20);
+	MHL_SII_PAGE2_WR(0x0029, 0x00);
+	/* Rx PLL Bandwidth 4MHz */
+	MHL_SII_PAGE2_WR(0x0031, 0x0A);
+	/* Rx PLL Bandwidth value from I2C */
+	MHL_SII_PAGE2_WR(0x0045, 0x06);
+	MHL_SII_PAGE2_WR(0x004B, 0x06);
+	/* Manual zone control */
+	MHL_SII_PAGE2_WR(0x004C, 0xE0);
+	/* PLL Mode value */
+	MHL_SII_PAGE2_WR(0x004D, 0x00);
+	MHL_SII_PAGE0_WR(0x0008, 0x35);
+	/*
+	 * Discovery Control and Status regs
+	 * Setting De-glitch time to 50 ms (default)
+	 * Switch Control Disabled
+	 */
+	MHL_SII_REG_NAME_WR(REG_DISC_CTRL2, 0xAD);
+	/* 1.8V CBUS VTH */
+	MHL_SII_REG_NAME_WR(REG_DISC_CTRL5, 0x55);
+	/* RGND and single Discovery attempt */
+	MHL_SII_REG_NAME_WR(REG_DISC_CTRL6, 0x11);
+	/* Ignore VBUS */
+	MHL_SII_REG_NAME_WR(REG_DISC_CTRL8, 0x82);
+	MHL_SII_REG_NAME_WR(REG_DISC_CTRL9, 0x24);
+
+	/* Enable CBUS Discovery */
+	if (mhl_disc_en) {
+		/* Enable MHL Discovery */
+		MHL_SII_REG_NAME_WR(REG_DISC_CTRL1, 0x27);
+		/* Pull-up resistance off for IDLE state */
+		MHL_SII_REG_NAME_WR(REG_DISC_CTRL4, 0xA4);
+	} else {
+		/* Disable MHL Discovery */
+		MHL_SII_REG_NAME_WR(REG_DISC_CTRL1, 0x26);
+		MHL_SII_REG_NAME_WR(REG_DISC_CTRL4, 0x8C);
+	}
+
+	MHL_SII_REG_NAME_WR(REG_DISC_CTRL7, 0x20);
+	/* MHL CBUS Discovery - immediate comm.  */
+	MHL_SII_REG_NAME_WR(REG_DISC_CTRL3, 0x86);
+
+	MHL_SII_REG_NAME_MOD(REG_INT_CTRL, BIT5 | BIT4, BIT4);
+
+	/* Enable Auto Soft RESET */
+	MHL_SII_REG_NAME_WR(REG_SRST, 0x084);
+	/* HDMI Transcode mode enable */
+	MHL_SII_PAGE0_WR(0x000D, 0x1C);
+
+	cbus_reset(client);
+	init_cbus_regs(client);
+}
+
+
+static void switch_mode(struct mhl_tx_ctrl *mhl_ctrl, enum mhl_st_type to_mode)
+{
+	struct i2c_client *client = mhl_ctrl->i2c_handle;
+
+	switch (to_mode) {
+	case POWER_STATE_D0_NO_MHL:
+		break;
+	case POWER_STATE_D0_MHL:
+		mhl_init_reg_settings(client, true);
+		/* REG_DISC_CTRL1 */
+		MHL_SII_REG_NAME_MOD(REG_DISC_CTRL1, BIT1 | BIT0, BIT0);
+
+		/* TPI_DEVICE_POWER_STATE_CTRL_REG */
+		mhl_i2c_reg_modify(client, TX_PAGE_TPI, 0x001E, BIT1 | BIT0,
+			0x00);
+		break;
+	case POWER_STATE_D3:
+		if (mhl_ctrl->cur_state == POWER_STATE_D3)
+			break;
+
+		/* Force HPD to 0 when not in MHL mode.  */
+		mhl_drive_hpd(mhl_ctrl, HPD_DOWN);
+		/*
+		 * Change TMDS termination to high impedance
+		 * on disconnection.
+		 */
+		MHL_SII_REG_NAME_WR(REG_MHLTX_CTL1, 0xD0);
+		msleep(50);
+		MHL_SII_REG_NAME_MOD(REG_DISC_CTRL1, BIT1 | BIT0, 0x00);
+		MHL_SII_PAGE3_MOD(0x003D, BIT0,
+				   0x00);
+		mhl_ctrl->cur_state = POWER_STATE_D3;
+		break;
+	default:
+		break;
+	}
+}
+
+static void mhl_drive_hpd(struct mhl_tx_ctrl *mhl_ctrl, uint8_t to_state)
+{
+	struct i2c_client *client = mhl_ctrl->i2c_handle;
+
+	pr_debug("%s: To state=[0x%x]\n", __func__, to_state);
+	if (to_state == HPD_UP) {
+		/*
+		 * Drive HPD to UP state
+		 *
+		 * The below two reg configs combined
+		 * enable TMDS output.
+		 */
+
+		/* Enable TMDS on TMDS_CCTRL */
+		MHL_SII_REG_NAME_MOD(REG_TMDS_CCTRL, BIT4, BIT4);
+
+		/*
+		 * Set HPD_OUT_OVR_EN = HPD State
+		 * EDID read and Un-force HPD (from low)
+		 * propagate to src; let HPD float by clearing
+		 * HPD OUT OVRRD EN
+		 */
+		MHL_SII_REG_NAME_MOD(REG_INT_CTRL, BIT4, 0x00);
+	} else {
+		/*
+		 * Drive HPD to DOWN state
+		 * Disable TMDS Output on REG_TMDS_CCTRL
+		 * Enable/Disable TMDS output (MHL TMDS output only)
+		 */
+		MHL_SII_REG_NAME_MOD(REG_INT_CTRL, BIT4, BIT4);
+		MHL_SII_REG_NAME_MOD(REG_TMDS_CCTRL, BIT4, 0x00);
+	}
+	return;
+}
+
+static void mhl_msm_connection(struct mhl_tx_ctrl *mhl_ctrl)
+{
+	uint8_t val;
+	struct i2c_client *client = mhl_ctrl->i2c_handle;
+
+	pr_debug("%s: cur st [0x%x]\n", __func__,
+		mhl_ctrl->cur_state);
+
+	if (mhl_ctrl->cur_state == POWER_STATE_D0_MHL) {
+		/* Already in D0 - MHL power state */
+		pr_err("%s: cur st not D0\n", __func__);
+		return;
+	}
+	/* spin_lock_irqsave(&mhl_state_lock, flags); */
+	mhl_ctrl->cur_state = POWER_STATE_D0_MHL;
+	/* spin_unlock_irqrestore(&mhl_state_lock, flags); */
+
+	MHL_SII_REG_NAME_WR(REG_MHLTX_CTL1, 0x10);
+	MHL_SII_CBUS_WR(0x07, 0xF2);
+
+	/*
+	 * Keep the discovery enabled. Need RGND interrupt
+	 * Possibly chip disables discovery after MHL_EST??
+	 * Need to re-enable here
+	 */
+	val = MHL_SII_PAGE3_RD(0x10);
+	MHL_SII_PAGE3_WR(0x10, val | BIT0);
+
+	return;
+}
+
+static void mhl_msm_disconnection(struct mhl_tx_ctrl *mhl_ctrl)
+{
+	struct i2c_client *client = mhl_ctrl->i2c_handle;
+	/*
+	 * MHL TX CTL1
+	 * Disabling Tx termination
+	 */
+	MHL_SII_PAGE3_WR(0x30, 0xD0);
+
+	switch_mode(mhl_ctrl, POWER_STATE_D3);
+	/*
+	 * Only if MHL-USB handshake is not implemented
+	 */
+	mhl_init_reg_settings(client, true);
+	return;
+}
+
+static int  mhl_msm_read_rgnd_int(struct mhl_tx_ctrl *mhl_ctrl)
+{
+	uint8_t rgnd_imp;
+	struct i2c_client *client = mhl_ctrl->i2c_handle;
+	/* DISC STATUS REG 2 */
+	rgnd_imp = (mhl_i2c_reg_read(client,
+				     TX_PAGE_3, 0x001C) & (BIT1 | BIT0));
+	pr_debug("imp range read=%02X\n", (int)rgnd_imp);
+
+	if (0x02 == rgnd_imp) {
+		pr_debug("%s: mhl sink\n", __func__);
+		MHL_SII_REG_NAME_MOD(REG_DISC_CTRL9, BIT0, BIT0);
+		mhl_ctrl->mhl_mode = 1;
+	} else {
+		pr_debug("%s: non-mhl sink\n", __func__);
+		mhl_ctrl->mhl_mode = 0;
+		MHL_SII_REG_NAME_MOD(REG_DISC_CTRL9, BIT3, BIT3);
+		switch_mode(mhl_ctrl, POWER_STATE_D3);
+	}
+	return mhl_ctrl->mhl_mode ?
+		MHL_DISCOVERY_RESULT_MHL : MHL_DISCOVERY_RESULT_USB;
+}
+
+static void force_usb_switch_open(struct mhl_tx_ctrl *mhl_ctrl)
+{
+	struct i2c_client *client = mhl_ctrl->i2c_handle;
+
+	/*disable discovery*/
+	MHL_SII_REG_NAME_MOD(REG_DISC_CTRL1, BIT0, 0);
+	/* force USB ID switch to open*/
+	MHL_SII_REG_NAME_MOD(REG_DISC_CTRL6, BIT6, BIT6);
+	MHL_SII_REG_NAME_WR(REG_DISC_CTRL3, 0x86);
+	/* force HPD to 0 when not in mhl mode. */
+	MHL_SII_REG_NAME_MOD(REG_INT_CTRL, BIT5 | BIT4, BIT4);
+}
+
+static void release_usb_switch_open(struct mhl_tx_ctrl *mhl_ctrl)
+{
+	struct i2c_client *client = mhl_ctrl->i2c_handle;
+
+	msleep(50);
+	MHL_SII_REG_NAME_MOD(REG_DISC_CTRL6, BIT6, 0x00);
+	MHL_SII_REG_NAME_MOD(REG_DISC_CTRL1, BIT0, BIT0);
+}
+
+static void scdt_st_chg(struct i2c_client *client)
+{
+	uint8_t tmds_cstat;
+	uint8_t mhl_fifo_status;
+
+	/* tmds cstat */
+	tmds_cstat = MHL_SII_PAGE3_RD(0x0040);
+	pr_debug("%s: tmds cstat: 0x%02x\n", __func__,
+		 tmds_cstat);
+
+	if (!(tmds_cstat & BIT1))
+		return;
+
+	mhl_fifo_status = MHL_SII_REG_NAME_RD(REG_INTR5);
+	pr_debug("%s: mhl fifo st: 0x%02x\n", __func__,
+		 mhl_fifo_status);
+	if (mhl_fifo_status & 0x0C) {
+		MHL_SII_REG_NAME_WR(REG_INTR5,  0x0C);
+		pr_debug("%s: mhl fifo rst\n", __func__);
+		MHL_SII_REG_NAME_WR(REG_SRST, 0x94);
+		MHL_SII_REG_NAME_WR(REG_SRST, 0x84);
+	}
+}
+
+
+static void dev_detect_isr(struct mhl_tx_ctrl *mhl_ctrl)
+{
+	uint8_t status, reg;
+	struct i2c_client *client = mhl_ctrl->i2c_handle;
+
+	/* INTR_STATUS4 */
+	status = MHL_SII_REG_NAME_RD(REG_INTR4);
+	pr_debug("%s: reg int4 st=%02X\n", __func__, status);
+
+	if ((0x00 == status) &&\
+	    (mhl_ctrl->cur_state == POWER_STATE_D3)) {
+		pr_err("%s: invalid intr\n", __func__);
+		return;
+	}
+
+	if (0xFF == status) {
+		pr_debug("%s: invalid intr 0xff\n", __func__);
+		MHL_SII_REG_NAME_WR(REG_INTR4, status);
+		return;
+	}
+
+	if ((status & BIT0) && (mhl_ctrl->chip_rev_id < 1)) {
+		pr_debug("%s: scdt intr\n", __func__);
+		scdt_st_chg(client);
+	}
+
+	if (status & BIT1)
+		pr_debug("mhl: int4 bit1 set\n");
+
+	/* mhl_est interrupt */
+	if (status & BIT2) {
+		pr_debug("%s: mhl_est st=%02X\n", __func__,
+			 (int) status);
+		mhl_msm_connection(mhl_ctrl);
+	} else if (status & BIT3) {
+		pr_debug("%s: uUSB-a type dev detct\n", __func__);
+		MHL_SII_REG_NAME_WR(REG_DISC_STAT2, 0x80);
+		switch_mode(mhl_ctrl, POWER_STATE_D3);
+	}
+
+	if (status & BIT5) {
+		/* clr intr - reg int4 */
+		pr_debug("%s: mhl discon: int4 st=%02X\n", __func__,
+			 (int)status);
+		reg = MHL_SII_REG_NAME_RD(REG_INTR4);
+		MHL_SII_REG_NAME_WR(REG_INTR4, reg);
+		mhl_msm_disconnection(mhl_ctrl);
+	}
+
+	if ((mhl_ctrl->cur_state != POWER_STATE_D0_MHL) &&\
+	    (status & BIT6)) {
+		/* rgnd rdy Intr */
+		pr_debug("%s: rgnd ready intr\n", __func__);
+		switch_mode(mhl_ctrl, POWER_STATE_D0_MHL);
+		mhl_msm_read_rgnd_int(mhl_ctrl);
+	}
+
+	/* Can't succeed at these in D3 */
+	if ((mhl_ctrl->cur_state != POWER_STATE_D3) &&\
+	     (status & BIT4)) {
+		/* cbus lockout interrupt?
+		 * Hardware detection mechanism figures that
+		 * CBUS line is latched and raises this intr
+		 * where we force usb switch open and release
+		 */
+		pr_warn("%s: cbus locked out!\n", __func__);
+		force_usb_switch_open(mhl_ctrl);
+		release_usb_switch_open(mhl_ctrl);
+	}
+	MHL_SII_REG_NAME_WR(REG_INTR4, status);
+
+	return;
+}
+
+static void mhl_misc_isr(struct mhl_tx_ctrl *mhl_ctrl)
+{
+	uint8_t intr_5_stat;
+	struct i2c_client *client = mhl_ctrl->i2c_handle;
+
+	/*
+	 * Clear INT 5
+	 * INTR5 is related to FIFO underflow/overflow reset
+	 * which is handled in 8334 by auto FIFO reset
+	 */
+	intr_5_stat = MHL_SII_REG_NAME_RD(REG_INTR5);
+	MHL_SII_REG_NAME_WR(REG_INTR5,  intr_5_stat);
+}
+
+
+static void mhl_hpd_stat_isr(struct mhl_tx_ctrl *mhl_ctrl)
+{
+	uint8_t intr_1_stat;
+	uint8_t cbus_stat;
+	struct i2c_client *client = mhl_ctrl->i2c_handle;
+
+	/* INTR STATUS 1 */
+	intr_1_stat = MHL_SII_PAGE0_RD(0x0071);
+
+	if (!intr_1_stat)
+		return;
+
+	/* Clear interrupts */
+	MHL_SII_PAGE0_WR(0x0071, intr_1_stat);
+	if (BIT6 & intr_1_stat) {
+		/*
+		 * HPD status change event is pending
+		 * Read CBUS HPD status for this info
+		 * MSC REQ ABRT REASON
+		 */
+		cbus_stat = MHL_SII_CBUS_RD(0x0D);
+		if (BIT6 & cbus_stat)
+			mhl_drive_hpd(mhl_ctrl, HPD_UP);
+	}
+	return;
+}
+
+static void clear_all_intrs(struct i2c_client *client)
+{
+	uint8_t regval = 0x00;
+
+	pr_debug_intr("********* exiting isr mask check ?? *************\n");
+	pr_debug_intr("int1 mask = %02X\n",
+		(int) MHL_SII_REG_NAME_RD(REG_INTR1));
+	pr_debug_intr("int3 mask = %02X\n",
+		(int) MHL_SII_PAGE0_RD(0x0077));
+	pr_debug_intr("int4 mask = %02X\n",
+		(int) MHL_SII_REG_NAME_RD(REG_INTR4));
+	pr_debug_intr("int5 mask = %02X\n",
+		(int) MHL_SII_REG_NAME_RD(REG_INTR5));
+	pr_debug_intr("cbus1 mask = %02X\n",
+		(int) MHL_SII_CBUS_RD(0x0009));
+	pr_debug_intr("cbus2 mask = %02X\n",
+		(int) MHL_SII_CBUS_RD(0x001F));
+	pr_debug_intr("********* end of isr mask check *************\n");
+
+	regval = MHL_SII_REG_NAME_RD(REG_INTR1);
+	pr_debug_intr("int1 st = %02X\n", (int)regval);
+	MHL_SII_REG_NAME_WR(REG_INTR1, regval);
+
+	regval =  MHL_SII_REG_NAME_RD(REG_INTR2);
+	pr_debug_intr("int2 st = %02X\n", (int)regval);
+	MHL_SII_REG_NAME_WR(REG_INTR2, regval);
+
+	regval =  MHL_SII_PAGE0_RD(0x0073);
+	pr_debug_intr("int3 st = %02X\n", (int)regval);
+	MHL_SII_PAGE0_WR(0x0073, regval);
+
+	regval =  MHL_SII_REG_NAME_RD(REG_INTR4);
+	pr_debug_intr("int4 st = %02X\n", (int)regval);
+	MHL_SII_REG_NAME_WR(REG_INTR4, regval);
+
+	regval =  MHL_SII_REG_NAME_RD(REG_INTR5);
+	pr_debug_intr("int5 st = %02X\n", (int)regval);
+	MHL_SII_REG_NAME_WR(REG_INTR5, regval);
+
+	regval =  MHL_SII_CBUS_RD(0x0008);
+	pr_debug_intr("cbusInt st = %02X\n", (int)regval);
+	MHL_SII_CBUS_WR(0x0008, regval);
+
+	regval =  MHL_SII_CBUS_RD(0x001E);
+	pr_debug_intr("CBUS intR_2: %d\n", (int)regval);
+	MHL_SII_CBUS_WR(0x001E, regval);
+
+	regval =  MHL_SII_CBUS_RD(0x00A0);
+	pr_debug_intr("A0 int set = %02X\n", (int)regval);
+	MHL_SII_CBUS_WR(0x00A0, regval);
+
+	regval =  MHL_SII_CBUS_RD(0x00A1);
+	pr_debug_intr("A1 int set = %02X\n", (int)regval);
+	MHL_SII_CBUS_WR(0x00A1, regval);
+
+	regval =  MHL_SII_CBUS_RD(0x00A2);
+	pr_debug_intr("A2 int set = %02X\n", (int)regval);
+	MHL_SII_CBUS_WR(0x00A2, regval);
+
+	regval =  MHL_SII_CBUS_RD(0x00A3);
+	pr_debug_intr("A3 int set = %02X\n", (int)regval);
+	MHL_SII_CBUS_WR(0x00A3, regval);
+
+	regval =  MHL_SII_CBUS_RD(0x00B0);
+	pr_debug_intr("B0 st set = %02X\n", (int)regval);
+	MHL_SII_CBUS_WR(0x00B0, regval);
+
+	regval =  MHL_SII_CBUS_RD(0x00B1);
+	pr_debug_intr("B1 st set = %02X\n", (int)regval);
+	MHL_SII_CBUS_WR(0x00B1, regval);
+
+	regval =  MHL_SII_CBUS_RD(0x00B2);
+	pr_debug_intr("B2 st set = %02X\n", (int)regval);
+	MHL_SII_CBUS_WR(0x00B2, regval);
+
+	regval =  MHL_SII_CBUS_RD(0x00B3);
+	pr_debug_intr("B3 st set = %02X\n", (int)regval);
+	MHL_SII_CBUS_WR(0x00B3, regval);
+
+	regval =  MHL_SII_CBUS_RD(0x00E0);
+	pr_debug_intr("E0 st set = %02X\n", (int)regval);
+	MHL_SII_CBUS_WR(0x00E0, regval);
+
+	regval =  MHL_SII_CBUS_RD(0x00E1);
+	pr_debug_intr("E1 st set = %02X\n", (int)regval);
+	MHL_SII_CBUS_WR(0x00E1, regval);
+
+	regval =  MHL_SII_CBUS_RD(0x00E2);
+	pr_debug_intr("E2 st set = %02X\n", (int)regval);
+	MHL_SII_CBUS_WR(0x00E2, regval);
+
+	regval =  MHL_SII_CBUS_RD(0x00E3);
+	pr_debug_intr("E3 st set = %02X\n", (int)regval);
+	MHL_SII_CBUS_WR(0x00E3, regval);
+
+	regval =  MHL_SII_CBUS_RD(0x00F0);
+	pr_debug_intr("F0 int set = %02X\n", (int)regval);
+	MHL_SII_CBUS_WR(0x00F0, regval);
+
+	regval =  MHL_SII_CBUS_RD(0x00F1);
+	pr_debug_intr("F1 int set = %02X\n", (int)regval);
+	MHL_SII_CBUS_WR(0x00F1, regval);
+
+	regval =  MHL_SII_CBUS_RD(0x00F2);
+	pr_debug_intr("F2 int set = %02X\n", (int)regval);
+	MHL_SII_CBUS_WR(0x00F2, regval);
+
+	regval =  MHL_SII_CBUS_RD(0x00F3);
+	pr_debug_intr("F3 int set = %02X\n", (int)regval);
+	MHL_SII_CBUS_WR(0x00F3, regval);
+	pr_debug_intr("********* end of exiting in isr *************\n");
+}
+
+
+static irqreturn_t mhl_tx_isr(int irq, void *data)
+{
+	struct mhl_tx_ctrl *mhl_ctrl = (struct mhl_tx_ctrl *)data;
+	pr_debug("%s: Getting Interrupts\n", __func__);
+
+	/*
+	 * Check RGND, MHL_EST, CBUS_LOCKOUT, SCDT
+	 * interrupts. In D3, we get only RGND
+	 */
+	dev_detect_isr(mhl_ctrl);
+
+	pr_debug("%s: cur pwr state is [0x%x]\n",
+		 __func__, mhl_ctrl->cur_state);
+	if (mhl_ctrl->cur_state == POWER_STATE_D0_MHL) {
+		/*
+		 * If dev_detect_isr() didn't move the tx to D3
+		 * on disconnect, continue to check other
+		 * interrupt sources.
+		 */
+		mhl_misc_isr(mhl_ctrl);
+
+		/*
+		 * Check for any peer messages for DCAP_CHG etc
+		 * Dispatch to have the CBUS module working only
+		 * once connected.
+		mhl_cbus_isr(mhl_ctrl);
+		 */
+		mhl_hpd_stat_isr(mhl_ctrl);
+	}
+
+	clear_all_intrs(mhl_ctrl->i2c_handle);
+
+	return IRQ_HANDLED;
+}
+
+static int mhl_tx_chip_init(struct mhl_tx_ctrl *mhl_ctrl)
+{
+	uint8_t chip_rev_id = 0x00;
+	struct i2c_client *client = mhl_ctrl->i2c_handle;
+
+	/* Reset the TX chip */
+	mhl_sii_reset_pin(mhl_ctrl, 0);
+	msleep(20);
+	mhl_sii_reset_pin(mhl_ctrl, 1);
+	/* TX PR-guide requires a 100 ms wait here */
+	msleep(100);
+
+	/* Read the chip rev ID */
+	chip_rev_id = MHL_SII_PAGE0_RD(0x04);
+	pr_debug("MHL: chip rev ID read=[%x]\n", chip_rev_id);
+
+	/*
+	 * Need to disable MHL discovery if
+	 * MHL-USB handshake is implemented
+	 */
+	mhl_init_reg_settings(client, true);
+	return 0;
+}
+
+static int mhl_sii_reg_config(struct i2c_client *client, bool enable)
+{
+	static struct regulator *reg_8941_l24;
+	static struct regulator *reg_8941_l02;
+	int rc = 0;
+
+	pr_debug("Inside %s\n", __func__);
+	if (!reg_8941_l24) {
+		reg_8941_l24 = regulator_get(&client->dev,
+			"avcc_18");
+		if (IS_ERR(reg_8941_l24)) {
+			pr_err("could not get reg_8038_l20, rc = %ld\n",
+				PTR_ERR(reg_8941_l24));
+			return -ENODEV;
+		}
+		if (enable)
+			rc = regulator_enable(reg_8941_l24);
+		else
+			rc = regulator_disable(reg_8941_l24);
+		if (rc) {
+			pr_err("'%s' regulator config[%u] failed, rc=%d\n",
+			       "avcc_1.8V", enable, rc);
+			return rc;
+		} else {
+			pr_debug("%s: vreg L24 %s\n",
+				 __func__, (enable ? "enabled" : "disabled"));
+		}
+	}
+
+	if (!reg_8941_l02) {
+		reg_8941_l02 = regulator_get(&client->dev,
+			"avcc_12");
+		if (IS_ERR(reg_8941_l02)) {
+			pr_err("could not get reg_8941_l02, rc = %ld\n",
+				PTR_ERR(reg_8941_l02));
+			return -ENODEV;
+		}
+		if (enable)
+			rc = regulator_enable(reg_8941_l02);
+		else
+			rc = regulator_disable(reg_8941_l02);
+		if (rc) {
+			pr_debug("'%s' regulator configure[%u] failed, rc=%d\n",
+				 "avcc_1.2V", enable, rc);
+			return rc;
+		} else {
+			pr_debug("%s: vreg L02 %s\n",
+				 __func__, (enable ? "enabled" : "disabled"));
+		}
+	}
+
+	return rc;
+}
+
+
+static int mhl_vreg_config(struct mhl_tx_ctrl *mhl_ctrl, uint8_t on)
+{
+	int ret;
+	struct i2c_client *client = mhl_ctrl->i2c_handle;
+	int pwr_gpio = mhl_ctrl->pdata->gpios[MHL_TX_PMIC_PWR_GPIO]->gpio;
+
+	pr_debug("%s\n", __func__);
+	if (on) {
+		ret = gpio_request(pwr_gpio,
+		    mhl_ctrl->pdata->gpios[MHL_TX_PMIC_PWR_GPIO]->gpio_name);
+		if (ret < 0) {
+			pr_err("%s: mhl pwr gpio req failed: %d\n",
+			       __func__, ret);
+			return ret;
+		}
+		ret = gpio_direction_output(pwr_gpio, 1);
+		if (ret < 0) {
+			pr_err("%s: set gpio MHL_PWR_EN dircn failed: %d\n",
+			       __func__, ret);
+			return ret;
+		}
+
+		ret = mhl_sii_reg_config(client, true);
+		if (ret) {
+			pr_err("%s: regulator enable failed\n", __func__);
+			return -EINVAL;
+		}
+		pr_debug("%s: mhl sii power on successful\n", __func__);
+	} else {
+		pr_warn("%s: turning off pwr controls\n", __func__);
+		mhl_sii_reg_config(client, false);
+		gpio_free(pwr_gpio);
+	}
+	pr_debug("%s: successful\n", __func__);
+	return 0;
+}
+
+/*
+ * Request for GPIO allocations
+ * Set appropriate GPIO directions
+ */
+static int mhl_gpio_config(struct mhl_tx_ctrl *mhl_ctrl, int on)
+{
+	int ret;
+	struct dss_gpio *temp_reset_gpio, *temp_intr_gpio;
+
+	/* local aliases to keep the lines below short */
+	temp_reset_gpio = mhl_ctrl->pdata->gpios[MHL_TX_RESET_GPIO];
+	temp_intr_gpio = mhl_ctrl->pdata->gpios[MHL_TX_INTR_GPIO];
+
+	if (on) {
+		if (gpio_is_valid(temp_reset_gpio->gpio)) {
+			ret = gpio_request(temp_reset_gpio->gpio,
+					   temp_reset_gpio->gpio_name);
+			if (ret < 0) {
+				pr_err("%s:rst_gpio=[%d] req failed:%d\n",
+				       __func__, temp_reset_gpio->gpio, ret);
+				return -EBUSY;
+			}
+			ret = gpio_direction_output(temp_reset_gpio->gpio, 0);
+			if (ret < 0) {
+				pr_err("%s: set dirn rst failed: %d\n",
+				       __func__, ret);
+				return -EBUSY;
+			}
+		}
+		if (gpio_is_valid(temp_intr_gpio->gpio)) {
+			ret = gpio_request(temp_intr_gpio->gpio,
+					   temp_intr_gpio->gpio_name);
+			if (ret < 0) {
+				pr_err("%s: intr_gpio req failed: %d\n",
+				       __func__, ret);
+				return -EBUSY;
+			}
+			ret = gpio_direction_input(temp_intr_gpio->gpio);
+			if (ret < 0) {
+				pr_err("%s: set dirn intr failed: %d\n",
+				       __func__, ret);
+				return -EBUSY;
+			}
+			mhl_ctrl->i2c_handle->irq = gpio_to_irq(
+				temp_intr_gpio->gpio);
+			pr_debug("%s: gpio_to_irq=%d\n",
+				 __func__, mhl_ctrl->i2c_handle->irq);
+		}
+	} else {
+		pr_warn("%s: freeing gpios\n", __func__);
+		gpio_free(temp_intr_gpio->gpio);
+		gpio_free(temp_reset_gpio->gpio);
+	}
+	pr_debug("%s: successful\n", __func__);
+	return 0;
+}
+
+static int mhl_i2c_probe(struct i2c_client *client,
+			 const struct i2c_device_id *id)
+{
+	int rc = 0;
+	struct mhl_tx_platform_data *pdata = NULL;
+	struct mhl_tx_ctrl *mhl_ctrl;
+
+	mhl_ctrl = devm_kzalloc(&client->dev, sizeof(*mhl_ctrl), GFP_KERNEL);
+	if (!mhl_ctrl) {
+		pr_err("%s: FAILED: cannot alloc hdmi tx ctrl\n", __func__);
+		rc = -ENOMEM;
+		goto failed_no_mem;
+	}
+
+	if (client->dev.of_node) {
+		pdata = devm_kzalloc(&client->dev,
+			     sizeof(struct mhl_tx_platform_data), GFP_KERNEL);
+		if (!pdata) {
+			dev_err(&client->dev, "Failed to allocate memory\n");
+			rc = -ENOMEM;
+			goto failed_no_mem;
+		}
+
+		rc = mhl_tx_get_dt_data(&client->dev, pdata);
+		if (rc) {
+			pr_err("%s: FAILED: parsing device tree data; rc=%d\n",
+				__func__, rc);
+			goto failed_dt_data;
+		}
+		mhl_ctrl->i2c_handle = client;
+		mhl_ctrl->pdata = pdata;
+		i2c_set_clientdata(client, mhl_ctrl);
+	}
+
+	/*
+	 * Regulator init
+	 */
+	rc = mhl_vreg_config(mhl_ctrl, 1);
+	if (rc) {
+		pr_err("%s: vreg init failed [%d]\n",
+			__func__, rc);
+		goto failed_probe;
+	}
+
+	/*
+	 * GPIO init
+	 */
+	rc = mhl_gpio_config(mhl_ctrl, 1);
+	if (rc) {
+		pr_err("%s: gpio init failed [%d]\n",
+			__func__, rc);
+		goto failed_probe;
+	}
+
+	/*
+	 * Other initializations
+	 * such tx specific
+	 */
+	rc = mhl_tx_chip_init(mhl_ctrl);
+	if (rc) {
+		pr_err("%s: tx chip init failed [%d]\n",
+			__func__, rc);
+		goto failed_probe;
+	}
+
+	pr_debug("%s: IRQ from GPIO INTR = %d\n",
+		__func__, mhl_ctrl->i2c_handle->irq);
+	pr_debug("%s: Driver name = [%s]\n", __func__,
+		client->dev.driver->name);
+	rc = request_threaded_irq(mhl_ctrl->i2c_handle->irq, NULL,
+				   &mhl_tx_isr,
+				  IRQF_TRIGGER_LOW | IRQF_ONESHOT,
+				 client->dev.driver->name, mhl_ctrl);
+	if (rc) {
+		pr_err("request_threaded_irq failed, status: %d\n",
+			rc);
+		goto failed_probe;
+	} else {
+		pr_debug("request_threaded_irq succeeded\n");
+	}
+	pr_debug("%s: i2c client addr is [%x]\n", __func__, client->addr);
+	return 0;
+failed_probe:
+failed_dt_data:
+	if (pdata)
+		devm_kfree(&client->dev, pdata);
+failed_no_mem:
+	if (mhl_ctrl)
+		devm_kfree(&client->dev, mhl_ctrl);
+	pr_err("%s: PROBE FAILED, rc=%d\n", __func__, rc);
+	return rc;
+}
+
+
+static int mhl_i2c_remove(struct i2c_client *client)
+{
+	struct mhl_tx_ctrl *mhl_ctrl = i2c_get_clientdata(client);
+
+	if (!mhl_ctrl) {
+		pr_warn("%s: i2c get client data failed\n", __func__);
+		return -EINVAL;
+	}
+
+	free_irq(mhl_ctrl->i2c_handle->irq, mhl_ctrl);
+	mhl_gpio_config(mhl_ctrl, 0);
+	mhl_vreg_config(mhl_ctrl, 0);
+	if (mhl_ctrl->pdata)
+		devm_kfree(&client->dev, mhl_ctrl->pdata);
+	devm_kfree(&client->dev, mhl_ctrl);
+	return 0;
+}
+
+static struct i2c_device_id mhl_sii_i2c_id[] = {
+	{ MHL_DRIVER_NAME, 0 },
+	{ }
+};
+
+
+MODULE_DEVICE_TABLE(i2c, mhl_sii_i2c_id);
+
+static struct of_device_id mhl_match_table[] = {
+	{.compatible = COMPATIBLE_NAME,},
+	{ },
+};
+
+static struct i2c_driver mhl_sii_i2c_driver = {
+	.driver = {
+		.name = MHL_DRIVER_NAME,
+		.owner = THIS_MODULE,
+		.of_match_table = mhl_match_table,
+	},
+	.probe = mhl_i2c_probe,
+	.remove =  mhl_i2c_remove,
+	.id_table = mhl_sii_i2c_id,
+};
+
+module_i2c_driver(mhl_sii_i2c_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("MHL SII 8334 TX Driver");
diff --git a/drivers/video/msm/mipi_novatek.c b/drivers/video/msm/mipi_novatek.c
index 69ca0a3..ecac82d 100644
--- a/drivers/video/msm/mipi_novatek.c
+++ b/drivers/video/msm/mipi_novatek.c
@@ -460,6 +460,12 @@
 {
 	struct dcs_cmd_req cmdreq;
 
+	if (mipi_novatek_pdata &&
+	    mipi_novatek_pdata->gpio_set_backlight) {
+		mipi_novatek_pdata->gpio_set_backlight(mfd->bl_level);
+		return;
+	}
+
 	if ((mipi_novatek_pdata->enable_wled_bl_ctrl)
 	    && (wled_trigger_initialized)) {
 		led_trigger_event(bkl_led_trigger, mfd->bl_level);
diff --git a/drivers/video/msm/msm_fb.c b/drivers/video/msm/msm_fb.c
index 5189b6d..993ec01 100644
--- a/drivers/video/msm/msm_fb.c
+++ b/drivers/video/msm/msm_fb.c
@@ -532,6 +532,7 @@
 	mfd->suspend.sw_refreshing_enable = mfd->sw_refreshing_enable;
 	mfd->suspend.op_enable = mfd->op_enable;
 	mfd->suspend.panel_power_on = mfd->panel_power_on;
+	mfd->suspend.op_suspend = true;
 
 	if (mfd->op_enable) {
 		ret =
@@ -597,6 +598,8 @@
 			MSM_FB_INFO("msm_fb_resume: can't turn on display!\n");
 	}
 
+	mfd->suspend.op_suspend = false;
+
 	return ret;
 }
 #endif
@@ -2918,17 +2921,9 @@
 	return ret;
 }
 
-static int msmfb_overlay_commit(struct fb_info *info, unsigned long *argp)
+static int msmfb_overlay_commit(struct fb_info *info)
 {
-	int ret, ndx;
-
-	ret = copy_from_user(&ndx, argp, sizeof(ndx));
-	if (ret) {
-		pr_err("%s: ioctl failed\n", __func__);
-		return ret;
-	}
-
-	return mdp4_overlay_commit(info, ndx);
+	return mdp4_overlay_commit(info);
 }
 
 static int msmfb_overlay_play(struct fb_info *info, unsigned long *argp)
@@ -3362,7 +3357,7 @@
 		break;
 	case MSMFB_OVERLAY_COMMIT:
 		down(&msm_fb_ioctl_ppp_sem);
-		ret = msmfb_overlay_commit(info, argp);
+		ret = msmfb_overlay_commit(info);
 		up(&msm_fb_ioctl_ppp_sem);
 		break;
 	case MSMFB_OVERLAY_PLAY:
diff --git a/drivers/video/msm/msm_fb.h b/drivers/video/msm/msm_fb.h
index 7d8ad73..9c4f3d3 100644
--- a/drivers/video/msm/msm_fb.h
+++ b/drivers/video/msm/msm_fb.h
@@ -53,6 +53,7 @@
 	boolean op_enable;
 	boolean sw_refreshing_enable;
 	boolean panel_power_on;
+	boolean op_suspend;
 };
 
 struct msmfb_writeback_data_list {
@@ -125,6 +126,7 @@
 	__u32 channel_irq;
 
 	struct mdp_dma_data *dma;
+	struct device_attribute dev_attr;
 	void (*dma_fnc) (struct msm_fb_data_type *mfd);
 	int (*cursor_update) (struct fb_info *info,
 			      struct fb_cursor *cursor);
@@ -135,6 +137,8 @@
 	int (*start_histogram) (struct mdp_histogram_start_req *req);
 	int (*stop_histogram) (struct fb_info *info, uint32_t block);
 	void (*vsync_ctrl) (int enable);
+	void (*vsync_init) (int cndx);
+	void *vsync_show;
 	void *cursor_buf;
 	void *cursor_buf_phys;
 
@@ -190,6 +194,7 @@
 	unsigned char *copy_splash_phys;
 	void *cpu_pm_hdl;
 	u32 avtimer_phy;
+	int vsync_sysfs_created;
 };
 
 struct dentry *msm_fb_get_debugfs_root(void);
diff --git a/drivers/video/msm/vidc/1080p/ddl/vcd_ddl.c b/drivers/video/msm/vidc/1080p/ddl/vcd_ddl.c
index d5b195d..a82feb9 100644
--- a/drivers/video/msm/vidc/1080p/ddl/vcd_ddl.c
+++ b/drivers/video/msm/vidc/1080p/ddl/vcd_ddl.c
@@ -198,7 +198,8 @@
 		ddl->client_state = DDL_CLIENT_OPEN;
 		ddl->codec_data.hdr.decoding = decoding;
 		ddl->decoding = decoding;
-		ddl_set_default_meta_data_hdr(ddl);
+		if (!res_trk_check_for_sec_session())
+			ddl_set_default_meta_data_hdr(ddl);
 		ddl_set_initial_default_values(ddl);
 		*ddl_handle	= (u32 *) ddl;
 	} else {
diff --git a/drivers/video/msm/vidc/common/vcd/vcd_sub.c b/drivers/video/msm/vidc/common/vcd/vcd_sub.c
index a9709fb..b84ae44 100644
--- a/drivers/video/msm/vidc/common/vcd/vcd_sub.c
+++ b/drivers/video/msm/vidc/common/vcd/vcd_sub.c
@@ -784,7 +784,11 @@
 		buf_pool->allocated--;
 	}
 
-	memset(buf_entry, 0, sizeof(struct vcd_buffer_entry));
+	buf_entry->valid = buf_entry->allocated = buf_entry->in_use = 0;
+	buf_entry->alloc = buf_entry->virtual = buf_entry->physical = NULL;
+	buf_entry->sz = 0;
+	memset(&buf_entry->frame, 0, sizeof(struct vcd_frame_data));
+
 	buf_pool->validated--;
 	if (buf_pool->validated == 0)
 		vcd_free_buffer_pool_entries(buf_pool);
diff --git a/include/linux/diagchar.h b/include/linux/diagchar.h
index bb5f394..43daaf2 100644
--- a/include/linux/diagchar.h
+++ b/include/linux/diagchar.h
@@ -43,6 +43,7 @@
 #define DIAG_IOCTL_DCI_REG		23
 #define DIAG_IOCTL_DCI_STREAM_INIT	24
 #define DIAG_IOCTL_DCI_HEALTH_STATS	25
+#define DIAG_IOCTL_REMOTE_DEV		32
 
 /* PC Tools IDs */
 #define APQ8060_TOOLS_ID	4062
@@ -674,18 +675,18 @@
 };
 
 static const uint32_t msg_bld_masks_22[] = {
-	MSG_LVL_HIGH,
-	MSG_LVL_HIGH,
-	MSG_LVL_HIGH,
-	MSG_LVL_HIGH,
-	MSG_LVL_HIGH,
-	MSG_LVL_HIGH,
-	MSG_LVL_HIGH,
-	MSG_LVL_HIGH,
-	MSG_LVL_HIGH,
-	MSG_LVL_HIGH,
-	MSG_LVL_HIGH,
-	MSG_LVL_HIGH
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW
 };
 
 /* LOG CODES */
diff --git a/include/linux/i2c/ti_drv2667.h b/include/linux/i2c/ti_drv2667.h
new file mode 100644
index 0000000..0ae0e5e
--- /dev/null
+++ b/include/linux/i2c/ti_drv2667.h
@@ -0,0 +1,26 @@
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __TI_DRV2667__
+#define __TI_DRV2667__
+#define DRV2667_WAV_SEQ_LEN	11
+
+struct drv2667_pdata {
+	const char *name;
+	u8 mode;
+	/* support one waveform for now */
+	u8 wav_seq[DRV2667_WAV_SEQ_LEN];
+	u8 gain;
+	u8 idle_timeout_ms;
+	u32 max_runtime_ms;
+};
+#endif
diff --git a/include/linux/mfd/pm8xxx/batterydata-lib.h b/include/linux/mfd/pm8xxx/batterydata-lib.h
index c55e47e..afa1843 100644
--- a/include/linux/mfd/pm8xxx/batterydata-lib.h
+++ b/include/linux/mfd/pm8xxx/batterydata-lib.h
@@ -102,7 +102,8 @@
 };
 
 #if defined(CONFIG_PM8921_BMS) || \
-	defined(CONFIG_PM8921_BMS_MODULE)
+	defined(CONFIG_PM8921_BMS_MODULE) || \
+	defined(CONFIG_QPNP_BMS)
 extern struct bms_battery_data  palladium_1500_data;
 extern struct bms_battery_data  desay_5200_data;
 
diff --git a/include/linux/mfd/pm8xxx/ccadc.h b/include/linux/mfd/pm8xxx/ccadc.h
index 29f7a62..fc31f89 100644
--- a/include/linux/mfd/pm8xxx/ccadc.h
+++ b/include/linux/mfd/pm8xxx/ccadc.h
@@ -19,11 +19,11 @@
 
 /**
  * struct pm8xxx_ccadc_platform_data -
- * @r_sense:		sense resistor value in (mOhms)
+ * @r_sense_uohm:		sense resistor value in (micro Ohms)
  * @calib_delay_ms:	how often should the adc calculate gain and offset
  */
 struct pm8xxx_ccadc_platform_data {
-	int		r_sense;
+	int		r_sense_uohm;
 	unsigned int	calib_delay_ms;
 };
 
diff --git a/include/linux/mfd/pm8xxx/pm8921-bms.h b/include/linux/mfd/pm8xxx/pm8921-bms.h
index ba70c96..82ec57d 100644
--- a/include/linux/mfd/pm8xxx/pm8921-bms.h
+++ b/include/linux/mfd/pm8xxx/pm8921-bms.h
@@ -30,7 +30,7 @@
 /**
  * struct pm8921_bms_platform_data -
  * @batt_type:		allows to force chose battery calibration data
- * @r_sense:		sense resistor value in (mOhms)
+ * @r_sense_uohm:	sense resistor value (in micro ohms)
  * @i_test:		current at which the unusable charger cutoff is to be
  *			calculated or the peak system current (mA)
  * @v_cutoff:		the loaded voltage at which the battery
@@ -41,7 +41,7 @@
 struct pm8921_bms_platform_data {
 	struct pm8xxx_bms_core_data	bms_cdata;
 	enum battery_type		battery_type;
-	unsigned int			r_sense;
+	int				r_sense_uohm;
 	unsigned int			i_test;
 	unsigned int			v_cutoff;
 	unsigned int			max_voltage_uv;
diff --git a/include/linux/mfd/wcd9xxx/core.h b/include/linux/mfd/wcd9xxx/core.h
index 2dea611..4e9e1ce 100644
--- a/include/linux/mfd/wcd9xxx/core.h
+++ b/include/linux/mfd/wcd9xxx/core.h
@@ -131,7 +131,7 @@
 	u32 bit_width;				/* bit width 16,24,32   */
 	struct list_head wcd9xxx_ch_list;	/* channel list         */
 	u16 grph;				/* slimbus group handle */
-	u32 ch_mask;
+	unsigned long ch_mask;
 	wait_queue_head_t dai_wait;
 };
 
diff --git a/include/linux/mfd/wcd9xxx/wcd9320_registers.h b/include/linux/mfd/wcd9xxx/wcd9320_registers.h
index 4b8626a..f9966be 100644
--- a/include/linux/mfd/wcd9xxx/wcd9320_registers.h
+++ b/include/linux/mfd/wcd9xxx/wcd9320_registers.h
@@ -1334,9 +1334,16 @@
 
 /* SLIMBUS Slave Registers */
 #define TAIKO_SLIM_PGD_PORT_INT_EN0                     (0x30)
-#define TAIKO_SLIM_PGD_PORT_INT_STATUS0                 (0x34)
-#define TAIKO_SLIM_PGD_PORT_INT_CLR0                    (0x38)
-#define TAIKO_SLIM_PGD_PORT_INT_SOURCE0			(0x60)
+#define TAIKO_SLIM_PGD_PORT_INT_STATUS_RX_0             (0x34)
+#define TAIKO_SLIM_PGD_PORT_INT_STATUS_RX_1             (0x35)
+#define TAIKO_SLIM_PGD_PORT_INT_STATUS_TX_0             (0x36)
+#define TAIKO_SLIM_PGD_PORT_INT_STATUS_TX_1             (0x37)
+#define TAIKO_SLIM_PGD_PORT_INT_CLR_RX_0                (0x38)
+#define TAIKO_SLIM_PGD_PORT_INT_CLR_RX_1                (0x39)
+#define TAIKO_SLIM_PGD_PORT_INT_CLR_TX_0                (0x3A)
+#define TAIKO_SLIM_PGD_PORT_INT_CLR_TX_1                (0x3B)
+#define TAIKO_SLIM_PGD_PORT_INT_RX_SOURCE0		(0x60)
+#define TAIKO_SLIM_PGD_PORT_INT_TX_SOURCE0		(0x70)
 
 /* Macros for Packing Register Writes into a U32 */
 #define TAIKO_PACKED_REG_SIZE sizeof(u32)
diff --git a/include/linux/mhl_8334.h b/include/linux/mhl_8334.h
index cb9d7fa..c9f57c5 100644
--- a/include/linux/mhl_8334.h
+++ b/include/linux/mhl_8334.h
@@ -130,4 +130,162 @@
 	DEV_PAGE_DDC_SEGM   = (0x60),
 };
 
+#define MHL_SII_PAGE0_RD(off) \
+	mhl_i2c_reg_read(client, TX_PAGE_L0, off)
+#define MHL_SII_PAGE0_WR(off, val) \
+	mhl_i2c_reg_write(client, TX_PAGE_L0, off, val)
+#define MHL_SII_PAGE0_MOD(off, mask, val)		\
+	mhl_i2c_reg_modify(client, TX_PAGE_L0, off, mask, val)
+
+
+#define MHL_SII_PAGE1_RD(off) \
+	mhl_i2c_reg_read(client, TX_PAGE_L1, off)
+#define MHL_SII_PAGE1_WR(off, val) \
+	mhl_i2c_reg_write(client, TX_PAGE_L1, off, val)
+#define MHL_SII_PAGE1_MOD(off, mask, val) \
+	mhl_i2c_reg_modify(client, TX_PAGE_L1, off, mask, val)
+
+
+#define MHL_SII_PAGE2_RD(off) \
+	mhl_i2c_reg_read(client, TX_PAGE_2, off)
+#define MHL_SII_PAGE2_WR(off, val) \
+	mhl_i2c_reg_write(client, TX_PAGE_2, off, val)
+#define MHL_SII_PAGE2_MOD(off, mask, val) \
+	mhl_i2c_reg_modify(client, TX_PAGE_2, off, mask, val)
+
+
+#define MHL_SII_PAGE3_RD(off) \
+	mhl_i2c_reg_read(client, TX_PAGE_3, off)
+#define MHL_SII_PAGE3_WR(off, val) \
+	mhl_i2c_reg_write(client, TX_PAGE_3, off, val)
+#define MHL_SII_PAGE3_MOD(off, mask, val)		\
+	mhl_i2c_reg_modify(client, TX_PAGE_3, off, mask, val)
+
+#define MHL_SII_CBUS_RD(off) \
+	mhl_i2c_reg_read(client, TX_PAGE_CBUS, off)
+#define MHL_SII_CBUS_WR(off, val) \
+	mhl_i2c_reg_write(client, TX_PAGE_CBUS, off, val)
+#define MHL_SII_CBUS_MOD(off, mask, val) \
+	mhl_i2c_reg_modify(client, TX_PAGE_CBUS, off, mask, val)
+
+#define REG_SRST        ((TX_PAGE_3 << 16) | 0x0000)
+#define REG_INTR1       ((TX_PAGE_L0 << 16) | 0x0071)
+#define REG_INTR1_MASK  ((TX_PAGE_L0 << 16) | 0x0075)
+#define REG_INTR2       ((TX_PAGE_L0 << 16) | 0x0072)
+#define REG_TMDS_CCTRL  ((TX_PAGE_L0 << 16) | 0x0080)
+
+#define REG_DISC_CTRL1	((TX_PAGE_3 << 16) | 0x0010)
+#define REG_DISC_CTRL2	((TX_PAGE_3 << 16) | 0x0011)
+#define REG_DISC_CTRL3	((TX_PAGE_3 << 16) | 0x0012)
+#define REG_DISC_CTRL4	((TX_PAGE_3 << 16) | 0x0013)
+#define REG_DISC_CTRL5	((TX_PAGE_3 << 16) | 0x0014)
+#define REG_DISC_CTRL6	((TX_PAGE_3 << 16) | 0x0015)
+#define REG_DISC_CTRL7	((TX_PAGE_3 << 16) | 0x0016)
+#define REG_DISC_CTRL8	((TX_PAGE_3 << 16) | 0x0017)
+#define REG_DISC_CTRL9	((TX_PAGE_3 << 16) | 0x0018)
+#define REG_DISC_CTRL10	((TX_PAGE_3 << 16) | 0x0019)
+#define REG_DISC_CTRL11	((TX_PAGE_3 << 16) | 0x001A)
+#define REG_DISC_STAT	((TX_PAGE_3 << 16) | 0x001B)
+#define REG_DISC_STAT2	((TX_PAGE_3 << 16) | 0x001C)
+
+#define REG_INT_CTRL	((TX_PAGE_3 << 16) | 0x0020)
+#define REG_INTR4		((TX_PAGE_3 << 16) | 0x0021)
+#define REG_INTR4_MASK	((TX_PAGE_3 << 16) | 0x0022)
+#define REG_INTR5		((TX_PAGE_3 << 16) | 0x0023)
+#define REG_INTR5_MASK	((TX_PAGE_3 << 16) | 0x0024)
+
+#define REG_MHLTX_CTL1	((TX_PAGE_3 << 16) | 0x0030)
+#define REG_MHLTX_CTL2	((TX_PAGE_3 << 16) | 0x0031)
+#define REG_MHLTX_CTL3	((TX_PAGE_3 << 16) | 0x0032)
+#define REG_MHLTX_CTL4	((TX_PAGE_3 << 16) | 0x0033)
+#define REG_MHLTX_CTL5	((TX_PAGE_3 << 16) | 0x0034)
+#define REG_MHLTX_CTL6	((TX_PAGE_3 << 16) | 0x0035)
+#define REG_MHLTX_CTL7	((TX_PAGE_3 << 16) | 0x0036)
+#define REG_MHLTX_CTL8	((TX_PAGE_3 << 16) | 0x0037)
+
+#define REG_TMDS_CSTAT	((TX_PAGE_3 << 16) | 0x0040)
+
+#define REG_CBUS_INTR_ENABLE            ((TX_PAGE_CBUS << 16) | 0x0009)
+
+#define REG_DDC_ABORT_REASON            ((TX_PAGE_CBUS << 16) | 0x000B)
+#define REG_CBUS_BUS_STATUS             ((TX_PAGE_CBUS << 16) | 0x000A)
+#define REG_PRI_XFR_ABORT_REASON        ((TX_PAGE_CBUS << 16) | 0x000D)
+#define REG_CBUS_PRI_FWR_ABORT_REASON   ((TX_PAGE_CBUS << 16) | 0x000E)
+#define REG_CBUS_PRI_START              ((TX_PAGE_CBUS << 16) | 0x0012)
+#define REG_CBUS_PRI_ADDR_CMD           ((TX_PAGE_CBUS << 16) | 0x0013)
+#define REG_CBUS_PRI_WR_DATA_1ST        ((TX_PAGE_CBUS << 16) | 0x0014)
+#define REG_CBUS_PRI_WR_DATA_2ND        ((TX_PAGE_CBUS << 16) | 0x0015)
+#define REG_CBUS_PRI_RD_DATA_1ST        ((TX_PAGE_CBUS << 16) | 0x0016)
+#define REG_CBUS_PRI_RD_DATA_2ND        ((TX_PAGE_CBUS << 16) | 0x0017)
+#define REG_CBUS_PRI_VS_CMD             ((TX_PAGE_CBUS << 16) | 0x0018)
+#define REG_CBUS_PRI_VS_DATA            ((TX_PAGE_CBUS << 16) | 0x0019)
+#define	REG_CBUS_MSC_RETRY_INTERVAL		((TX_PAGE_CBUS << 16) | 0x001A)
+#define	REG_CBUS_DDC_FAIL_LIMIT			((TX_PAGE_CBUS << 16) | 0x001C)
+#define	REG_CBUS_MSC_FAIL_LIMIT			((TX_PAGE_CBUS << 16) | 0x001D)
+#define	REG_CBUS_MSC_INT2_STATUS        ((TX_PAGE_CBUS << 16) | 0x001E)
+#define REG_CBUS_MSC_INT2_ENABLE        ((TX_PAGE_CBUS << 16) | 0x001F)
+#define	REG_MSC_WRITE_BURST_LEN         ((TX_PAGE_CBUS << 16) | 0x0020)
+#define	REG_MSC_HEARTBEAT_CONTROL       ((TX_PAGE_CBUS << 16) | 0x0021)
+#define REG_MSC_TIMEOUT_LIMIT           ((TX_PAGE_CBUS << 16) | 0x0022)
+#define	REG_CBUS_LINK_CONTROL_1			((TX_PAGE_CBUS << 16) | 0x0030)
+#define	REG_CBUS_LINK_CONTROL_2			((TX_PAGE_CBUS << 16) | 0x0031)
+#define	REG_CBUS_LINK_CONTROL_3			((TX_PAGE_CBUS << 16) | 0x0032)
+#define	REG_CBUS_LINK_CONTROL_4			((TX_PAGE_CBUS << 16) | 0x0033)
+#define	REG_CBUS_LINK_CONTROL_5			((TX_PAGE_CBUS << 16) | 0x0034)
+#define	REG_CBUS_LINK_CONTROL_6			((TX_PAGE_CBUS << 16) | 0x0035)
+#define	REG_CBUS_LINK_CONTROL_7			((TX_PAGE_CBUS << 16) | 0x0036)
+#define REG_CBUS_LINK_STATUS_1          ((TX_PAGE_CBUS << 16) | 0x0037)
+#define REG_CBUS_LINK_STATUS_2          ((TX_PAGE_CBUS << 16) | 0x0038)
+#define	REG_CBUS_LINK_CONTROL_8			((TX_PAGE_CBUS << 16) | 0x0039)
+#define	REG_CBUS_LINK_CONTROL_9			((TX_PAGE_CBUS << 16) | 0x003A)
+#define	REG_CBUS_LINK_CONTROL_10		((TX_PAGE_CBUS << 16) | 0x003B)
+#define	REG_CBUS_LINK_CONTROL_11		((TX_PAGE_CBUS << 16) | 0x003C)
+#define	REG_CBUS_LINK_CONTROL_12		((TX_PAGE_CBUS << 16) | 0x003D)
+
+
+#define REG_CBUS_LINK_CTRL9_0			((TX_PAGE_CBUS << 16) | 0x003A)
+#define REG_CBUS_LINK_CTRL9_1           ((TX_PAGE_CBUS << 16) | 0x00BA)
+
+#define	REG_CBUS_DRV_STRENGTH_0			((TX_PAGE_CBUS << 16) | 0x0040)
+#define	REG_CBUS_DRV_STRENGTH_1			((TX_PAGE_CBUS << 16) | 0x0041)
+#define	REG_CBUS_ACK_CONTROL			((TX_PAGE_CBUS << 16) | 0x0042)
+#define	REG_CBUS_CAL_CONTROL			((TX_PAGE_CBUS << 16) | 0x0043)
+
+#define REG_CBUS_SCRATCHPAD_0           ((TX_PAGE_CBUS << 16) | 0x00C0)
+#define REG_CBUS_DEVICE_CAP_0           ((TX_PAGE_CBUS << 16) | 0x0080)
+#define REG_CBUS_DEVICE_CAP_1           ((TX_PAGE_CBUS << 16) | 0x0081)
+#define REG_CBUS_DEVICE_CAP_2           ((TX_PAGE_CBUS << 16) | 0x0082)
+#define REG_CBUS_DEVICE_CAP_3           ((TX_PAGE_CBUS << 16) | 0x0083)
+#define REG_CBUS_DEVICE_CAP_4           ((TX_PAGE_CBUS << 16) | 0x0084)
+#define REG_CBUS_DEVICE_CAP_5           ((TX_PAGE_CBUS << 16) | 0x0085)
+#define REG_CBUS_DEVICE_CAP_6           ((TX_PAGE_CBUS << 16) | 0x0086)
+#define REG_CBUS_DEVICE_CAP_7           ((TX_PAGE_CBUS << 16) | 0x0087)
+#define REG_CBUS_DEVICE_CAP_8           ((TX_PAGE_CBUS << 16) | 0x0088)
+#define REG_CBUS_DEVICE_CAP_9           ((TX_PAGE_CBUS << 16) | 0x0089)
+#define REG_CBUS_DEVICE_CAP_A           ((TX_PAGE_CBUS << 16) | 0x008A)
+#define REG_CBUS_DEVICE_CAP_B           ((TX_PAGE_CBUS << 16) | 0x008B)
+#define REG_CBUS_DEVICE_CAP_C           ((TX_PAGE_CBUS << 16) | 0x008C)
+#define REG_CBUS_DEVICE_CAP_D           ((TX_PAGE_CBUS << 16) | 0x008D)
+#define REG_CBUS_DEVICE_CAP_E           ((TX_PAGE_CBUS << 16) | 0x008E)
+#define REG_CBUS_DEVICE_CAP_F           ((TX_PAGE_CBUS << 16) | 0x008F)
+#define REG_CBUS_SET_INT_0              ((TX_PAGE_CBUS << 16) | 0x00A0)
+#define REG_CBUS_SET_INT_1		((TX_PAGE_CBUS << 16) | 0x00A1)
+#define REG_CBUS_SET_INT_2		((TX_PAGE_CBUS << 16) | 0x00A2)
+#define REG_CBUS_SET_INT_3		((TX_PAGE_CBUS << 16) | 0x00A3)
+#define REG_CBUS_WRITE_STAT_0           ((TX_PAGE_CBUS << 16) | 0x00B0)
+#define REG_CBUS_WRITE_STAT_1           ((TX_PAGE_CBUS << 16) | 0x00B1)
+#define REG_CBUS_WRITE_STAT_2           ((TX_PAGE_CBUS << 16) | 0x00B2)
+#define REG_CBUS_WRITE_STAT_3           ((TX_PAGE_CBUS << 16) | 0x00B3)
+
+#define GET_PAGE(x) (x >> 16)
+#define GET_OFF(x) (x & 0xffff)
+
+
+#define MHL_SII_REG_NAME_RD(arg)\
+	mhl_i2c_reg_read(client, GET_PAGE(arg), GET_OFF(arg))
+#define MHL_SII_REG_NAME_WR(arg, val)\
+	mhl_i2c_reg_write(client, GET_PAGE(arg), GET_OFF(arg), val)
+#define MHL_SII_REG_NAME_MOD(arg, mask, val)\
+	mhl_i2c_reg_modify(client, GET_PAGE(arg), GET_OFF(arg), mask, val)
+
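/*
 * Usage sketch (editor's illustration, not part of the patch): each REG_*
 * constant above packs the I2C page in bits 31:16 and the register offset in
 * bits 15:0, so GET_PAGE()/GET_OFF() recover both halves and the
 * MHL_SII_REG_NAME_* wrappers can be called with a single symbolic name.
 * "client" is assumed to be the struct i2c_client the caller already holds.
 *
 *	u8 intr = MHL_SII_REG_NAME_RD(REG_INTR4);
 *	MHL_SII_REG_NAME_WR(REG_INTR4, intr);
 *	MHL_SII_REG_NAME_MOD(REG_DISC_CTRL1, 0x01, 0x01);
 */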
 #endif /* __MHL_MSM_H__ */
diff --git a/include/linux/mm.h b/include/linux/mm.h
index ddfb7c5..48268f0 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1561,6 +1561,32 @@
 #define in_gate_area(mm, addr) ({(void)mm; in_gate_area_no_mm(addr);})
 #endif	/* __HAVE_ARCH_GATE_AREA */
 
+#ifdef CONFIG_USE_USER_ACCESSIBLE_TIMERS
+static inline int use_user_accessible_timers(void) { return 1; }
+extern int in_user_timers_area(struct mm_struct *mm, unsigned long addr);
+extern struct vm_area_struct *get_user_timers_vma(struct mm_struct *mm);
+extern int get_user_timer_page(struct vm_area_struct *vma,
+	struct mm_struct *mm, unsigned long start, unsigned int gup_flags,
+	struct page **pages, int idx, int *goto_next_page);
+#else
+static inline int use_user_accessible_timers(void) { return 0; }
+static inline int in_user_timers_area(struct mm_struct *mm, unsigned long addr)
+{
+	return 0;
+}
+static inline struct vm_area_struct *get_user_timers_vma(struct mm_struct *mm)
+{
+	return NULL;
+}
+static inline int get_user_timer_page(struct vm_area_struct *vma,
+	struct mm_struct *mm, unsigned long start, unsigned int gup_flags,
+	struct page **pages, int idx, int *goto_next_page)
+{
+	*goto_next_page = 0;
+	return 0;
+}
+#endif
+
 int drop_caches_sysctl_handler(struct ctl_table *, int,
 					void __user *, size_t *, loff_t *);
 unsigned long shrink_slab(struct shrink_control *shrink,
diff --git a/include/linux/mmc/sdhci.h b/include/linux/mmc/sdhci.h
index e9051e1..c5b492b 100644
--- a/include/linux/mmc/sdhci.h
+++ b/include/linux/mmc/sdhci.h
@@ -91,6 +91,7 @@
 	unsigned int quirks2;	/* More deviations from spec. */
 
 #define SDHCI_QUIRK2_HOST_OFF_CARD_ON			(1<<0)
+#define SDHCI_QUIRK2_OWN_CARD_DETECTION			(1<<1)
 
 	int irq;		/* Device IRQ */
 	void __iomem *ioaddr;	/* Mapped address */
diff --git a/include/linux/msm_ipa.h b/include/linux/msm_ipa.h
new file mode 100644
index 0000000..613cd9f
--- /dev/null
+++ b/include/linux/msm_ipa.h
@@ -0,0 +1,714 @@
+#ifndef _MSM_IPA_H_
+#define _MSM_IPA_H_
+
+#ifndef __KERNEL__
+#include <stdint.h>
+#include <stddef.h>
+#include <sys/stat.h>
+#endif
+#include <linux/ioctl.h>
+
+/**
+ * unique magic number of the IPA device
+ */
+#define IPA_IOC_MAGIC 0xCF
+
+/**
+ * name of the default routing tables for v4 and v6
+ */
+#define IPA_DFLT_RT_TBL_NAME "ipa_dflt_rt"
+
+/**
+ *   the commands supported by IPA driver
+ */
+#define IPA_IOCTL_ADD_HDR            0
+#define IPA_IOCTL_DEL_HDR            1
+#define IPA_IOCTL_ADD_RT_RULE        2
+#define IPA_IOCTL_DEL_RT_RULE        3
+#define IPA_IOCTL_ADD_FLT_RULE       4
+#define IPA_IOCTL_DEL_FLT_RULE       5
+#define IPA_IOCTL_COMMIT_HDR         6
+#define IPA_IOCTL_RESET_HDR          7
+#define IPA_IOCTL_COMMIT_RT          8
+#define IPA_IOCTL_RESET_RT           9
+#define IPA_IOCTL_COMMIT_FLT        10
+#define IPA_IOCTL_RESET_FLT         11
+#define IPA_IOCTL_DUMP              12
+#define IPA_IOCTL_GET_RT_TBL        13
+#define IPA_IOCTL_PUT_RT_TBL        14
+#define IPA_IOCTL_COPY_HDR          15
+#define IPA_IOCTL_QUERY_INTF        16
+#define IPA_IOCTL_QUERY_INTF_TX_PROPS 17
+#define IPA_IOCTL_QUERY_INTF_RX_PROPS 18
+#define IPA_IOCTL_GET_HDR           19
+#define IPA_IOCTL_PUT_HDR           20
+#define IPA_IOCTL_SET_FLT        21
+#define IPA_IOCTL_ALLOC_NAT_MEM  22
+#define IPA_IOCTL_V4_INIT_NAT    23
+#define IPA_IOCTL_NAT_DMA        24
+#define IPA_IOCTL_V4_DEL_NAT     26
+#define IPA_IOCTL_GET_ASYNC_MSG  27
+#define IPA_IOCTL_GET_NAT_OFFSET 28
+#define IPA_IOCTL_MAX            29
+
+/**
+ * max size of the header to be inserted
+ */
+#define IPA_HDR_MAX_SIZE 64
+
+/**
+ * max size of the name of the resource (routing table, header)
+ */
+#define IPA_RESOURCE_NAME_MAX 20
+
+/**
+ * the attributes of the rule (routing or filtering)
+ */
+#define IPA_FLT_TOS            (1ul << 0)
+#define IPA_FLT_PROTOCOL       (1ul << 1)
+#define IPA_FLT_SRC_ADDR       (1ul << 2)
+#define IPA_FLT_DST_ADDR       (1ul << 3)
+#define IPA_FLT_SRC_PORT_RANGE (1ul << 4)
+#define IPA_FLT_DST_PORT_RANGE (1ul << 5)
+#define IPA_FLT_TYPE           (1ul << 6)
+#define IPA_FLT_CODE           (1ul << 7)
+#define IPA_FLT_SPI            (1ul << 8)
+#define IPA_FLT_SRC_PORT       (1ul << 9)
+#define IPA_FLT_DST_PORT       (1ul << 10)
+#define IPA_FLT_TC             (1ul << 11)
+#define IPA_FLT_FLOW_LABEL     (1ul << 12)
+#define IPA_FLT_NEXT_HDR       (1ul << 13)
+#define IPA_FLT_META_DATA      (1ul << 14)
+#define IPA_FLT_FRAGMENT       (1ul << 15)
+
+/**
+ * enum ipa_client_type - names for the various IPA "clients"
+ * these are from the perspective of the clients, e.g.
+ * HSIC1_PROD means HSIC client is the producer and IPA is the
+ * consumer
+ */
+enum ipa_client_type {
+	IPA_CLIENT_PROD,
+	IPA_CLIENT_HSIC1_PROD = IPA_CLIENT_PROD,
+	IPA_CLIENT_HSIC2_PROD,
+	IPA_CLIENT_HSIC3_PROD,
+	IPA_CLIENT_HSIC4_PROD,
+	IPA_CLIENT_HSIC5_PROD,
+	IPA_CLIENT_USB_PROD,
+	IPA_CLIENT_A5_WLAN_AMPDU_PROD,
+	IPA_CLIENT_A2_EMBEDDED_PROD,
+	IPA_CLIENT_A2_TETHERED_PROD,
+	IPA_CLIENT_A5_LAN_WAN_PROD,
+	IPA_CLIENT_A5_CMD_PROD,
+	IPA_CLIENT_Q6_LAN_PROD,
+
+	IPA_CLIENT_CONS,
+	IPA_CLIENT_HSIC1_CONS = IPA_CLIENT_CONS,
+	IPA_CLIENT_HSIC2_CONS,
+	IPA_CLIENT_HSIC3_CONS,
+	IPA_CLIENT_HSIC4_CONS,
+	IPA_CLIENT_HSIC5_CONS,
+	IPA_CLIENT_USB_CONS,
+	IPA_CLIENT_A2_EMBEDDED_CONS,
+	IPA_CLIENT_A2_TETHERED_CONS,
+	IPA_CLIENT_A5_LAN_WAN_CONS,
+	IPA_CLIENT_Q6_LAN_CONS,
+
+	IPA_CLIENT_MAX,
+};
+
+/**
+ * enum ipa_ip_type - Address family: IPv4 or IPv6
+ */
+enum ipa_ip_type {
+	IPA_IP_v4,
+	IPA_IP_v6,
+	IPA_IP_MAX
+};
+
+/**
+ * enum ipa_flt_action - action field of filtering rule
+ *
+ * Pass to routing: 5'd0
+ * Pass to source NAT: 5'd1
+ * Pass to destination NAT: 5'd2
+ * Pass to default output pipe (e.g., A5): 5'd3
+ */
+enum ipa_flt_action {
+	IPA_PASS_TO_ROUTING,
+	IPA_PASS_TO_SRC_NAT,
+	IPA_PASS_TO_DST_NAT,
+	IPA_PASS_TO_EXCEPTION
+};
+
+/**
+ * struct ipa_rule_attrib - attributes of a routing/filtering
+ * rule, all in LE
+ * @attrib_mask: what attributes are valid
+ * @src_port_lo: low port of src port range
+ * @src_port_hi: high port of src port range
+ * @dst_port_lo: low port of dst port range
+ * @dst_port_hi: high port of dst port range
+ * @type: ICMP/IGMP type
+ * @code: ICMP/IGMP code
+ * @spi: IPSec SPI
+ * @src_port: exact src port
+ * @dst_port: exact dst port
+ * @meta_data: meta-data val
+ * @meta_data_mask: meta-data mask
+ * @u.v4.tos: type of service
+ * @u.v4.protocol: protocol
+ * @u.v4.src_addr: src address value
+ * @u.v4.src_addr_mask: src address mask
+ * @u.v4.dst_addr: dst address value
+ * @u.v4.dst_addr_mask: dst address mask
+ * @u.v6.tc: traffic class
+ * @u.v6.flow_label: flow label
+ * @u.v6.next_hdr: next header
+ * @u.v6.src_addr: src address val
+ * @u.v6.src_addr_mask: src address mask
+ * @u.v6.dst_addr: dst address val
+ * @u.v6.dst_addr_mask: dst address mask
+ */
+struct ipa_rule_attrib {
+	uint32_t attrib_mask;
+	uint16_t src_port_lo;
+	uint16_t src_port_hi;
+	uint16_t dst_port_lo;
+	uint16_t dst_port_hi;
+	uint8_t type;
+	uint8_t code;
+	uint32_t spi;
+	uint16_t src_port;
+	uint16_t dst_port;
+	uint32_t meta_data;
+	uint32_t meta_data_mask;
+	union {
+		struct {
+			uint8_t tos;
+			uint8_t protocol;
+			uint32_t src_addr;
+			uint32_t src_addr_mask;
+			uint32_t dst_addr;
+			uint32_t dst_addr_mask;
+		} v4;
+		struct {
+			uint8_t tc;
+			uint32_t flow_label;
+			uint8_t next_hdr;
+			uint32_t src_addr[4];
+			uint32_t src_addr_mask[4];
+			uint32_t dst_addr[4];
+			uint32_t dst_addr_mask[4];
+		} v6;
+	} u;
+};
+
+/**
+ * struct ipa_flt_rule - attributes of a filtering rule
+ * @action: action field
+ * @rt_tbl_hdl: handle of table from "get"
+ * @attrib: attributes of the rule
+ */
+struct ipa_flt_rule {
+	enum ipa_flt_action action;
+	uint32_t rt_tbl_hdl;
+	struct ipa_rule_attrib attrib;
+};
+
+/**
+ * struct ipa_rt_rule - attributes of a routing rule
+ * @dst: dst "client"
+ * @hdr_hdl: handle to the dynamic header
+	it is not an index or an offset
+ * @attrib: attributes of the rule
+ */
+struct ipa_rt_rule {
+	enum ipa_client_type dst;
+	uint32_t hdr_hdl;
+	struct ipa_rule_attrib attrib;
+};
+
+/**
+ * struct ipa_hdr_add - header descriptor includes in and out
+ * parameters
+ * @name: name of the header
+ * @hdr: actual header to be inserted
+ * @hdr_len: size of above header
+ * @is_partial: header not fully specified
+ * @hdr_hdl: out parameter, handle to header, valid when status is 0
+ * @status:	out parameter, status of header add operation,
+ *		0 for success,
+ *		-1 for failure
+ */
+struct ipa_hdr_add {
+	char name[IPA_RESOURCE_NAME_MAX];
+	uint8_t hdr[IPA_HDR_MAX_SIZE];
+	uint8_t hdr_len;
+	uint8_t is_partial;
+	uint32_t hdr_hdl;
+	int status;
+};
+
+/**
+ * struct ipa_ioc_add_hdr - header addition parameters (support
+ * multiple headers and commit)
+ * @commit: should headers be written to IPA HW also?
+ * @num_hdrs: num of headers that follow
+ * @hdr:	all headers (struct ipa_hdr_add) need to go here back to
+ *		back, no pointers
+ */
+struct ipa_ioc_add_hdr {
+	uint8_t commit;
+	uint8_t num_hdrs;
+	struct ipa_hdr_add hdr[0];
+};
+
+/**
+ * struct ipa_ioc_copy_hdr - retrieve a copy of the specified
+ * header - caller can then derive the complete header
+ * @name: name of the header resource
+ * @hdr:	out parameter, contents of specified header,
+ *	valid only when ioctl return val is non-negative
+ * @hdr_len: out parameter, size of above header
+ *	valid only when ioctl return val is non-negative
+ * @is_partial:	out parameter, indicates whether specified header is partial
+ *		valid only when ioctl return val is non-negative
+ */
+struct ipa_ioc_copy_hdr {
+	char name[IPA_RESOURCE_NAME_MAX];
+	uint8_t hdr[IPA_HDR_MAX_SIZE];
+	uint8_t hdr_len;
+	uint8_t is_partial;
+};
+
+/**
+ * struct ipa_ioc_get_hdr - header entry lookup parameters, if lookup was
+ * successful caller must call put to release the reference count when done
+ * @name: name of the header resource
+ * @hdl:	out parameter, handle of header entry
+ *		valid only when ioctl return val is non-negative
+ */
+struct ipa_ioc_get_hdr {
+	char name[IPA_RESOURCE_NAME_MAX];
+	uint32_t hdl;
+};
+
+/**
+ * struct ipa_hdr_del - header descriptor includes in and out
+ * parameters
+ *
+ * @hdl: handle returned from header add operation
+ * @status:	out parameter, status of header remove operation,
+ *		0 for success,
+ *		-1 for failure
+ */
+struct ipa_hdr_del {
+	uint32_t hdl;
+	int status;
+};
+
+/**
+ * struct ipa_ioc_del_hdr - header deletion parameters (support
+ * multiple headers and commit)
+ * @commit: should headers be removed from IPA HW also?
+ * @num_hdls: num of headers being removed
+ * @hdl: all handles (struct ipa_hdr_del) need to go here back to back, no pointers
+ */
+struct ipa_ioc_del_hdr {
+	uint8_t commit;
+	uint8_t num_hdls;
+	struct ipa_hdr_del hdl[0];
+};
+
+/**
+ * struct ipa_rt_rule_add - routing rule descriptor includes in
+ * and out parameters
+ * @rule: actual rule to be added
+ * @at_rear:	add at back of routing table, it is NOT possible to add rules at
+ *		the rear of the "default" routing tables
+ * @rt_rule_hdl: output parameter, handle to rule, valid when status is 0
+ * @status:	output parameter, status of routing rule add operation,
+ *		0 for success,
+ *		-1 for failure
+ */
+struct ipa_rt_rule_add {
+	struct ipa_rt_rule rule;
+	uint8_t at_rear;
+	uint32_t rt_rule_hdl;
+	int status;
+};
+
+/**
+ * struct ipa_ioc_add_rt_rule - routing rule addition parameters (supports
+ * multiple rules and commit);
+ *
+ * all rules MUST be added to same table
+ * @commit: should rules be written to IPA HW also?
+ * @ip: IP family of rule
+ * @rt_tbl_name: name of routing table resource
+ * @num_rules: number of routing rules that follow
+ * @rules: all rules (struct ipa_rt_rule_add) need to go back to back here, no pointers
+ */
+struct ipa_ioc_add_rt_rule {
+	uint8_t commit;
+	enum ipa_ip_type ip;
+	char rt_tbl_name[IPA_RESOURCE_NAME_MAX];
+	uint8_t num_rules;
+	struct ipa_rt_rule_add rules[0];
+};
+
+/**
+ * struct ipa_rt_rule_del - routing rule descriptor includes in
+ * and out parameters
+ * @hdl: handle returned from route rule add operation
+ * @status:	output parameter, status of route rule delete operation,
+ *		0 for success,
+ *		-1 for failure
+ */
+struct ipa_rt_rule_del {
+	uint32_t hdl;
+	int status;
+};
+
+/**
+ * struct ipa_ioc_del_rt_rule - routing rule deletion parameters (supports
+ * multiple headers and commit)
+ * @commit: should rules be removed from IPA HW also?
+ * @ip: IP family of rules
+ * @num_hdls: num of rules being removed
+ * @hdl: all handles (struct ipa_rt_rule_del) need to go back to back here, no pointers
+ */
+struct ipa_ioc_del_rt_rule {
+	uint8_t commit;
+	enum ipa_ip_type ip;
+	uint8_t num_hdls;
+	struct ipa_rt_rule_del hdl[0];
+};
+
+/**
+ * struct ipa_flt_rule_add - filtering rule descriptor includes
+ * in and out parameters
+ * @rule: actual rule to be added
+ * @at_rear: add at back of filtering table?
+ * @flt_rule_hdl: out parameter, handle to rule, valid when status is 0
+ * @status:	output parameter, status of filtering rule add operation,
+ *		0 for success,
+ *		-1 for failure
+ *
+ */
+struct ipa_flt_rule_add {
+	struct ipa_flt_rule rule;
+	uint8_t at_rear;
+	uint32_t flt_rule_hdl;
+	int status;
+};
+
+/**
+ * struct ipa_ioc_add_flt_rule - filtering rule addition parameters (supports
+ * multiple rules and commit)
+ * all rules MUST be added to same table
+ * @commit: should rules be written to IPA HW also?
+ * @ip: IP family of rule
+ * @ep:	which "clients" pipe does this rule apply to?
+ *	valid only when global is 0
+ * @global: does this apply to global filter table of specific IP family
+ * @num_rules: number of filtering rules that follow
+ * @rules: all rules need to go back to back here, no pointers
+ */
+struct ipa_ioc_add_flt_rule {
+	uint8_t commit;
+	enum ipa_ip_type ip;
+	enum ipa_client_type ep;
+	uint8_t global;
+	uint8_t num_rules;
+	struct ipa_flt_rule_add rules[0];
+};
+
+/**
+ * struct ipa_flt_rule_del - filtering rule descriptor includes
+ * in and out parameters
+ *
+ * @hdl: handle returned from filtering rule add operation
+ * @status:	output parameter, status of filtering rule delete operation,
+ *		0 for success,
+ *		-1 for failure
+ */
+struct ipa_flt_rule_del {
+	uint32_t hdl;
+	int status;
+};
+
+/**
+ * struct ipa_ioc_del_flt_rule - filtering rule deletion parameters (supports
+ * multiple headers and commit)
+ * @commit: should rules be removed from IPA HW also?
+ * @ip: IP family of rules
+ * @num_hdls: num of rules being removed
+ * @hdl: all handles need to go back to back here, no pointers
+ */
+struct ipa_ioc_del_flt_rule {
+	uint8_t commit;
+	enum ipa_ip_type ip;
+	uint8_t num_hdls;
+	struct ipa_flt_rule_del hdl[0];
+};
+
+/**
+ * struct ipa_ioc_get_rt_tbl - routing table lookup parameters, if lookup was
+ * successful caller must call put to release the reference
+ * count when done
+ * @ip: IP family of table
+ * @name: name of routing table resource
+ * @hdl:	output parameter, handle of routing table, valid only when ioctl
+ *		return val is non-negative
+ */
+struct ipa_ioc_get_rt_tbl {
+	enum ipa_ip_type ip;
+	char name[IPA_RESOURCE_NAME_MAX];
+	uint32_t hdl;
+};
+
+/**
+ * struct ipa_ioc_query_intf - used to lookup number of tx and
+ * rx properties of interface
+ * @name: name of interface
+ * @num_tx_props:	output parameter, number of tx properties
+ *			valid only when ioctl return val is non-negative
+ * @num_rx_props:	output parameter, number of rx properties
+ *			valid only when ioctl return val is non-negative
+ */
+struct ipa_ioc_query_intf {
+	char name[IPA_RESOURCE_NAME_MAX];
+	uint32_t num_tx_props;
+	uint32_t num_rx_props;
+};
+
+/**
+ * struct ipa_ioc_tx_intf_prop - interface tx property
+ * @ip: IP family of routing rule
+ * @attrib: routing rule
+ * @dst_pipe: routing output pipe
+ * @hdr_name: name of associated header if any, empty string when no header
+ */
+struct ipa_ioc_tx_intf_prop {
+	enum ipa_ip_type ip;
+	struct ipa_rule_attrib attrib;
+	enum ipa_client_type dst_pipe;
+	char hdr_name[IPA_RESOURCE_NAME_MAX];
+};
+
+/**
+ * struct ipa_ioc_query_intf_tx_props - interface tx properties
+ * @name: name of interface
+ * @tx: output parameter, the tx properties go here back to back
+ */
+struct ipa_ioc_query_intf_tx_props {
+	char name[IPA_RESOURCE_NAME_MAX];
+	struct ipa_ioc_tx_intf_prop tx[0];
+};
+
+/**
+ * struct ipa_ioc_rx_intf_prop - interface rx property
+ * @ip: IP family of filtering rule
+ * @attrib: filtering rule
+ * @src_pipe: input pipe
+ */
+struct ipa_ioc_rx_intf_prop {
+	enum ipa_ip_type ip;
+	struct ipa_rule_attrib attrib;
+	enum ipa_client_type src_pipe;
+};
+
+/**
+ * struct ipa_ioc_query_intf_rx_props - interface rx properties
+ * @name: name of interface
+ * @rx: output parameter, the rx properties go here back to back
+ */
+struct ipa_ioc_query_intf_rx_props {
+	char name[IPA_RESOURCE_NAME_MAX];
+	struct ipa_ioc_rx_intf_prop rx[0];
+};
+
+/**
+ * struct ipa_ioc_nat_alloc_mem - nat table memory allocation
+ * properties
+ * @dev_name: input parameter, the name of table
+ * @size: input parameter, size of table in bytes
+ * @offset: output parameter, offset into page in case of system memory
+ */
+struct ipa_ioc_nat_alloc_mem {
+	char dev_name[IPA_RESOURCE_NAME_MAX];
+	size_t size;
+	off_t offset;
+};
+
+/**
+ * struct ipa_ioc_v4_nat_init - nat table initialization
+ * parameters
+ * @tbl_index: input parameter, index of the table
+ * @ipv4_rules_offset: input parameter, ipv4 rules address offset
+ * @expn_rules_offset: input parameter, ipv4 expansion rules address offset
+ * @index_offset: input parameter, index rules offset
+ * @index_expn_offset: input parameter, index expansion rules offset
+ * @table_entries: input parameter, ipv4 rules table size in entries
+ * @expn_table_entries: input parameter, ipv4 expansion rules table size
+ * @ip_addr: input parameter, public ip address
+ */
+struct ipa_ioc_v4_nat_init {
+	uint8_t tbl_index;
+	uint32_t ipv4_rules_offset;
+	uint32_t expn_rules_offset;
+
+	uint32_t index_offset;
+	uint32_t index_expn_offset;
+
+	uint16_t table_entries;
+	uint16_t expn_table_entries;
+	uint32_t ip_addr;
+};
+
+/**
+ * struct ipa_ioc_v4_nat_del - nat table delete parameter
+ * @table_index: input parameter, index of the table
+ * @public_ip_addr: input parameter, public ip address
+ */
+struct ipa_ioc_v4_nat_del {
+	uint8_t table_index;
+	uint32_t public_ip_addr;
+};
+
+/**
+ * struct ipa_ioc_nat_dma_one - nat dma command parameter
+ * @table_index: input parameter, index of the table
+ * @base_addr:	type of table, from which the base address of the table
+ *		can be inferred
+ * @offset: destination offset within the NAT table
+ * @data: data to be written.
+ */
+struct ipa_ioc_nat_dma_one {
+	uint8_t table_index;
+	uint8_t base_addr;
+
+	uint32_t offset;
+	uint16_t data;
+
+};
+
+/**
+ * struct ipa_ioc_nat_dma_cmd - To hold multiple nat dma commands
+ * @entries: number of dma commands in use
+ * @dma: data pointer to the dma commands
+ */
+struct ipa_ioc_nat_dma_cmd {
+	uint8_t entries;
+	struct ipa_ioc_nat_dma_one dma[0];
+
+};
+
+/**
+ * struct ipa_msg_meta - Format of the message meta-data.
+ * @msg_type: the type of the message
+ * @msg_len: the length of the message in bytes
+ * @rsvd: reserved bits for future use.
+ *
+ * Client in user-space should issue a read on the device (/dev/ipa) with a
+ * buffer of at least this size in a continuous loop; the call will block when there
+ * is no pending async message.
+ *
+ * After reading a message's meta-data using the above scheme, the client should issue a
+ * GET_MSG IOCTL to actually read the message itself into the buffer of
+ * "msg_len" immediately following the ipa_msg_meta itself in the IOCTL payload
+ */
+struct ipa_msg_meta {
+	uint8_t msg_type;
+	uint16_t msg_len;
+	uint8_t rsvd;
+};
+
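/*
 * Userspace sketch (editor's illustration, not part of the patch) of the read
 * loop described above.  Only the meta-data read is shown; fetching the
 * message body uses the GET_MSG ioctl mentioned in the comment, with the
 * payload layout described there.
 *
 *	struct ipa_msg_meta meta;
 *	int fd = open("/dev/ipa", O_RDONLY);
 *
 *	while (read(fd, &meta, sizeof(meta)) >= 0) {
 *		// blocks until an async message is pending; meta.msg_type and
 *		// meta.msg_len describe the message to be fetched next
 *	}
 */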
+/**
+ *   actual IOCTLs supported by IPA driver
+ */
+#define IPA_IOC_ADD_HDR _IOWR(IPA_IOC_MAGIC, \
+					IPA_IOCTL_ADD_HDR, \
+					struct ipa_ioc_add_hdr *)
+#define IPA_IOC_DEL_HDR _IOWR(IPA_IOC_MAGIC, \
+					IPA_IOCTL_DEL_HDR, \
+					struct ipa_ioc_del_hdr *)
+#define IPA_IOC_ADD_RT_RULE _IOWR(IPA_IOC_MAGIC, \
+					IPA_IOCTL_ADD_RT_RULE, \
+					struct ipa_ioc_add_rt_rule *)
+#define IPA_IOC_DEL_RT_RULE _IOWR(IPA_IOC_MAGIC, \
+					IPA_IOCTL_DEL_RT_RULE, \
+					struct ipa_ioc_del_rt_rule *)
+#define IPA_IOC_ADD_FLT_RULE _IOWR(IPA_IOC_MAGIC, \
+					IPA_IOCTL_ADD_FLT_RULE, \
+					struct ipa_ioc_add_flt_rule *)
+#define IPA_IOC_DEL_FLT_RULE _IOWR(IPA_IOC_MAGIC, \
+					IPA_IOCTL_DEL_FLT_RULE, \
+					struct ipa_ioc_del_flt_rule *)
+#define IPA_IOC_COMMIT_HDR _IO(IPA_IOC_MAGIC,\
+					IPA_IOCTL_COMMIT_HDR)
+#define IPA_IOC_RESET_HDR _IO(IPA_IOC_MAGIC,\
+					IPA_IOCTL_RESET_HDR)
+#define IPA_IOC_COMMIT_RT _IOW(IPA_IOC_MAGIC, \
+					IPA_IOCTL_COMMIT_RT, \
+					enum ipa_ip_type)
+#define IPA_IOC_RESET_RT _IOW(IPA_IOC_MAGIC, \
+					IPA_IOCTL_RESET_RT, \
+					enum ipa_ip_type)
+#define IPA_IOC_COMMIT_FLT _IOW(IPA_IOC_MAGIC, \
+					IPA_IOCTL_COMMIT_FLT, \
+					enum ipa_ip_type)
+#define IPA_IOC_RESET_FLT _IOW(IPA_IOC_MAGIC, \
+			IPA_IOCTL_RESET_FLT, \
+			enum ipa_ip_type)
+#define IPA_IOC_DUMP _IO(IPA_IOC_MAGIC, \
+			IPA_IOCTL_DUMP)
+#define IPA_IOC_GET_RT_TBL _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_GET_RT_TBL, \
+				struct ipa_ioc_get_rt_tbl *)
+#define IPA_IOC_PUT_RT_TBL _IOW(IPA_IOC_MAGIC, \
+				IPA_IOCTL_PUT_RT_TBL, \
+				uint32_t)
+#define IPA_IOC_COPY_HDR _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_COPY_HDR, \
+				struct ipa_ioc_copy_hdr *)
+#define IPA_IOC_QUERY_INTF _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_QUERY_INTF, \
+				struct ipa_ioc_query_intf *)
+#define IPA_IOC_QUERY_INTF_TX_PROPS _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_QUERY_INTF_TX_PROPS, \
+				struct ipa_ioc_query_intf_tx_props *)
+#define IPA_IOC_QUERY_INTF_RX_PROPS _IOWR(IPA_IOC_MAGIC, \
+					IPA_IOCTL_QUERY_INTF_RX_PROPS, \
+					struct ipa_ioc_query_intf_rx_props *)
+#define IPA_IOC_GET_HDR _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_GET_HDR, \
+				struct ipa_ioc_get_hdr *)
+#define IPA_IOC_PUT_HDR _IOW(IPA_IOC_MAGIC, \
+				IPA_IOCTL_PUT_HDR, \
+				uint32_t)
+#define IPA_IOC_ALLOC_NAT_MEM _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_ALLOC_NAT_MEM, \
+				struct ipa_ioc_nat_alloc_mem *)
+#define IPA_IOC_V4_INIT_NAT _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_V4_INIT_NAT, \
+				struct ipa_ioc_v4_nat_init *)
+#define IPA_IOC_NAT_DMA _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_NAT_DMA, \
+				struct ipa_ioc_nat_dma_cmd *)
+#define IPA_IOC_V4_DEL_NAT _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_V4_DEL_NAT, \
+				struct ipa_ioc_v4_nat_del *)
+#define IPA_IOC_GET_NAT_OFFSET _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_GET_NAT_OFFSET, \
+				uint32_t *)
+#define IPA_IOC_SET_FLT _IOW(IPA_IOC_MAGIC, \
+			IPA_IOCTL_SET_FLT, \
+			uint32_t)
+#define IPA_IOC_GET_ASYNC_MSG _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_GET_ASYNC_MSG, \
+				struct ipa_msg_meta *)
+
+#endif /* _MSM_IPA_H_ */
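/*
 * Userspace sketch (editor's illustration, not part of the patch) of the
 * "rules go back to back, no pointers" layout used by the *_add ioctls: the
 * flexible array at the end of struct ipa_ioc_add_rt_rule is allocated in one
 * block together with the fixed part.  The table name "my_rt_tbl", the open
 * fd for /dev/ipa and the dst_ip value are assumptions.
 *
 *	struct ipa_ioc_add_rt_rule *req;
 *	size_t sz = sizeof(*req) + 1 * sizeof(struct ipa_rt_rule_add);
 *
 *	req = calloc(1, sz);
 *	req->commit = 1;
 *	req->ip = IPA_IP_v4;
 *	strncpy(req->rt_tbl_name, "my_rt_tbl", IPA_RESOURCE_NAME_MAX - 1);
 *	req->num_rules = 1;
 *	req->rules[0].rule.dst = IPA_CLIENT_USB_CONS;
 *	req->rules[0].rule.attrib.attrib_mask = IPA_FLT_DST_ADDR;
 *	req->rules[0].rule.attrib.u.v4.dst_addr = dst_ip;
 *	req->rules[0].rule.attrib.u.v4.dst_addr_mask = 0xffffffff;
 *	req->rules[0].at_rear = 1;
 *	ioctl(fd, IPA_IOC_ADD_RT_RULE, req);
 *	// on success req->rules[0].status is 0 and rt_rule_hdl holds the rule
 */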
diff --git a/include/linux/msm_mdp.h b/include/linux/msm_mdp.h
index 98050ce..d3f6792 100644
--- a/include/linux/msm_mdp.h
+++ b/include/linux/msm_mdp.h
@@ -71,7 +71,7 @@
 #define MSMFB_OVERLAY_VSYNC_CTRL _IOW(MSMFB_IOCTL_MAGIC, 160, unsigned int)
 #define MSMFB_VSYNC_CTRL  _IOW(MSMFB_IOCTL_MAGIC, 161, unsigned int)
 #define MSMFB_METADATA_SET  _IOW(MSMFB_IOCTL_MAGIC, 162, struct msmfb_metadata)
-#define MSMFB_OVERLAY_COMMIT      _IOW(MSMFB_IOCTL_MAGIC, 163, unsigned int)
+#define MSMFB_OVERLAY_COMMIT      _IO(MSMFB_IOCTL_MAGIC, 163)
 
 #define FB_TYPE_3D_PANEL 0x10101010
 #define MDP_IMGTYPE2_START 0x10000
diff --git a/include/linux/nl80211.h b/include/linux/nl80211.h
index 89a8421..e0d9072 100644
--- a/include/linux/nl80211.h
+++ b/include/linux/nl80211.h
@@ -548,6 +548,14 @@
  * @NL80211_CMD_SET_NOACK_MAP: sets a bitmap for the individual TIDs whether
  *      No Acknowledgement Policy should be applied.
  *
+ * @NL80211_CMD_UPDATE_FT_IES: Pass down the most up-to-date Fast Transition
+ *	Information Element to the WLAN driver
+ *
+ * @NL80211_CMD_FT_EVENT: Send a Fast Transition event from the WLAN driver
+ *	to the supplicant. This will carry the target AP's MAC address along
+ *	with the relevant Information Elements. This event is used to report
+ *	received FT IEs (MDIE, FTIE, RSN IE, TIE, RIC IE).
+ *
  * @NL80211_CMD_MAX: highest used command number
  * @__NL80211_CMD_AFTER_LAST: internal use
  */
@@ -689,6 +697,9 @@
 
 	NL80211_CMD_SET_NOACK_MAP,
 
+	NL80211_CMD_UPDATE_FT_IES,
+	NL80211_CMD_FT_EVENT,
+
 	/* add new commands above here */
 
 	/* used to define NL80211_CMD_MAX below */
@@ -1264,6 +1275,10 @@
  * @NL80211_ATTR_BG_SCAN_PERIOD: Background scan period in seconds
  *      or 0 to disable background scan.
  *
+ * @NL80211_ATTR_MDID: Mobility Domain Identifier
+ *
+ * @NL80211_ATTR_IE_RIC: Resource Information Container Information Element
+ *
  * @NL80211_ATTR_MAX: highest attribute number currently defined
  * @__NL80211_ATTR_AFTER_LAST: internal use
  */
@@ -1515,6 +1530,9 @@
 
 	NL80211_ATTR_BG_SCAN_PERIOD,
 
+	NL80211_ATTR_MDID,
+	NL80211_ATTR_IE_RIC,
+
 	/* add attributes here, update the policy in nl80211.c */
 
 	__NL80211_ATTR_AFTER_LAST,
diff --git a/include/linux/qpnp/qpnp-adc.h b/include/linux/qpnp/qpnp-adc.h
index 9d5de90..077ccfc 100644
--- a/include/linux/qpnp/qpnp-adc.h
+++ b/include/linux/qpnp/qpnp-adc.h
@@ -28,7 +28,7 @@
 	DCIN,
 	VCHG_SNS,
 	SPARE1_03,
-	SPARE2_03,
+	USB_ID_MV,
 	VCOIN,
 	VBAT_SNS,
 	VSYS,
@@ -81,44 +81,44 @@
 	LR_MUX7_HW_ID,
 	LR_MUX8_AMUX_THM4,
 	LR_MUX9_AMUX_THM5,
-	LR_MUX10_USB_ID,
+	LR_MUX10_USB_ID_LV,
 	AMUX_PU1,
 	AMUX_PU2,
 	LR_MUX3_BUF_XO_THERM_BUF,
-	LR_MUX1_PU1_BAT_THERM,
-	LR_MUX2_PU1_BAT_ID,
-	LR_MUX3_PU1_XO_THERM,
-	LR_MUX4_PU1_AMUX_THM1,
-	LR_MUX5_PU1_AMUX_THM2,
-	LR_MUX6_PU1_AMUX_THM3,
-	LR_MUX7_PU1_AMUX_HW_ID,
-	LR_MUX8_PU1_AMUX_THM4,
-	LR_MUX9_PU1_AMUX_THM5,
-	LR_MUX10_PU1_AMUX_USB_ID,
-	LR_MUX3_BUF_PU1_XO_THERM_BUF,
-	LR_MUX1_PU2_BAT_THERM,
-	LR_MUX2_PU2_BAT_ID,
-	LR_MUX3_PU2_XO_THERM,
-	LR_MUX4_PU2_AMUX_THM1,
-	LR_MUX5_PU2_AMUX_THM2,
-	LR_MUX6_PU2_AMUX_THM3,
-	LR_MUX7_PU2_AMUX_HW_ID,
-	LR_MUX8_PU2_AMUX_THM4,
-	LR_MUX9_PU2_AMUX_THM5,
-	LR_MUX10_PU2_AMUX_USB_ID,
-	LR_MUX3_BUF_PU2_XO_THERM_BUF,
-	LR_MUX1_PU1_PU2_BAT_THERM,
-	LR_MUX2_PU1_PU2_BAT_ID,
-	LR_MUX3_PU1_PU2_XO_THERM,
-	LR_MUX4_PU1_PU2_AMUX_THM1,
-	LR_MUX5_PU1_PU2_AMUX_THM2,
-	LR_MUX6_PU1_PU2_AMUX_THM3,
-	LR_MUX7_PU1_PU2_AMUX_HW_ID,
-	LR_MUX8_PU1_PU2_AMUX_THM4,
-	LR_MUX9_PU1_PU2_AMUX_THM5,
-	LR_MUX10_PU1_PU2_AMUX_USB_ID,
-	LR_MUX3_BUF_PU1_PU2_XO_THERM_BUF,
-	ALL_OFF,
+	LR_MUX1_PU1_BAT_THERM = 112,
+	LR_MUX2_PU1_BAT_ID = 113,
+	LR_MUX3_PU1_XO_THERM = 114,
+	LR_MUX4_PU1_AMUX_THM1 = 115,
+	LR_MUX5_PU1_AMUX_THM2 = 116,
+	LR_MUX6_PU1_AMUX_THM3 = 117,
+	LR_MUX7_PU1_AMUX_HW_ID = 118,
+	LR_MUX8_PU1_AMUX_THM4 = 119,
+	LR_MUX9_PU1_AMUX_THM5 = 120,
+	LR_MUX10_PU1_AMUX_USB_ID_LV = 121,
+	LR_MUX3_BUF_PU1_XO_THERM_BUF = 124,
+	LR_MUX1_PU2_BAT_THERM = 176,
+	LR_MUX2_PU2_BAT_ID = 177,
+	LR_MUX3_PU2_XO_THERM = 178,
+	LR_MUX4_PU2_AMUX_THM1 = 179,
+	LR_MUX5_PU2_AMUX_THM2 = 180,
+	LR_MUX6_PU2_AMUX_THM3 = 181,
+	LR_MUX7_PU2_AMUX_HW_ID = 182,
+	LR_MUX8_PU2_AMUX_THM4 = 183,
+	LR_MUX9_PU2_AMUX_THM5 = 184,
+	LR_MUX10_PU2_AMUX_USB_ID_LV = 185,
+	LR_MUX3_BUF_PU2_XO_THERM_BUF = 188,
+	LR_MUX1_PU1_PU2_BAT_THERM = 240,
+	LR_MUX2_PU1_PU2_BAT_ID = 241,
+	LR_MUX3_PU1_PU2_XO_THERM = 242,
+	LR_MUX4_PU1_PU2_AMUX_THM1 = 243,
+	LR_MUX5_PU1_PU2_AMUX_THM2 = 244,
+	LR_MUX6_PU1_PU2_AMUX_THM3 = 245,
+	LR_MUX7_PU1_PU2_AMUX_HW_ID = 246,
+	LR_MUX8_PU1_PU2_AMUX_THM4 = 247,
+	LR_MUX9_PU1_PU2_AMUX_THM5 = 248,
+	LR_MUX10_PU1_PU2_AMUX_USB_ID_LV = 249,
+	LR_MUX3_BUF_PU1_PU2_XO_THERM_BUF = 252,
+	ALL_OFF = 255,
 	ADC_MAX_NUM,
 };
 
diff --git a/include/linux/usb/msm_hsusb.h b/include/linux/usb/msm_hsusb.h
index 578b9f9..d6fbc64 100644
--- a/include/linux/usb/msm_hsusb.h
+++ b/include/linux/usb/msm_hsusb.h
@@ -397,7 +397,12 @@
 	unsigned data;
 	struct msm_bus_scale_pdata *bus_scale_table;
 	unsigned log2_irq_thresh;
+
+	/* swfi latency is required while driving resume onto the bus */
 	u32 swfi_latency;
+
+	/* standalone latency is required when HSIC is active */
+	u32 standalone_latency;
 };
 
 struct msm_usb_host_platform_data {
diff --git a/include/linux/videodev2.h b/include/linux/videodev2.h
index 41ff312..b1f534d 100644
--- a/include/linux/videodev2.h
+++ b/include/linux/videodev2.h
@@ -1810,6 +1810,30 @@
 	V4L2_MPEG_VIDC_VIDEO_SYNC_FRAME_DECODE_DISABLE = 0,
 	V4L2_MPEG_VIDC_VIDEO_SYNC_FRAME_DECODE_ENABLE = 1
 };
+#define V4L2_CID_MPEG_VIDC_VIDEO_SECURE (V4L2_CID_MPEG_MSM_VIDC_BASE+24)
+#define V4L2_CID_MPEG_VIDC_VIDEO_EXTRADATA \
+		(V4L2_CID_MPEG_MSM_VIDC_BASE + 25)
+enum v4l2_mpeg_vidc_extradata {
+	V4L2_MPEG_VIDC_EXTRADATA_NONE,
+	V4L2_MPEG_VIDC_EXTRADATA_MB_QUANTIZATION,
+	V4L2_MPEG_VIDC_EXTRADATA_INTERLACE_VIDEO,
+	V4L2_MPEG_VIDC_EXTRADATA_VC1_FRAMEDISP,
+	V4L2_MPEG_VIDC_EXTRADATA_VC1_SEQDISP,
+	V4L2_MPEG_VIDC_EXTRADATA_TIMESTAMP,
+	V4L2_MPEG_VIDC_EXTRADATA_S3D_FRAME_PACKING,
+	V4L2_MPEG_VIDC_EXTRADATA_FRAME_RATE,
+	V4L2_MPEG_VIDC_EXTRADATA_PANSCAN_WINDOW,
+	V4L2_MPEG_VIDC_EXTRADATA_RECOVERY_POINT_SEI,
+	V4L2_MPEG_VIDC_EXTRADATA_CLOSED_CAPTION_UD,
+	V4L2_MPEG_VIDC_EXTRADATA_AFD_UD,
+	V4L2_MPEG_VIDC_EXTRADATA_MULTISLICE_INFO,
+	V4L2_MPEG_VIDC_EXTRADATA_NUM_CONCEALED_MB,
+	V4L2_MPEG_VIDC_EXTRADATA_METADATA_FILLER,
+	V4L2_MPEG_VIDC_INDEX_EXTRADATA_INPUT_CROP,
+	V4L2_MPEG_VIDC_INDEX_EXTRADATA_DIGITAL_ZOOM,
+	V4L2_MPEG_VIDC_INDEX_EXTRADATA_ASPECT_RATIO,
+};
+
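/*
 * Userspace sketch (editor's illustration, not part of the patch): the new
 * extradata control is set like any other V4L2 control, picking one of the
 * enum values above.  The fd is assumed to be an already-open video node.
 *
 *	struct v4l2_control ctrl = {
 *		.id = V4L2_CID_MPEG_VIDC_VIDEO_EXTRADATA,
 *		.value = V4L2_MPEG_VIDC_EXTRADATA_INTERLACE_VIDEO,
 *	};
 *	ioctl(fd, VIDIOC_S_CTRL, &ctrl);
 */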
 /*  Camera class control IDs */
 #define V4L2_CID_CAMERA_CLASS_BASE 	(V4L2_CTRL_CLASS_CAMERA | 0x900)
 #define V4L2_CID_CAMERA_CLASS 		(V4L2_CTRL_CLASS_CAMERA | 1)
diff --git a/include/media/msm_media_info.h b/include/media/msm_media_info.h
index 3098bfe..13ce043 100644
--- a/include/media/msm_media_info.h
+++ b/include/media/msm_media_info.h
@@ -17,7 +17,7 @@
 
 	switch (color_fmt) {
 	case COLOR_FMT_NV12:
-		alignment = 32;
+		alignment = 128;
 		stride = ALIGN(width, alignment);
 		break;
 	default:
@@ -35,8 +35,8 @@
 
 	switch (color_fmt) {
 	case COLOR_FMT_NV12:
-		alignment = 32;
-		stride = ALIGN(((width + 1) >> 1), alignment) << 1;
+		alignment = 128;
+		stride = ALIGN(width, alignment);
 		break;
 	default:
 		break;
@@ -71,7 +71,7 @@
 
 	switch (color_fmt) {
 	case COLOR_FMT_NV12:
-		alignment = 32;
+		alignment = 16;
 		sclines = ALIGN(((height + 1) >> 1), alignment);
 		break;
 	default:
@@ -97,7 +97,7 @@
 	uv_sclines = VENUS_UV_SCANLINES(color_fmt, height);
 	switch (color_fmt) {
 	case COLOR_FMT_NV12:
-		uv_alignment = 32;
+		uv_alignment = 0;
 		y_plane = y_stride * y_sclines;
 		uv_plane = uv_stride * uv_sclines + uv_alignment;
 		size = y_plane + uv_plane;
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index a57c9f9..5c1daf3 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -1318,6 +1318,21 @@
 };
 
 /**
+ * struct cfg80211_update_ft_ies_params - FT IE Information
+ *
+ * This structure provides information needed to update the fast transition IE
+ *
+ * @md: The Mobility Domain ID, 2 Octet value
+ * @ie: Fast Transition IEs
+ * @ie_len: Length of @ie in octets
+ */
+struct cfg80211_update_ft_ies_params {
+	u16 md;
+	u8 *ie;
+	size_t ie_len;
+};
+
+/**
  * struct cfg80211_ops - backend description for wireless configuration
  *
  * This struct is registered by fullmac card drivers and/or wireless stacks
@@ -1697,6 +1712,8 @@
 				  u16 noack_map);
 
 	struct ieee80211_channel *(*get_channel)(struct wiphy *wiphy);
+	int	(*update_ft_ies)(struct wiphy *wiphy, struct net_device *dev,
+				 struct cfg80211_update_ft_ies_params *ftie);
 };
 
 /*
@@ -3348,6 +3365,32 @@
  */
 u16 cfg80211_calculate_bitrate(struct rate_info *rate);
 
+/**
+ * struct cfg80211_ft_event_params - FT Information Elements
+ * @ies: FT IEs
+ * @ies_len: length of the FT IE in bytes
+ * @target_ap: target AP's MAC address
+ * @ric_ies: RIC IE
+ * @ric_ies_len: length of the RIC IE in bytes
+ */
+struct cfg80211_ft_event_params {
+	u8 *ies;
+	size_t ies_len;
+	u8 target_ap[ETH_ALEN];
+	u8 *ric_ies;
+	size_t ric_ies_len;
+};
+
+/**
+ * cfg80211_ft_event - notify userspace about FT IE and RIC IE
+ * @dev: network device
+ * @ft_event: IE information
+ */
+int cfg80211_ft_event(struct net_device *dev,
+			struct cfg80211_ft_event_params ft_event);
+
+
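/*
 * Driver-side sketch (editor's illustration, not part of the patch): a fullmac
 * driver reports the FT IEs received from its firmware through the new event.
 * ft_ie_buf/ft_ie_len, ric_ie_buf/ric_ie_len, bssid and ndev are assumed
 * driver-local variables.
 *
 *	struct cfg80211_ft_event_params ft_event = {};
 *
 *	ft_event.ies = ft_ie_buf;
 *	ft_event.ies_len = ft_ie_len;
 *	ft_event.ric_ies = ric_ie_buf;
 *	ft_event.ric_ies_len = ric_ie_len;
 *	memcpy(ft_event.target_ap, bssid, ETH_ALEN);
 *	cfg80211_ft_event(ndev, ft_event);
 */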
 /* Logging, debugging and troubleshooting/diagnostic helpers. */
 
 /* wiphy_printk helpers, similar to dev_printk */
diff --git a/include/sound/apr_audio-v2.h b/include/sound/apr_audio-v2.h
index 07179e9..4376ece 100644
--- a/include/sound/apr_audio-v2.h
+++ b/include/sound/apr_audio-v2.h
@@ -6123,6 +6123,11 @@
 /*	Band cut equalizer effect.*/
 #define ASM_PARAM_EQ_BAND_CUT       6
 
+/* Voice get & set params */
+#define VOICE_CMD_SET_PARAM				0x0001133D
+#define VOICE_CMD_GET_PARAM				0x0001133E
+#define VOICE_EVT_GET_PARAM_ACK				0x00011008
+
 
 /* ERROR CODES */
 /* Success. The operation completed with no errors. */
diff --git a/include/sound/msm-dai-q6-v2.h b/include/sound/msm-dai-q6-v2.h
index 3d5ffdd..6c60318 100644
--- a/include/sound/msm-dai-q6-v2.h
+++ b/include/sound/msm-dai-q6-v2.h
@@ -20,6 +20,10 @@
 #define MSM_MI2S_SD3 (1 << 3)
 #define MSM_MI2S_CAP_RX 0
 #define MSM_MI2S_CAP_TX 1
+#define MSM_PRIM_MI2S 0
+#define MSM_SEC_MI2S  1
+#define MSM_TERT_MI2S 2
+#define MSM_QUAD_MI2S 3
 
 struct msm_dai_auxpcm_pdata {
 	const char *clk;
@@ -35,6 +39,11 @@
 	int pcm_clk_rate;
 };
 
+struct msm_mi2s_pdata {
+	u16 rx_sd_lines;
+	u16 tx_sd_lines;
+};
+
 struct msm_i2s_data {
 	u32 capability; /* RX or TX */
 	u16 sd_lines;
diff --git a/include/sound/q6afe-v2.h b/include/sound/q6afe-v2.h
index e107130..1324f8a 100644
--- a/include/sound/q6afe-v2.h
+++ b/include/sound/q6afe-v2.h
@@ -16,6 +16,7 @@
 #define IN			0x000
 #define OUT			0x001
 #define MSM_AFE_MONO        0
+#define MSM_AFE_CH_STEREO   1
 #define MSM_AFE_MONO_RIGHT  1
 #define MSM_AFE_MONO_LEFT   2
 #define MSM_AFE_STEREO      3
diff --git a/lib/Kconfig b/lib/Kconfig
index f1621d5..8437e36 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -387,4 +387,11 @@
 	  The kernel drivers receive the QMI message over a transport
 	  and then decode it into a C structure.
 
+config QMI_ENCDEC_DEBUG
+	bool
+	help
+	  Kernel config option to enable debugging in the QMI Encode/Decode
+	  library. This will log information about each element and message
+	  being encoded and decoded.
+
 endmenu
diff --git a/lib/qmi_encdec.c b/lib/qmi_encdec.c
index d759885..40273d0 100644
--- a/lib/qmi_encdec.c
+++ b/lib/qmi_encdec.c
@@ -25,6 +25,61 @@
 #define TLV_LEN_SIZE sizeof(uint16_t)
 #define TLV_TYPE_SIZE sizeof(uint8_t)
 
+#ifdef CONFIG_QMI_ENCDEC_DEBUG
+
+#define qmi_encdec_dump(prefix_str, buf, buf_len) do { \
+	const u8 *ptr = buf; \
+	int i, linelen, remaining = buf_len; \
+	int rowsize = 16, groupsize = 1; \
+	unsigned char linebuf[256]; \
+	for (i = 0; i < buf_len; i += rowsize) { \
+		linelen = min(remaining, rowsize); \
+		remaining -= linelen; \
+		hex_dump_to_buffer(ptr + i, linelen, rowsize, groupsize, \
+				   linebuf, sizeof(linebuf), false); \
+		pr_debug("%s: %s\n", prefix_str, linebuf); \
+	} \
+} while (0)
+
+#define QMI_ENCODE_LOG_MSG(buf, buf_len) do { \
+	qmi_encdec_dump("QMI_ENCODE_MSG", buf, buf_len); \
+} while (0)
+
+#define QMI_DECODE_LOG_MSG(buf, buf_len) do { \
+	qmi_encdec_dump("QMI_DECODE_MSG", buf, buf_len); \
+} while (0)
+
+#define QMI_ENCODE_LOG_ELEM(level, elem_len, elem_size, buf) do { \
+	pr_debug("QMI_ENCODE_ELEM lvl: %d, len: %d, size: %d\n", \
+		 level, elem_len, elem_size); \
+	qmi_encdec_dump("QMI_ENCODE_ELEM", buf, (elem_len * elem_size)); \
+} while (0)
+
+#define QMI_DECODE_LOG_ELEM(level, elem_len, elem_size, buf) do { \
+	pr_debug("QMI_DECODE_ELEM lvl: %d, len: %d, size: %d\n", \
+		 level, elem_len, elem_size); \
+	qmi_encdec_dump("QMI_DECODE_ELEM", buf, (elem_len * elem_size)); \
+} while (0)
+
+#define QMI_ENCODE_LOG_TLV(tlv_type, tlv_len) do { \
+	pr_debug("QMI_ENCODE_TLV type: %d, len: %d\n", tlv_type, tlv_len); \
+} while (0)
+
+#define QMI_DECODE_LOG_TLV(tlv_type, tlv_len) do { \
+	pr_debug("QMI_DECODE_TLV type: %d, len: %d\n", tlv_type, tlv_len); \
+} while (0)
+
+#else
+
+#define QMI_ENCODE_LOG_MSG(buf, buf_len) do { } while (0)
+#define QMI_DECODE_LOG_MSG(buf, buf_len) do { } while (0)
+#define QMI_ENCODE_LOG_ELEM(level, elem_len, elem_size, buf) do { } while (0)
+#define QMI_DECODE_LOG_ELEM(level, elem_len, elem_size, buf) do { } while (0)
+#define QMI_ENCODE_LOG_TLV(tlv_type, tlv_len) do { } while (0)
+#define QMI_DECODE_LOG_TLV(tlv_type, tlv_len) do { } while (0)
+
+#endif
+
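/*
 * Note (editor's addition): the dumps above go through pr_debug(), so on a
 * kernel with CONFIG_DYNAMIC_DEBUG the call sites also have to be enabled at
 * runtime, e.g.:
 *
 *	echo 'file qmi_encdec.c +p' > /sys/kernel/debug/dynamic_debug/control
 */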
 static int _qmi_kernel_encode(struct elem_info *ei_array,
 			      void *out_buf, void *in_c_struct,
 			      int enc_level);
@@ -232,6 +287,8 @@
 		case QMI_SIGNED_4_BYTE_ENUM:
 			rc = qmi_encode_basic_elem(buf_dst, buf_src,
 				data_len_value, temp_ei->elem_size);
+			QMI_ENCODE_LOG_ELEM(enc_level, data_len_value,
+				temp_ei->elem_size, buf_src);
 			UPDATE_ENCODE_VARIABLES(temp_ei, buf_dst,
 				encoded_bytes, tlv_len, encode_tlv, rc);
 			break;
@@ -253,6 +310,7 @@
 
 		if (encode_tlv && enc_level == 1) {
 			QMI_ENCDEC_ENCODE_TLV(tlv_type, tlv_len, tlv_pointer);
+			QMI_ENCODE_LOG_TLV(tlv_type, tlv_len);
 			encoded_bytes += (TLV_TYPE_SIZE + TLV_LEN_SIZE);
 			tlv_pointer = buf_dst;
 			tlv_len = 0;
@@ -260,6 +318,7 @@
 			encode_tlv = 0;
 		}
 	}
+	QMI_ENCODE_LOG_MSG(out_buf, encoded_bytes);
 	return encoded_bytes;
 }
 
@@ -419,11 +478,13 @@
 	void *buf_src = in_buf;
 	int rc;
 
+	QMI_DECODE_LOG_MSG(in_buf, in_buf_len);
 	while (decoded_bytes < in_buf_len) {
 		if (dec_level == 1) {
 			tlv_pointer = buf_src;
 			QMI_ENCDEC_DECODE_TLV(&tlv_type,
 					      &tlv_len, tlv_pointer);
+			QMI_DECODE_LOG_TLV(tlv_type, tlv_len);
 			buf_src += (TLV_TYPE_SIZE + TLV_LEN_SIZE);
 			decoded_bytes += (TLV_TYPE_SIZE + TLV_LEN_SIZE);
 			temp_ei = find_ei(ei_array, tlv_type);
@@ -470,6 +531,8 @@
 		case QMI_SIGNED_4_BYTE_ENUM:
 			rc = qmi_decode_basic_elem(buf_dst, buf_src,
 				data_len_value, temp_ei->elem_size);
+			QMI_DECODE_LOG_ELEM(dec_level, data_len_value,
+				temp_ei->elem_size, buf_dst);
 			UPDATE_DECODE_VARIABLES(buf_src, decoded_bytes, rc);
 			break;
 
diff --git a/mm/Kconfig b/mm/Kconfig
index 4cde97f..bbab5a6 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -390,3 +390,14 @@
           the memory corresponding to the hole to be removed using memblock-
           remove.
 
+config USE_USER_ACCESSIBLE_TIMERS
+	bool "Enables timers accessible from userspace"
+	depends on MMU
+	help
+	  User-accessible timers allow the kernel to map kernel timer
+	  registers to a userspace accessible page, to allow faster
+	  access to time information.  This flag will enable the
+	  interface code in the main kernel.  However, there is
+	  architecture-specific code that needs to be enabled
+	  separately.
+
diff --git a/mm/memory.c b/mm/memory.c
index 6105f47..174fcaa 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1726,6 +1726,19 @@
 			goto next_page;
 		}
 
+		if (use_user_accessible_timers()) {
+			if (!vma && in_user_timers_area(mm, start)) {
+				int goto_next_page = 0;
+				int user_timer_ret = get_user_timer_page(vma,
+					mm, start, gup_flags, pages, i,
+					&goto_next_page);
+				if (goto_next_page)
+					goto next_page;
+				else
+					return user_timer_ret;
+			}
+		}
+
 		if (!vma ||
 		    (vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
 		    !(vm_flags & vma->vm_flags))
diff --git a/mm/mlock.c b/mm/mlock.c
index ef726e8..38c77ab 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -229,7 +229,9 @@
 
 	if (!((vma->vm_flags & (VM_DONTEXPAND | VM_RESERVED)) ||
 			is_vm_hugetlb_page(vma) ||
-			vma == get_gate_vma(current->mm))) {
+			vma == get_gate_vma(current->mm) ||
+			((use_user_accessible_timers() &&
+				(vma == get_user_timers_vma(current->mm)))))) {
 
 		__mlock_vma_pages_range(vma, start, end, NULL);
 
@@ -324,7 +326,9 @@
 	int lock = !!(newflags & VM_LOCKED);
 
 	if (newflags == vma->vm_flags || (vma->vm_flags & VM_SPECIAL) ||
-	    is_vm_hugetlb_page(vma) || vma == get_gate_vma(current->mm))
+	    is_vm_hugetlb_page(vma) || vma == get_gate_vma(current->mm) ||
+	    ((use_user_accessible_timers()) &&
+		(vma == get_user_timers_vma(current->mm))))
 		goto out;	/* don't set VM_LOCKED,  don't count */
 
 	pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
index 22a4dbe..fa2469e 100644
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
@@ -1426,6 +1426,11 @@
 	} else {
 		u16 flags;
 
+		if (!(pi->conn)) {
+			kfree_skb(skb);
+			return;
+		}
+
 		bt_cb(skb)->force_active = pi->force_active;
 		BT_DBG("Sending on BR/EDR connection %p", pi->conn->hcon);
 
diff --git a/net/wireless/mlme.c b/net/wireless/mlme.c
index f5a7ac3..dd99041 100644
--- a/net/wireless/mlme.c
+++ b/net/wireless/mlme.c
@@ -954,3 +954,16 @@
 	return nl80211_unexpected_4addr_frame(dev, addr, gfp);
 }
 EXPORT_SYMBOL(cfg80211_rx_unexpected_4addr_frame);
+
+int cfg80211_ft_event(struct net_device *dev,
+			struct cfg80211_ft_event_params ft_event)
+{
+	int err = 0;
+	struct wiphy *wiphy = dev->ieee80211_ptr->wiphy;
+	struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
+
+	nl80211_ft_event(rdev, dev, ft_event);
+
+	return err;
+}
+EXPORT_SYMBOL(cfg80211_ft_event);
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index e322d4d..0410707 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -206,6 +206,9 @@
 	[NL80211_ATTR_NOACK_MAP] = { .type = NLA_U16 },
 	[NL80211_ATTR_INACTIVITY_TIMEOUT] = { .type = NLA_U16 },
 	[NL80211_ATTR_BG_SCAN_PERIOD] = { .type = NLA_U16 },
+	[NL80211_ATTR_MDID] = { .type = NLA_U16 },
+	[NL80211_ATTR_IE_RIC] = { .type = NLA_BINARY,
+					 .len = IEEE80211_MAX_DATA_LEN },
 };
 
 /* policy for the key attributes */
@@ -6299,6 +6302,26 @@
 	return 0;
 }
 
+static int nl80211_update_ft_ies(struct sk_buff *skb, struct genl_info *info)
+{
+	struct cfg80211_registered_device *rdev = info->user_ptr[0];
+	struct cfg80211_update_ft_ies_params ft_params;
+	struct net_device *dev = info->user_ptr[1];
+
+	if (!rdev->ops->update_ft_ies)
+		return -EOPNOTSUPP;
+
+	if (!info->attrs[NL80211_ATTR_MDID])
+		return -EINVAL;
+
+	ft_params.md = nla_get_u16(info->attrs[NL80211_ATTR_MDID]);
+
+	if (!info->attrs[NL80211_ATTR_IE])
+		return -EINVAL;
+
+	ft_params.ie = nla_data(info->attrs[NL80211_ATTR_IE]);
+	ft_params.ie_len = nla_len(info->attrs[NL80211_ATTR_IE]);
+
+	return rdev->ops->update_ft_ies(&rdev->wiphy, dev, &ft_params);
+}
+
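/*
 * Userspace sketch (editor's illustration, not part of the patch) of issuing
 * the new command with libnl-3; error handling is omitted and ifindex, mdid
 * and ft_ies/ft_ies_len are assumed caller-supplied values.
 *
 *	struct nl_sock *sk = nl_socket_alloc();
 *	struct nl_msg *msg = nlmsg_alloc();
 *	int fam;
 *
 *	genl_connect(sk);
 *	fam = genl_ctrl_resolve(sk, "nl80211");
 *	genlmsg_put(msg, 0, 0, fam, 0, 0, NL80211_CMD_UPDATE_FT_IES, 0);
 *	nla_put_u32(msg, NL80211_ATTR_IFINDEX, ifindex);
 *	nla_put_u16(msg, NL80211_ATTR_MDID, mdid);
 *	nla_put(msg, NL80211_ATTR_IE, ft_ies_len, ft_ies);
 *	nl_send_auto(sk, msg);
 */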
 #define NL80211_FLAG_NEED_WIPHY		0x01
 #define NL80211_FLAG_NEED_NETDEV	0x02
 #define NL80211_FLAG_NEED_RTNL		0x04
@@ -6887,6 +6910,14 @@
 		.internal_flags = NL80211_FLAG_NEED_NETDEV |
 				  NL80211_FLAG_NEED_RTNL,
 	},
+	{
+		.cmd = NL80211_CMD_UPDATE_FT_IES,
+		.doit = nl80211_update_ft_ies,
+		.policy = nl80211_policy,
+		.flags = GENL_ADMIN_PERM,
+		.internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
+				  NL80211_FLAG_NEED_RTNL,
+	},
 
 };
 
@@ -8080,6 +8111,47 @@
 	.notifier_call = nl80211_netlink_notify,
 };
 
+void nl80211_ft_event(struct cfg80211_registered_device *rdev,
+	struct net_device *netdev, struct cfg80211_ft_event_params ft_event)
+{
+	struct sk_buff *msg;
+	void *hdr;
+
+	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+	if (!msg)
+		return;
+
+	hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_FT_EVENT);
+	if (!hdr) {
+		nlmsg_free(msg);
+		return;
+	}
+
+	NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx);
+	NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex);
+	if (ft_event.target_ap)
+		NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, ft_event.target_ap);
+	if (ft_event.ies)
+		NLA_PUT(msg, NL80211_ATTR_IE, ft_event.ies_len, ft_event.ies);
+	if (ft_event.ric_ies)
+		NLA_PUT(msg, NL80211_ATTR_IE_RIC, ft_event.ric_ies_len,
+					ft_event.ric_ies);
+
+	if (genlmsg_end(msg, hdr) < 0) {
+		nlmsg_free(msg);
+		return;
+	}
+
+	genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0,
+				nl80211_mlme_mcgrp.id, GFP_KERNEL);
+	return;
+
+ nla_put_failure:
+	genlmsg_cancel(msg, hdr);
+	nlmsg_free(msg);
+}
+
+
 /* initialisation/exit functions */
 
 int nl80211_init(void)
diff --git a/net/wireless/nl80211.h b/net/wireless/nl80211.h
index 4ffe50d..ffd4c8a 100644
--- a/net/wireless/nl80211.h
+++ b/net/wireless/nl80211.h
@@ -123,4 +123,8 @@
 bool nl80211_unexpected_4addr_frame(struct net_device *dev,
 				    const u8 *addr, gfp_t gfp);
 
+void nl80211_ft_event(struct cfg80211_registered_device *rdev,
+			struct net_device *netdev,
+			struct cfg80211_ft_event_params ft_event);
+
 #endif /* __NET_WIRELESS_NL80211_H */
diff --git a/sound/soc/codecs/wcd9304.c b/sound/soc/codecs/wcd9304.c
index 412090f..fd9d825 100644
--- a/sound/soc/codecs/wcd9304.c
+++ b/sound/soc/codecs/wcd9304.c
@@ -2836,7 +2836,7 @@
 	if (dai->id <= NUM_CODEC_DAIS) {
 		if (sitar->dai[dai->id].ch_mask) {
 			active = 1;
-			pr_debug("%s(): Codec DAI: chmask[%d] = 0x%x\n",
+			pr_debug("%s(): Codec DAI: chmask[%d] = 0x%lx\n",
 				__func__, dai->id,
 				sitar->dai[dai->id].ch_mask);
 		}
diff --git a/sound/soc/codecs/wcd9310.c b/sound/soc/codecs/wcd9310.c
index f28fd774..9c1aef2 100644
--- a/sound/soc/codecs/wcd9310.c
+++ b/sound/soc/codecs/wcd9310.c
@@ -4128,7 +4128,7 @@
 	if (dai->id <= NUM_CODEC_DAIS) {
 		if (tabla->dai[dai->id].ch_mask) {
 			active = 1;
-			pr_debug("%s(): Codec DAI: chmask[%d] = 0x%x\n",
+			pr_debug("%s(): Codec DAI: chmask[%d] = 0x%lx\n",
 				__func__, dai->id, tabla->dai[dai->id].ch_mask);
 		}
 	}
@@ -7863,7 +7863,7 @@
 				port_id = i*8 + j;
 				for (k = 0; k < ARRAY_SIZE(tabla_dai); k++) {
 					ch_mask_temp = 1 << port_id;
-					pr_debug("%s: tabla_p->dai[%d].ch_mask = 0x%x\n",
+					pr_debug("%s: tabla_p->dai[%d].ch_mask = 0x%lx\n",
 						 __func__, k,
 						 tabla_p->dai[k].ch_mask);
 					if (ch_mask_temp &
diff --git a/sound/soc/codecs/wcd9320.c b/sound/soc/codecs/wcd9320.c
index 9f4babb..a27dc48 100644
--- a/sound/soc/codecs/wcd9320.c
+++ b/sound/soc/codecs/wcd9320.c
@@ -18,6 +18,8 @@
 #include <linux/printk.h>
 #include <linux/ratelimit.h>
 #include <linux/debugfs.h>
+#include <linux/wait.h>
+#include <linux/bitops.h>
 #include <linux/mfd/wcd9xxx/core.h>
 #include <linux/mfd/wcd9xxx/wcd9xxx_registers.h>
 #include <linux/mfd/wcd9xxx/wcd9320_registers.h>
@@ -45,6 +47,13 @@
 #define TAIKO_TX_PORT_NUMBER	16
 
 #define TAIKO_I2S_MASTER_MODE_MASK 0x08
+#define TAIKO_MCLK_CLK_12P288MHZ 12288000
+#define TAIKO_MCLK_CLK_9P6HZ 9600000
+
+#define TAIKO_SLIM_CLOSE_TIMEOUT 1000
+#define TAIKO_SLIM_IRQ_OVERFLOW (1 << 0)
+#define TAIKO_SLIM_IRQ_UNDERFLOW (1 << 1)
+#define TAIKO_SLIM_IRQ_PORT_CLOSED (1 << 2)
 
 enum {
 	AIF1_PB = 0,
@@ -97,7 +106,8 @@
 };
 
 enum {
-	COMPANDER_1 = 0,
+	COMPANDER_0,
+	COMPANDER_1,
 	COMPANDER_2,
 	COMPANDER_MAX,
 };
@@ -203,6 +213,7 @@
 };
 
 static const u32 comp_shift[] = {
+	4, /* Compander 0's clock source is on interpolator 7 */
 	0,
 	2,
 };
@@ -214,31 +225,46 @@
 	COMPANDER_2,
 	COMPANDER_2,
 	COMPANDER_2,
+	COMPANDER_0,
 	COMPANDER_MAX,
 };
 
 static const struct comp_sample_dependent_params comp_samp_params[] = {
 	{
-		.peak_det_timeout = 0x2,
-		.rms_meter_div_fact = 0x8 << 4,
-		.rms_meter_resamp_fact = 0x21,
+		/* 8 kHz */
+		.peak_det_timeout = 0x02,
+		.rms_meter_div_fact = 0x09,
+		.rms_meter_resamp_fact = 0x06,
 	},
 	{
-		.peak_det_timeout = 0x3,
-		.rms_meter_div_fact = 0x9 << 4,
+		/* 16 kHz */
+		.peak_det_timeout = 0x03,
+		.rms_meter_div_fact = 0x0A,
+		.rms_meter_resamp_fact = 0x0C,
+	},
+	{
+		/* 32 kHz */
+		.peak_det_timeout = 0x05,
+		.rms_meter_div_fact = 0x0B,
+		.rms_meter_resamp_fact = 0x1E,
+	},
+	{
+		/* 48 kHz */
+		.peak_det_timeout = 0x05,
+		.rms_meter_div_fact = 0x0B,
 		.rms_meter_resamp_fact = 0x28,
 	},
-
 	{
-		.peak_det_timeout = 0x5,
-		.rms_meter_div_fact = 0xB << 4,
-		.rms_meter_resamp_fact = 0x28,
+		/* 96 kHz */
+		.peak_det_timeout = 0x06,
+		.rms_meter_div_fact = 0x0C,
+		.rms_meter_resamp_fact = 0x50,
 	},
-
 	{
-		.peak_det_timeout = 0x5,
-		.rms_meter_div_fact = 0xB << 4,
-		.rms_meter_resamp_fact = 0x28,
+		/* 192 kHz */
+		.peak_det_timeout = 0x07,
+		.rms_meter_div_fact = 0xD,
+		.rms_meter_resamp_fact = 0xA0,
 	},
 };
 
@@ -318,7 +344,6 @@
 	case SND_SOC_DAPM_PRE_PMU:
 		snd_soc_update_bits(codec, w->reg, 0x01, 0x01);
 		snd_soc_update_bits(codec, w->reg, 0x40, 0x00);
-		snd_soc_update_bits(codec, TAIKO_A_NCP_STATIC, 0x0f, 0x01);
 		break;
 
 	case SND_SOC_DAPM_POST_PMU:
@@ -328,7 +353,6 @@
 	case SND_SOC_DAPM_PRE_PMD:
 	    snd_soc_update_bits(codec, w->reg, 0x01, 0x00);
 		snd_soc_update_bits(codec, w->reg, 0x40, 0x40);
-		snd_soc_update_bits(codec, TAIKO_A_NCP_STATIC, 0x0f, 0x08);
 		break;
 	}
 	return 0;
@@ -551,194 +575,167 @@
 	return 0;
 }
 
-static int taiko_compander_gain_offset(
-	struct snd_soc_codec *codec, u32 enable,
-	unsigned int reg, int mask,	int event)
-{
-	int pa_mode = snd_soc_read(codec, reg) & mask;
-	int gain_offset = 0;
-	/*  if PMU && enable is 1-> offset is 3
-	 *  if PMU && enable is 0-> offset is 0
-	 *  if PMD && pa_mode is PA -> offset is 0: PMU compander is off
-	 *  if PMD && pa_mode is comp -> offset is -3: PMU compander is on.
-	 */
-
-	if (SND_SOC_DAPM_EVENT_ON(event) && (enable != 0))
-		gain_offset = TAIKO_COMP_DIGITAL_GAIN_OFFSET;
-	if (SND_SOC_DAPM_EVENT_OFF(event) && (pa_mode == 0))
-		gain_offset = -TAIKO_COMP_DIGITAL_GAIN_OFFSET;
-	return gain_offset;
-}
-
-
-static int taiko_config_gain_compander(
-				struct snd_soc_codec *codec,
-				u32 compander, u32 enable, int event)
-{
-	int value = 0;
-	int mask = 1 << 5;
-	int gain = 0;
-	int gain_offset;
-	if (compander >= COMPANDER_MAX) {
-		pr_err("%s: Error, invalid compander channel\n", __func__);
-		return -EINVAL;
-	}
-
-	if ((enable == 0) || SND_SOC_DAPM_EVENT_OFF(event))
-		value = 1 << 4;
-
-	if (compander == COMPANDER_1) {
-		gain_offset = taiko_compander_gain_offset(codec, enable,
-				TAIKO_A_RX_HPH_L_GAIN, mask, event);
-		snd_soc_update_bits(codec, TAIKO_A_RX_HPH_L_GAIN, mask, value);
-		gain = snd_soc_read(codec, TAIKO_A_CDC_RX1_VOL_CTL_B2_CTL);
-		snd_soc_update_bits(codec, TAIKO_A_CDC_RX1_VOL_CTL_B2_CTL,
-				0xFF, gain - gain_offset);
-		gain_offset = taiko_compander_gain_offset(codec, enable,
-				TAIKO_A_RX_HPH_R_GAIN, mask, event);
-		snd_soc_update_bits(codec, TAIKO_A_RX_HPH_R_GAIN, mask, value);
-		gain = snd_soc_read(codec, TAIKO_A_CDC_RX2_VOL_CTL_B2_CTL);
-		snd_soc_update_bits(codec, TAIKO_A_CDC_RX2_VOL_CTL_B2_CTL,
-				0xFF, gain - gain_offset);
-	} else if (compander == COMPANDER_2) {
-		gain_offset = taiko_compander_gain_offset(codec, enable,
-				TAIKO_A_RX_LINE_1_GAIN, mask, event);
-		snd_soc_update_bits(codec, TAIKO_A_RX_LINE_1_GAIN, mask, value);
-		gain = snd_soc_read(codec, TAIKO_A_CDC_RX3_VOL_CTL_B2_CTL);
-		snd_soc_update_bits(codec, TAIKO_A_CDC_RX3_VOL_CTL_B2_CTL,
-				0xFF, gain - gain_offset);
-		gain_offset = taiko_compander_gain_offset(codec, enable,
-				TAIKO_A_RX_LINE_3_GAIN, mask, event);
-		snd_soc_update_bits(codec, TAIKO_A_RX_LINE_3_GAIN, mask, value);
-		gain = snd_soc_read(codec, TAIKO_A_CDC_RX4_VOL_CTL_B2_CTL);
-		snd_soc_update_bits(codec, TAIKO_A_CDC_RX4_VOL_CTL_B2_CTL,
-				0xFF, gain - gain_offset);
-		gain_offset = taiko_compander_gain_offset(codec, enable,
-				TAIKO_A_RX_LINE_2_GAIN, mask, event);
-		snd_soc_update_bits(codec, TAIKO_A_RX_LINE_2_GAIN, mask, value);
-		gain = snd_soc_read(codec, TAIKO_A_CDC_RX5_VOL_CTL_B2_CTL);
-		snd_soc_update_bits(codec, TAIKO_A_CDC_RX5_VOL_CTL_B2_CTL,
-				0xFF, gain - gain_offset);
-		gain_offset = taiko_compander_gain_offset(codec, enable,
-				TAIKO_A_RX_LINE_4_GAIN, mask, event);
-		snd_soc_update_bits(codec, TAIKO_A_RX_LINE_4_GAIN, mask, value);
-		gain = snd_soc_read(codec, TAIKO_A_CDC_RX6_VOL_CTL_B2_CTL);
-		snd_soc_update_bits(codec, TAIKO_A_CDC_RX6_VOL_CTL_B2_CTL,
-				0xFF, gain - gain_offset);
-	}
-	return 0;
-}
 static int taiko_get_compander(struct snd_kcontrol *kcontrol,
-					   struct snd_ctl_elem_value *ucontrol)
+			       struct snd_ctl_elem_value *ucontrol)
 {
 
 	struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
 	int comp = ((struct soc_multi_mixer_control *)
-					kcontrol->private_value)->max;
+		    kcontrol->private_value)->shift;
 	struct taiko_priv *taiko = snd_soc_codec_get_drvdata(codec);
 
 	ucontrol->value.integer.value[0] = taiko->comp_enabled[comp];
-
 	return 0;
 }
 
 static int taiko_set_compander(struct snd_kcontrol *kcontrol,
-					   struct snd_ctl_elem_value *ucontrol)
+			       struct snd_ctl_elem_value *ucontrol)
 {
 	struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
 	struct taiko_priv *taiko = snd_soc_codec_get_drvdata(codec);
 	int comp = ((struct soc_multi_mixer_control *)
-					kcontrol->private_value)->max;
+		    kcontrol->private_value)->shift;
 	int value = ucontrol->value.integer.value[0];
 
-	if (value == taiko->comp_enabled[comp]) {
-		pr_debug("%s: compander #%d enable %d no change\n",
-			    __func__, comp, value);
-		return 0;
-	}
+	pr_debug("%s: Compander %d enable current %d, new %d\n",
+		 __func__, comp, taiko->comp_enabled[comp], value);
 	taiko->comp_enabled[comp] = value;
 	return 0;
 }
 
+static int taiko_config_gain_compander(struct snd_soc_codec *codec,
+				       int comp, bool enable)
+{
+	int ret = 0;
+
+	switch (comp) {
+	case COMPANDER_0:
+		snd_soc_update_bits(codec, TAIKO_A_SPKR_DRV_GAIN,
+				    1 << 2, !enable << 2);
+		break;
+	case COMPANDER_1:
+		snd_soc_update_bits(codec, TAIKO_A_RX_HPH_L_GAIN,
+				    1 << 5, !enable << 5);
+		snd_soc_update_bits(codec, TAIKO_A_RX_HPH_R_GAIN,
+				    1 << 5, !enable << 5);
+		break;
+	case COMPANDER_2:
+		snd_soc_update_bits(codec, TAIKO_A_RX_LINE_1_GAIN,
+				    1 << 5, !enable << 5);
+		snd_soc_update_bits(codec, TAIKO_A_RX_LINE_3_GAIN,
+				    1 << 5, !enable << 5);
+		snd_soc_update_bits(codec, TAIKO_A_RX_LINE_2_GAIN,
+				    1 << 5, !enable << 5);
+		snd_soc_update_bits(codec, TAIKO_A_RX_LINE_4_GAIN,
+				    1 << 5, !enable << 5);
+		break;
+	default:
+		WARN_ON(1);
+		ret = -EINVAL;
+	}
+
+	return ret;
+}
+
+static void taiko_discharge_comp(struct snd_soc_codec *codec, int comp)
+{
+	/* Update RSM to 1, DIVF to 5 */
+	snd_soc_write(codec, TAIKO_A_CDC_COMP0_B3_CTL + (comp * 8), 1);
+	snd_soc_update_bits(codec, TAIKO_A_CDC_COMP0_B2_CTL + (comp * 8), 0xF0,
+			    1 << 5);
+	/* Wait for 1ms */
+	usleep_range(1000, 1000);
+}
 
 static int taiko_config_compander(struct snd_soc_dapm_widget *w,
-						  struct snd_kcontrol *kcontrol,
-						  int event)
+				  struct snd_kcontrol *kcontrol, int event)
 {
+	int mask, emask;
+	bool timedout;
+	unsigned long timeout;
 	struct snd_soc_codec *codec = w->codec;
 	struct taiko_priv *taiko = snd_soc_codec_get_drvdata(codec);
-	u32 rate = taiko->comp_fs[w->shift];
+	const int comp = w->shift;
+	const u32 rate = taiko->comp_fs[comp];
+	const struct comp_sample_dependent_params *comp_params =
+	    &comp_samp_params[rate];
 
-	pr_debug("%s: %s event %d enabled = %d", __func__, w->name,
-		event, taiko->comp_enabled[w->shift]);
+	pr_debug("%s: %s event %d compander %d, enabled %d", __func__,
+		 w->name, event, comp, taiko->comp_enabled[comp]);
+
+	if (!taiko->comp_enabled[comp])
+		return 0;
+
+	/* Compander 0 has single channel */
+	mask = (comp == COMPANDER_0 ? 0x01 : 0x03);
+	emask = (comp == COMPANDER_0 ? 0x02 : 0x03);
 
 	switch (event) {
 	case SND_SOC_DAPM_PRE_PMU:
-		if (taiko->comp_enabled[w->shift] != 0) {
-			/* Enable both L/R compander clocks */
-			snd_soc_update_bits(codec,
-					TAIKO_A_CDC_CLK_RX_B2_CTL,
-					0x03 << comp_shift[w->shift],
-					0x03 << comp_shift[w->shift]);
-			/* Clar the HALT for the compander*/
-			snd_soc_update_bits(codec,
-					TAIKO_A_CDC_COMP1_B1_CTL +
-					w->shift * 8, 1 << 2, 0);
-			/* Toggle compander reset bits*/
-			snd_soc_update_bits(codec,
-					TAIKO_A_CDC_CLK_OTHR_RESET_B2_CTL,
-					0x03 << comp_shift[w->shift],
-					0x03 << comp_shift[w->shift]);
-			snd_soc_update_bits(codec,
-					TAIKO_A_CDC_CLK_OTHR_RESET_B2_CTL,
-					0x03 << comp_shift[w->shift], 0);
-			taiko_config_gain_compander(codec, w->shift, 1, event);
-			/* Update the RMS meter resampling*/
-			snd_soc_update_bits(codec,
-					TAIKO_A_CDC_COMP1_B3_CTL +
-					w->shift * 8, 0xFF, 0x01);
-			/* Wait for 1ms*/
-			usleep_range(1000, 1000);
-		}
+		/* Set gain source to compander */
+		taiko_config_gain_compander(codec, comp, true);
+		/* Enable RX interpolation path clocks */
+		snd_soc_update_bits(codec, TAIKO_A_CDC_CLK_RX_B2_CTL,
+				    mask << comp_shift[comp],
+				    mask << comp_shift[comp]);
+
+		taiko_discharge_comp(codec, comp);
+
+		/* Clear compander halt */
+		snd_soc_update_bits(codec, TAIKO_A_CDC_COMP0_B1_CTL +
+					   (comp * 8),
+				    1 << 2, 0);
+		/* Toggle compander reset bits */
+		snd_soc_update_bits(codec, TAIKO_A_CDC_CLK_OTHR_RESET_B2_CTL,
+				    mask << comp_shift[comp],
+				    mask << comp_shift[comp]);
+		snd_soc_update_bits(codec, TAIKO_A_CDC_CLK_OTHR_RESET_B2_CTL,
+				    mask << comp_shift[comp], 0);
 		break;
 	case SND_SOC_DAPM_POST_PMU:
-		/* Set sample rate dependent paramater*/
-		if (taiko->comp_enabled[w->shift] != 0) {
-			snd_soc_update_bits(codec, TAIKO_A_CDC_COMP1_FS_CFG +
-			w->shift * 8, 0x03,	rate);
-			snd_soc_update_bits(codec, TAIKO_A_CDC_COMP1_B2_CTL +
-			w->shift * 8, 0x0F,
-			comp_samp_params[rate].peak_det_timeout);
-			snd_soc_update_bits(codec, TAIKO_A_CDC_COMP1_B2_CTL +
-			w->shift * 8, 0xF0,
-			comp_samp_params[rate].rms_meter_div_fact);
-			snd_soc_update_bits(codec, TAIKO_A_CDC_COMP1_B3_CTL +
-			w->shift * 8, 0xFF,
-			comp_samp_params[rate].rms_meter_resamp_fact);
-			/* Compander enable -> 0x370/0x378*/
-			snd_soc_update_bits(codec, TAIKO_A_CDC_COMP1_B1_CTL +
-			w->shift * 8, 0x03, 0x03);
-		}
+		/* Set sample rate dependent parameter */
+		snd_soc_update_bits(codec,
+				    TAIKO_A_CDC_COMP0_FS_CFG + (comp * 8),
+				    0x07, rate);
+		snd_soc_write(codec, TAIKO_A_CDC_COMP0_B3_CTL + (comp * 8),
+			      comp_params->rms_meter_resamp_fact);
+		snd_soc_update_bits(codec,
+				    TAIKO_A_CDC_COMP0_B2_CTL + (comp * 8),
+				    0x0F, comp_params->peak_det_timeout);
+		snd_soc_update_bits(codec,
+				    TAIKO_A_CDC_COMP0_B2_CTL + (comp * 8),
+				    0xF0, comp_params->rms_meter_div_fact << 4);
+		/* Compander enable */
+		snd_soc_update_bits(codec, TAIKO_A_CDC_COMP0_B1_CTL +
+				    (comp * 8), emask, emask);
 		break;
 	case SND_SOC_DAPM_PRE_PMD:
-		/* Halt the compander*/
-		if (taiko->comp_enabled[w->shift] != 0) {
-			snd_soc_update_bits(codec, TAIKO_A_CDC_COMP1_B1_CTL +
-				w->shift * 8, 1 << 2, 1 << 2);
-		}
+		/* Halt compander */
+		snd_soc_update_bits(codec,
+				    TAIKO_A_CDC_COMP0_B1_CTL + (comp * 8),
+				    1 << 2, 1 << 2);
+		/* Wait up to a second for shutdown complete */
+		timeout = jiffies + HZ;
+		do {
+			if ((snd_soc_read(codec,
+					  TAIKO_A_CDC_COMP0_SHUT_DOWN_STATUS +
+					  (comp * 8)) & mask) == mask)
+				break;
+		} while (!(timedout = time_after(jiffies, timeout)));
+		pr_debug("%s: Compander %d shutdown %s in %dms\n", __func__,
+			 comp, timedout ? "timedout" : "completed",
+			 jiffies_to_msecs(jiffies - (timeout - HZ)));
 		break;
 	case SND_SOC_DAPM_POST_PMD:
-		/* Restore the gain */
-		if (taiko->comp_enabled[w->shift] != 0) {
-			taiko_config_gain_compander(codec, w->shift,
-					taiko->comp_enabled[w->shift], event);
-			/* Disable the compander*/
-			snd_soc_update_bits(codec, TAIKO_A_CDC_COMP1_B1_CTL +
-					w->shift * 8, 0x03, 0x00);
-			/* Turn off the clock for compander in pair*/
-			snd_soc_update_bits(codec, TAIKO_A_CDC_CLK_RX_B2_CTL,
-				0x03 << comp_shift[w->shift], 0);
-		}
+		/* Disable compander */
+		snd_soc_update_bits(codec,
+				    TAIKO_A_CDC_COMP0_B1_CTL + (comp * 8),
+				    emask, 0x00);
+		/* Turn off the clock for compander in pair */
+		snd_soc_update_bits(codec, TAIKO_A_CDC_CLK_RX_B2_CTL,
+				    mask << comp_shift[comp], 0);
+		/* Set gain source to register */
+		taiko_config_gain_compander(codec, comp, false);
 		break;
 	}
 	return 0;
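
The SND_SOC_DAPM_PRE_PMD leg above halts the compander and then polls its shutdown-status register for up to one second before powering down. A hedged sketch of that bounded-poll pattern is shown below; comp_wait_shutdown() is illustrative only, and the 8-register stride between compander blocks follows the COMP0/COMP1/COMP2 register layout used in this patch.

static bool comp_wait_shutdown(struct snd_soc_codec *codec, int comp, int mask)
{
	unsigned long timeout = jiffies + HZ;	/* allow up to one second */

	do {
		/* each compander's register block sits 8 addresses apart */
		if ((snd_soc_read(codec, TAIKO_A_CDC_COMP0_SHUT_DOWN_STATUS +
				  (comp * 8)) & mask) == mask)
			return true;	/* all requested channels shut down */
	} while (!time_after(jiffies, timeout));

	return false;			/* gave up after the timeout */
}
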
@@ -962,10 +959,12 @@
 	SOC_SINGLE_MULTI_EXT("IIR2 Band5", IIR2, BAND5, 255, 0, 5,
 	taiko_get_iir_band_audio_mixer, taiko_put_iir_band_audio_mixer),
 
-	SOC_SINGLE_EXT("COMP1 Switch", SND_SOC_NOPM, 1, COMPANDER_1, 0,
-				   taiko_get_compander, taiko_set_compander),
-	SOC_SINGLE_EXT("COMP2 Switch", SND_SOC_NOPM, 0, COMPANDER_2, 0,
-				   taiko_get_compander, taiko_set_compander),
+	SOC_SINGLE_EXT("COMP0 Switch", SND_SOC_NOPM, COMPANDER_0, 1, 0,
+		       taiko_get_compander, taiko_set_compander),
+	SOC_SINGLE_EXT("COMP1 Switch", SND_SOC_NOPM, COMPANDER_1, 1, 0,
+		       taiko_get_compander, taiko_set_compander),
+	SOC_SINGLE_EXT("COMP2 Switch", SND_SOC_NOPM, COMPANDER_2, 1, 0,
+		       taiko_get_compander, taiko_set_compander),
 
 };
 
@@ -1540,45 +1539,50 @@
 			return -EINVAL;
 		}
 	}
-	switch (dai_id) {
-	case AIF1_CAP:
-	case AIF2_CAP:
-	case AIF3_CAP:
-		/* only add to the list if value not set
-		 */
-		if (enable && !(widget->value & 1 << port_id)) {
-			if (wcd9xxx_tx_vport_validation(
+	if (taiko_p->intf_type == WCD9XXX_INTERFACE_TYPE_SLIMBUS) {
+		switch (dai_id) {
+		case AIF1_CAP:
+		case AIF2_CAP:
+		case AIF3_CAP:
+			/* only add to the list if value not set
+			 */
+			if (enable && !(widget->value & 1 << port_id)) {
+				if (wcd9xxx_tx_vport_validation(
 						vport_check_table[dai_id],
 						port_id,
 						taiko_p->dai)) {
-				pr_info("%s: TX%u is used by other virtual port\n",
-					__func__, port_id + 1);
-				mutex_unlock(&codec->mutex);
-				return -EINVAL;
-			}
-			widget->value |= 1 << port_id;
-			list_add_tail(&core->tx_chs[port_id].list,
+					pr_debug("%s: TX%u is used by other virtual port\n",
+						__func__,
+						port_id + 1);
+					mutex_unlock(&codec->mutex);
+					return -EINVAL;
+				}
+				widget->value |= 1 << port_id;
+				list_add_tail(&core->tx_chs[port_id].list,
 				      &taiko_p->dai[dai_id].wcd9xxx_ch_list
-				      );
-		} else if (!enable && (widget->value & 1 << port_id)) {
-			widget->value &= ~(1 << port_id);
-			list_del_init(&core->tx_chs[port_id].list);
-		} else {
-			if (enable)
-				pr_info("%s: TX%u port is used by this virtual port\n",
-					__func__, port_id + 1);
-			else
-				pr_info("%s: TX%u port is not used by this virtual port\n",
-					__func__, port_id + 1);
-			/* avoid update power function */
+					      );
+			} else if (!enable && (widget->value & 1 << port_id)) {
+				widget->value &= ~(1 << port_id);
+				list_del_init(&core->tx_chs[port_id].list);
+			} else {
+				if (enable)
+					pr_debug("%s: TX%u port is used by this virtual port\n",
+						__func__,
+						port_id + 1);
+				else
+					pr_debug("%s: TX%u port is not used by this virtual port\n",
+						__func__,
+						port_id + 1);
+				/* avoid update power function */
+				mutex_unlock(&codec->mutex);
+				return 0;
+			}
+			break;
+		default:
+			pr_err("Unknown AIF %d\n", dai_id);
 			mutex_unlock(&codec->mutex);
-			return 0;
+			return -EINVAL;
 		}
-		break;
-	default:
-		pr_err("Unknown AIF %d\n", dai_id);
-		mutex_unlock(&codec->mutex);
-		return -EINVAL;
 	}
 	pr_debug("%s: name %s sname %s updated value %u shift %d\n", __func__,
 		widget->name, widget->sname, widget->value, widget->shift);
@@ -2440,10 +2444,10 @@
 	{"SLIM RX3", NULL, "RX_I2S_CLK"},
 	{"SLIM RX4", NULL, "RX_I2S_CLK"},
 
-	{"SLIM TX7", NULL, "TX_I2S_CLK"},
-	{"SLIM TX8", NULL, "TX_I2S_CLK"},
-	{"SLIM TX9", NULL, "TX_I2S_CLK"},
-	{"SLIM TX10", NULL, "TX_I2S_CLK"},
+	{"SLIM TX7 MUX", NULL, "TX_I2S_CLK"},
+	{"SLIM TX8 MUX", NULL, "TX_I2S_CLK"},
+	{"SLIM TX9 MUX", NULL, "TX_I2S_CLK"},
+	{"SLIM TX10 MUX", NULL, "TX_I2S_CLK"},
 };
 
 static const struct snd_soc_dapm_route audio_map[] = {
@@ -2667,6 +2671,7 @@
 	{"LINEOUT4 DAC", NULL, "RX_BIAS"},
 	{"SPK DAC", NULL, "RX_BIAS"},
 
+	{"RX7 MIX1", NULL, "COMP0_CLK"},
 	{"RX1 MIX1", NULL, "COMP1_CLK"},
 	{"RX2 MIX1", NULL, "COMP1_CLK"},
 	{"RX3 MIX1", NULL, "COMP2_CLK"},
@@ -3103,7 +3108,11 @@
 static int taiko_set_dai_sysclk(struct snd_soc_dai *dai,
 		int clk_id, unsigned int freq, int dir)
 {
-	pr_debug("%s\n", __func__);
+	struct snd_soc_codec *codec = dai->codec;
+	if (freq == TAIKO_MCLK_CLK_12P288MHZ)
+		snd_soc_write(codec, TAIKO_A_CHIP_CTL, 0x04);
+	else if (freq == TAIKO_MCLK_CLK_9P6HZ)
+		snd_soc_write(codec, TAIKO_A_CHIP_CTL, 0x0A);
 	return 0;
 }
 
@@ -3621,6 +3630,37 @@
 	},
 };
 
+static int taiko_codec_enable_slim_chmask(struct wcd9xxx_codec_dai_data *dai,
+					  bool up)
+{
+	int ret = 0;
+	struct wcd9xxx_ch *ch;
+
+	if (up) {
+		list_for_each_entry(ch, &dai->wcd9xxx_ch_list, list) {
+			ret = wcd9xxx_get_slave_port(ch->ch_num);
+			if (ret < 0) {
+				pr_err("%s: Invalid slave port ID: %d\n",
+				       __func__, ret);
+				ret = -EINVAL;
+			} else {
+				set_bit(ret, &dai->ch_mask);
+			}
+		}
+	} else {
+		ret = wait_event_timeout(dai->dai_wait, (dai->ch_mask == 0),
+					 msecs_to_jiffies(
+						     TAIKO_SLIM_CLOSE_TIMEOUT));
+		if (!ret) {
+			pr_err("%s: Slim close tx/rx wait timeout\n", __func__);
+			ret = -ETIMEDOUT;
+		} else {
+			ret = 0;
+		}
+	}
+	return ret;
+}
+
 static int taiko_codec_enable_slimrx(struct snd_soc_dapm_widget *w,
 				     struct snd_kcontrol *kcontrol,
 				     int event)
@@ -3628,7 +3668,7 @@
 	struct wcd9xxx *core;
 	struct snd_soc_codec *codec = w->codec;
 	struct taiko_priv *taiko_p = snd_soc_codec_get_drvdata(codec);
-	u32  ret = 0;
+	int ret = 0;
 	struct wcd9xxx_codec_dai_data *dai;
 
 	core = dev_get_drvdata(codec->dev->parent);
@@ -3647,6 +3687,7 @@
 
 	switch (event) {
 	case SND_SOC_DAPM_POST_PMU:
+		(void) taiko_codec_enable_slim_chmask(dai, true);
 		ret = wcd9xxx_cfg_slim_sch_rx(core, &dai->wcd9xxx_ch_list,
 					      dai->rate, dai->bit_width,
 					      &dai->grph);
@@ -3654,7 +3695,14 @@
 	case SND_SOC_DAPM_POST_PMD:
 		ret = wcd9xxx_close_slim_sch_rx(core, &dai->wcd9xxx_ch_list,
 						dai->grph);
-		usleep_range(15000, 15000);
+		ret = taiko_codec_enable_slim_chmask(dai, false);
+		if (ret < 0) {
+			ret = wcd9xxx_disconnect_port(core,
+						      &dai->wcd9xxx_ch_list,
+						      dai->grph);
+			pr_debug("%s: Disconnect RX port, ret = %d\n",
+				 __func__, ret);
+		}
 		break;
 	}
 	return ret;
@@ -3685,6 +3733,7 @@
 	dai = &taiko_p->dai[w->shift];
 	switch (event) {
 	case SND_SOC_DAPM_POST_PMU:
+		(void) taiko_codec_enable_slim_chmask(dai, true);
 		ret = wcd9xxx_cfg_slim_sch_tx(core, &dai->wcd9xxx_ch_list,
 					      dai->rate, dai->bit_width,
 					      &dai->grph);
@@ -3692,6 +3741,14 @@
 	case SND_SOC_DAPM_POST_PMD:
 		ret = wcd9xxx_close_slim_sch_tx(core, &dai->wcd9xxx_ch_list,
 						dai->grph);
+		ret = taiko_codec_enable_slim_chmask(dai, false);
+		if (ret < 0) {
+			ret = wcd9xxx_disconnect_port(core,
+						      &dai->wcd9xxx_ch_list,
+						      dai->grph);
+			pr_debug("%s: Disconnect TX port, ret = %d\n"
+				 __func__, ret);
+		}
 		break;
 	}
 	return ret;
@@ -3935,10 +3992,13 @@
 	SND_SOC_DAPM_SUPPLY("LDO_H", TAIKO_A_LDO_H_MODE_1, 7, 0,
 		taiko_codec_enable_ldo_h, SND_SOC_DAPM_POST_PMU),
 
-	SND_SOC_DAPM_SUPPLY("COMP1_CLK", SND_SOC_NOPM, 0, 0,
+	SND_SOC_DAPM_SUPPLY("COMP0_CLK", SND_SOC_NOPM, 0, 0,
 		taiko_config_compander, SND_SOC_DAPM_PRE_PMU |
 		SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_POST_PMD),
-	SND_SOC_DAPM_SUPPLY("COMP2_CLK", SND_SOC_NOPM, 1, 0,
+	SND_SOC_DAPM_SUPPLY("COMP1_CLK", SND_SOC_NOPM, 1, 0,
+		taiko_config_compander, SND_SOC_DAPM_PRE_PMU |
+		SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_POST_PMD),
+	SND_SOC_DAPM_SUPPLY("COMP2_CLK", SND_SOC_NOPM, 2, 0,
 		taiko_config_compander, SND_SOC_DAPM_PRE_PMU |
 		SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_POST_PMD),
 
@@ -4169,34 +4229,69 @@
 
 };
 
-static unsigned long slimbus_value;
-
 static irqreturn_t taiko_slimbus_irq(int irq, void *data)
 {
 	struct taiko_priv *priv = data;
 	struct snd_soc_codec *codec = priv->codec;
-	int i, j;
+	unsigned long status = 0;
+	int i, j, port_id, k;
+	u32 bit;
 	u8 val;
+	bool tx, cleared;
 
-	for (i = 0; i < WCD9XXX_SLIM_NUM_PORT_REG; i++) {
-		slimbus_value = wcd9xxx_interface_reg_read(codec->control_data,
-			TAIKO_SLIM_PGD_PORT_INT_STATUS0 + i);
-		for_each_set_bit(j, &slimbus_value, BITS_PER_BYTE) {
-			val = wcd9xxx_interface_reg_read(codec->control_data,
-				TAIKO_SLIM_PGD_PORT_INT_SOURCE0 + i*8 + j);
-			if (val & 0x1)
-				pr_err_ratelimited(
-				"overflow error on port %x, value %x\n",
-				i*8 + j, val);
-			if (val & 0x2)
-				pr_err_ratelimited(
-				"underflow error on port %x, value %x\n",
-				i*8 + j, val);
+	for (i = TAIKO_SLIM_PGD_PORT_INT_STATUS_RX_0, j = 0;
+	     i <= TAIKO_SLIM_PGD_PORT_INT_STATUS_TX_1; i++, j++) {
+		val = wcd9xxx_interface_reg_read(codec->control_data, i);
+		status |= ((u32)val << (8 * j));
+	}
+
+	for_each_set_bit(j, &status, 32) {
+		tx = (j >= 16 ? true : false);
+		port_id = (tx ? j - 16 : j);
+		val = wcd9xxx_interface_reg_read(codec->control_data,
+					TAIKO_SLIM_PGD_PORT_INT_RX_SOURCE0 + j);
+		if (val & TAIKO_SLIM_IRQ_OVERFLOW)
+			pr_err_ratelimited(
+			    "%s: overflow error on %s port %d, value %x\n",
+			    __func__, (tx ? "TX" : "RX"), port_id, val);
+		if (val & TAIKO_SLIM_IRQ_UNDERFLOW)
+			pr_err_ratelimited(
+			    "%s: underflow error on %s port %d, value %x\n",
+			    __func__, (tx ? "TX" : "RX"), port_id, val);
+		if (val & TAIKO_SLIM_IRQ_PORT_CLOSED) {
+			/*
+			 * INT SOURCE registers are ordered RX then TX, but
+			 * port numbering in the ch_mask runs the opposite way
+			 */
+			bit = (tx ? j - 16 : j + 16);
+			pr_debug("%s: %s port %d closed value %x, bit %u\n",
+				 __func__, (tx ? "TX" : "RX"), port_id, val,
+				 bit);
+			for (k = 0, cleared = false; k < NUM_CODEC_DAIS; k++) {
+				pr_debug("%s: priv->dai[%d].ch_mask = 0x%lx\n",
+					 __func__, k, priv->dai[k].ch_mask);
+				if (test_and_clear_bit(bit,
+						       &priv->dai[k].ch_mask)) {
+					cleared = true;
+					if (!priv->dai[k].ch_mask)
+						wake_up(&priv->dai[k].dai_wait);
+					/*
+					 * There are cases when multiple DAIs
+					 * might be using the same slimbus
+					 * channel. Hence don't break here.
+					 */
+				}
+			}
+			WARN(!cleared,
+			     "Couldn't find slimbus %s port %d for closing\n",
+			     (tx ? "TX" : "RX"), port_id);
 		}
 		wcd9xxx_interface_reg_write(codec->control_data,
-			TAIKO_SLIM_PGD_PORT_INT_CLR0 + i, 0xFF);
-
+					    TAIKO_SLIM_PGD_PORT_INT_CLR_RX_0 +
+					    (j / 8),
+					    1 << (j % 8));
 	}
+
 	return IRQ_HANDLED;
 }
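
The interrupt handler above pairs with taiko_codec_enable_slim_chmask(): the enable path sets one bit per SLIMbus port in the DAI's ch_mask, the handler clears bits as ports report closed and wakes the waiter once the mask drains, and the close path sleeps on that event with a timeout. A self-contained sketch of that hand-off, with placeholder names rather than the driver's own types:

struct port_tracker {
	unsigned long ch_mask;		/* one bit per open port */
	wait_queue_head_t wait;
};

static void tracker_init(struct port_tracker *t)
{
	t->ch_mask = 0;
	init_waitqueue_head(&t->wait);
}

/* enable path: record that a port is in use */
static void tracker_mark_open(struct port_tracker *t, int port)
{
	set_bit(port, &t->ch_mask);
}

/* interrupt handler: a port reported closed */
static void tracker_port_closed(struct port_tracker *t, int port)
{
	if (test_and_clear_bit(port, &t->ch_mask) && !t->ch_mask)
		wake_up(&t->wait);
}

/* close path: sleep until every port has closed or the timeout expires */
static int tracker_wait_all_closed(struct port_tracker *t, unsigned int ms)
{
	if (!wait_event_timeout(t->wait, t->ch_mask == 0,
				msecs_to_jiffies(ms)))
		return -ETIMEDOUT;
	return 0;
}

This is also why the patch adds linux/wait.h and linux/bitops.h to the includes at the top of wcd9320.c.
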
 
@@ -4538,6 +4633,7 @@
 	{TAIKO_A_RX_LINE_2_GAIN, 0x20, 0x20},
 	{TAIKO_A_RX_LINE_3_GAIN, 0x20, 0x20},
 	{TAIKO_A_RX_LINE_4_GAIN, 0x20, 0x20},
+	{TAIKO_A_SPKR_DRV_GAIN, 0x04, 0x04},
 
 	/* CLASS H config */
 	{TAIKO_A_CDC_CONN_CLSH_CTL, 0x3C, 0x14},
@@ -4588,6 +4684,13 @@
 	{TAIKO_A_CDC_CLK_DMIC_B1_CTL, 0xEE, 0x22},
 	{TAIKO_A_CDC_CLK_DMIC_B2_CTL, 0x0E, 0x02},
 
+	/* Compander zone selection */
+	{TAIKO_A_CDC_COMP0_B4_CTL, 0x3F, 0x37},
+	{TAIKO_A_CDC_COMP1_B4_CTL, 0x3F, 0x37},
+	{TAIKO_A_CDC_COMP2_B4_CTL, 0x3F, 0x37},
+	{TAIKO_A_CDC_COMP0_B5_CTL, 0x7F, 0x7F},
+	{TAIKO_A_CDC_COMP1_B5_CTL, 0x7F, 0x7F},
+	{TAIKO_A_CDC_COMP2_B5_CTL, 0x7F, 0x7F},
 };
 
 static void taiko_codec_init_reg(struct snd_soc_codec *codec)
diff --git a/sound/soc/codecs/wcd9320.h b/sound/soc/codecs/wcd9320.h
index 7bc5a57..1fff80c 100644
--- a/sound/soc/codecs/wcd9320.h
+++ b/sound/soc/codecs/wcd9320.h
@@ -23,6 +23,7 @@
 #define TAIKO_CACHE_SIZE TAIKO_NUM_REGISTERS
 
 #define TAIKO_REG_VAL(reg, val)		{reg, 0, val}
+#define TAIKO_MCLK_ID 0
 
 extern const u8 taiko_reg_readable[TAIKO_CACHE_SIZE];
 extern const u8 taiko_reset_reg_defaults[TAIKO_CACHE_SIZE];
diff --git a/sound/soc/msm/Kconfig b/sound/soc/msm/Kconfig
index 894e114..d5cada7 100644
--- a/sound/soc/msm/Kconfig
+++ b/sound/soc/msm/Kconfig
@@ -200,4 +200,15 @@
         default n
         help
          To add support for SoC audio on APQ8060 board
+
+config SND_SOC_MDM9625
+	tristate "SoC Machine driver for MDM9625 boards"
+	depends on ARCH_MSM9625
+	select SND_SOC_QDSP6V2
+	select SND_SOC_MSM_STUB
+	select SND_SOC_WCD9320
+	select SND_SOC_MSM_HOSTLESS_PCM
+	select SND_DYNAMIC_MINORS
+	help
+	 To add support for SoC audio on MDM9625 boards.
 endmenu
diff --git a/sound/soc/msm/Makefile b/sound/soc/msm/Makefile
index a261184..a4c365a 100644
--- a/sound/soc/msm/Makefile
+++ b/sound/soc/msm/Makefile
@@ -84,3 +84,6 @@
 snd-soc-qdsp6v2-objs := msm-dai-fe.o msm-dai-stub.o
 obj-$(CONFIG_SND_SOC_QDSP6V2) += snd-soc-qdsp6v2.o
 
+#for MDM9625 sound card driver
+snd-soc-mdm9625-objs := mdm9625.o
+obj-$(CONFIG_SND_SOC_MDM9625) += snd-soc-mdm9625.o
diff --git a/sound/soc/msm/mdm9615.c b/sound/soc/msm/mdm9615.c
index 0125c1a..76cd625 100644
--- a/sound/soc/msm/mdm9615.c
+++ b/sound/soc/msm/mdm9615.c
@@ -1702,6 +1702,19 @@
 
 	return 0;
 }
+
+static int mdm9615_be_hw_params_fixup(struct snd_soc_pcm_runtime *rtd,
+			struct snd_pcm_hw_params *params)
+{
+	struct snd_interval *rate = hw_param_interval(params,
+						      SNDRV_PCM_HW_PARAM_RATE);
+
+	pr_debug("%s()\n", __func__);
+	rate->min = rate->max = 48000;
+
+	return 0;
+}
+
 static int mdm9615_aux_pcm_get_gpios(void)
 {
 	int ret = 0;
@@ -2028,6 +2041,15 @@
 		.be_id = MSM_FRONTEND_DAI_DTMF_RX,
 		.no_host_mode = SND_SOC_DAI_LINK_NO_HOST,
 	},
+	{
+		.name = "DTMF TX",
+		.stream_name = "DTMF TX",
+		.cpu_dai_name = "msm-dai-stub",
+		.platform_name  = "msm-pcm-dtmf",
+		.codec_name = "msm-stub-codec.1",
+		.codec_dai_name = "msm-stub-tx",
+		.ignore_suspend = 1,
+	},
 
 	/* Backend BT DAI Links */
 	{
@@ -2125,6 +2147,43 @@
 		.be_hw_params_fixup = mdm9615_auxpcm_be_params_fixup,
 		.ops = &mdm9615_sec_auxpcm_be_ops,
 	},
+	/* Incall Music BACK END DAI Link */
+	{
+		.name = LPASS_BE_VOICE_PLAYBACK_TX,
+		.stream_name = "Voice Farend Playback",
+		.cpu_dai_name = "msm-dai-q6.32773",
+		.platform_name = "msm-pcm-routing",
+		.codec_name     = "msm-stub-codec.1",
+		.codec_dai_name = "msm-stub-rx",
+		.no_pcm = 1,
+		.be_id = MSM_BACKEND_DAI_VOICE_PLAYBACK_TX,
+		.be_hw_params_fixup = mdm9615_be_hw_params_fixup,
+	},
+	/* Incall Record Uplink BACK END DAI Link */
+	{
+		.name = LPASS_BE_INCALL_RECORD_TX,
+		.stream_name = "Voice Uplink Capture",
+		.cpu_dai_name = "msm-dai-q6.32772",
+		.platform_name = "msm-pcm-routing",
+		.codec_name     = "msm-stub-codec.1",
+		.codec_dai_name = "msm-stub-tx",
+		.no_pcm = 1,
+		.be_id = MSM_BACKEND_DAI_INCALL_RECORD_TX,
+		.be_hw_params_fixup = mdm9615_be_hw_params_fixup,
+	},
+	/* Incall Record Downlink BACK END DAI Link */
+	{
+		.name = LPASS_BE_INCALL_RECORD_RX,
+		.stream_name = "Voice Downlink Capture",
+		.cpu_dai_name = "msm-dai-q6.32771",
+		.platform_name = "msm-pcm-routing",
+		.codec_name     = "msm-stub-codec.1",
+		.codec_dai_name = "msm-stub-tx",
+		.no_pcm = 1,
+		.be_id = MSM_BACKEND_DAI_INCALL_RECORD_RX,
+		.be_hw_params_fixup = mdm9615_be_hw_params_fixup,
+		.ignore_pmdown_time = 1, /* this dailink has playback support */
+	},
 };
 
 static struct snd_soc_dai_link mdm9615_dai_i2s_tabla[] = {
diff --git a/sound/soc/msm/mdm9625.c b/sound/soc/msm/mdm9625.c
new file mode 100644
index 0000000..b1822f6
--- /dev/null
+++ b/sound/soc/msm/mdm9625.c
@@ -0,0 +1,798 @@
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <linux/of_gpio.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/qpnp/clkdiv.h>
+#include <sound/core.h>
+#include <sound/soc.h>
+#include <sound/soc-dapm.h>
+#include <sound/pcm.h>
+#include <sound/jack.h>
+#include <asm/mach-types.h>
+#include <mach/socinfo.h>
+#include <qdsp6v2/msm-pcm-routing-v2.h>
+#include "../codecs/wcd9320.h"
+
+/* MI2S GPIO SECTION */
+
+#define GPIO_MI2S_WS     12
+#define GPIO_MI2S_SCLK   15
+#define GPIO_MI2S_DOUT   14
+#define GPIO_MI2S_DIN    13
+#define GPIO_MI2S_MCLK   71
+
+/* Spk control */
+#define MDM9625_SPK_ON 1
+
+/* MDM9625 runs Taiko at 12.288 MHz.
+ * At present MDM supports 12.288 MHz
+ * only. Taiko also supports 9.6 MHz.
+ */
+#define MDM_MCLK_CLK_12P288MHZ 12288000
+#define MDM_MCLK_CLK_9P6HZ 9600000
+#define MDM_IBIT_CLK_DIV_1P56MHZ 7
+
+/* Machine driver Name*/
+#define MDM9625_MACHINE_DRV_NAME "mdm9625-asoc-taiko"
+
+struct mdm9625_machine_data {
+	u32 mclk_freq;
+};
+
+/* MI2S clock */
+struct mdm_mi2s_clk {
+	struct clk *cdc_cr_clk;
+	struct clk *cdc_osr_clk;
+	struct clk *cdc_bit_clk;
+	bool clk_enable;
+
+};
+static struct mdm_mi2s_clk prim_clk;
+
+/* I2S GPIO */
+struct request_gpio {
+	unsigned gpio_no;
+	char *gpio_name;
+};
+static bool cdc_mclk_init;
+static struct mutex cdc_mclk_mutex;
+static int mdm9625_mi2s_rx_ch = 1;
+static int mdm9625_mi2s_tx_ch = 1;
+static int msm_spk_control;
+static atomic_t mi2s_ref_count;
+
+/* MI2S GPIO CONFIG */
+static struct request_gpio mi2s_gpio[] = {
+	{
+		.gpio_no = GPIO_MI2S_WS,
+		.gpio_name = "MI2S_WS",
+	},
+	{
+		.gpio_no = GPIO_MI2S_SCLK,
+		.gpio_name = "MI2S_SCLK",
+	},
+	{
+		.gpio_no = GPIO_MI2S_DOUT,
+		.gpio_name = "MI2S_DOUT",
+	},
+	{
+		.gpio_no = GPIO_MI2S_DIN,
+		.gpio_name = "MI2S_DIN",
+	},
+	{
+		.gpio_no = GPIO_MI2S_MCLK,
+		.gpio_name = "MI2S_MCLK",
+	},
+};
+
+static int mdm9625_enable_codec_ext_clk(struct snd_soc_codec *codec,
+					int enable, bool dapm);
+
+void *def_taiko_mbhc_cal(void);
+
+static struct wcd9xxx_mbhc_config mbhc_cfg = {
+	.read_fw_bin = false,
+	.calibration = NULL,
+	.micbias = MBHC_MICBIAS2,
+	.mclk_cb_fn = mdm9625_enable_codec_ext_clk,
+	.mclk_rate = MDM_MCLK_CLK_12P288MHZ,
+	.gpio = 0,
+	.gpio_irq = 0,
+	.gpio_level_insert = 1,
+	.detect_extn_cable = true,
+	.insert_detect = true,
+	.swap_gnd_mic = NULL,
+};
+
+#define WCD9XXX_MBHC_DEF_BUTTONS 8
+#define WCD9XXX_MBHC_DEF_RLOADS 5
+
+
+static bool gpio_enable;
+
+static int mdm9625_set_mi2s_gpio(void)
+{
+	int rtn = 0;
+	int i;
+	int j;
+
+	if (gpio_enable == false) {
+		for (i = 0; i < ARRAY_SIZE(mi2s_gpio); i++) {
+			rtn = gpio_request(mi2s_gpio[i].gpio_no,
+					   mi2s_gpio[i].gpio_name);
+			pr_debug("%s: gpio = %d, gpio name = %s\n"
+				 "rtn = %d\n", __func__,
+				 mi2s_gpio[i].gpio_no,
+				 mi2s_gpio[i].gpio_name,
+				 rtn);
+			if (rtn) {
+				pr_err("%s: Failed to request gpio %d\n",
+					__func__, mi2s_gpio[i].gpio_no);
+				/* Release all the GPIO on failure */
+				for (j = i; j >= 0; j--)
+					gpio_free(mi2s_gpio[j].gpio_no);
+				goto err;
+			}
+		}
+		gpio_enable = true;
+	}
+err:
+	return rtn;
+}
+
+static int mdm9625_mi2s_free_gpios(void)
+{
+	int i;
+	pr_debug("%s:", __func__);
+	for (i = 0; i < ARRAY_SIZE(mi2s_gpio); i++)
+		gpio_free(mi2s_gpio[i].gpio_no);
+	gpio_enable = false;
+	return 0;
+}
+static int mdm9625_mi2s_clk_ctl(struct snd_soc_pcm_runtime *rtd, bool enable)
+{
+	struct mdm_mi2s_clk *clk = &prim_clk;
+	struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+	struct snd_soc_card *card = rtd->card;
+	struct mdm9625_machine_data *pdata = snd_soc_card_get_drvdata(card);
+	int ret = 0;
+
+	if (pdata == NULL) {
+		pr_err("%s: platform data is null\n", __func__);
+		return -ENODEV;
+	}
+
+	if (enable) {
+		if (clk->clk_enable == true) {
+			pr_info("%s:Device clock already enabled\n", __func__);
+			return 0;
+		}
+		/* Set up core clock. */
+		clk->cdc_cr_clk = clk_get(cpu_dai->dev, "core_clk");
+		if (IS_ERR(clk->cdc_cr_clk)) {
+			pr_err("%s: Failed to get core clk %ld\n"
+			       "CPU dai name %s\n", __func__,
+			       PTR_ERR(clk->cdc_cr_clk),
+			       cpu_dai->dev->driver->name);
+			return -ENODEV;
+		}
+		/* osr clock */
+		clk->cdc_osr_clk = clk_get(cpu_dai->dev, "osr_clk");
+		if (IS_ERR(clk->cdc_osr_clk)) {
+			pr_err("%s: Failed to request OSR %ld\n"
+			       "CPU dai name %s\n", __func__,
+			       PTR_ERR(clk->cdc_osr_clk),
+			       cpu_dai->dev->driver->name);
+			clk_put(clk->cdc_cr_clk);
+			return -ENODEV;
+		}
+		/* ibit clock */
+		clk->cdc_bit_clk = clk_get(cpu_dai->dev, "ibit_clk");
+		if (IS_ERR(clk->cdc_bit_clk)) {
+			pr_err("%s: Failed to request Bit %ld\n"
+			       "CPU dai name %s\n", __func__,
+			       PTR_ERR(clk->cdc_bit_clk),
+			       cpu_dai->dev->driver->name);
+			clk_put(clk->cdc_cr_clk);
+			clk_put(clk->cdc_osr_clk);
+			return -ENODEV;
+		}
+		/* Set rate core and ibit clock */
+		clk_set_rate(clk->cdc_cr_clk, pdata->mclk_freq);
+		clk_set_rate(clk->cdc_bit_clk, MDM_IBIT_CLK_DIV_1P56MHZ);
+
+		/* Enable clocks. The core clock need not be enabled
+		 * explicitly; enabling the branch clocks indirectly
+		 * enables the core clock.
+		 */
+		ret = clk_prepare_enable(clk->cdc_osr_clk);
+		if (ret != 0) {
+			pr_err("Fail to enable cdc_osr_clk\n");
+			goto exit_osrclk_err;
+		}
+		ret = clk_prepare_enable(clk->cdc_bit_clk);
+		if (ret != 0) {
+			pr_err("Fail to enable cdc_bit_clk\n");
+			goto exit_bclk_err;
+		}
+		clk->clk_enable = true;
+		return ret;
+	} else {
+		clk->clk_enable = false;
+		ret = 0;
+		goto exit_bclk_err;
+	}
+exit_bclk_err:
+	clk_disable_unprepare(clk->cdc_bit_clk);
+	clk_put(clk->cdc_bit_clk);
+exit_osrclk_err:
+	clk_disable_unprepare(clk->cdc_osr_clk);
+	clk_put(clk->cdc_osr_clk);
+	clk_put(clk->cdc_cr_clk);
+	clk->cdc_cr_clk = NULL;
+	clk->cdc_bit_clk = NULL;
+	clk->cdc_osr_clk = NULL;
+	clk->clk_enable = false;
+	return ret;
+}
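
mdm9625_mi2s_clk_ctl() above takes the core, OSR and bit clocks and releases everything in reverse order when any step fails. A stripped-down sketch of that acquire/enable/unwind pattern, assuming the same "osr_clk" and "ibit_clk" consumer names; demo_enable_two_clks() is illustrative and not part of the patch.

static int demo_enable_two_clks(struct device *dev)
{
	struct clk *osr, *bit;
	int ret;

	osr = clk_get(dev, "osr_clk");
	if (IS_ERR(osr))
		return PTR_ERR(osr);

	bit = clk_get(dev, "ibit_clk");
	if (IS_ERR(bit)) {
		ret = PTR_ERR(bit);
		goto put_osr;
	}

	ret = clk_prepare_enable(osr);
	if (ret)
		goto put_bit;

	ret = clk_prepare_enable(bit);
	if (ret)
		goto disable_osr;

	return 0;

disable_osr:
	clk_disable_unprepare(osr);
put_bit:
	clk_put(bit);
put_osr:
	clk_put(osr);
	return ret;
}
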
+
+static void mdm9625_mi2s_snd_shutdown(struct snd_pcm_substream *substream)
+{
+	struct snd_soc_pcm_runtime *rtd = substream->private_data;
+	int ret;
+	if (atomic_dec_return(&mi2s_ref_count) == 0) {
+		mdm9625_mi2s_free_gpios();
+		ret = mdm9625_mi2s_clk_ctl(rtd, false);
+		if (ret < 0)
+			pr_err("%s:clock disable failed\n", __func__);
+	}
+}
+
+static int mdm9625_mi2s_startup(struct snd_pcm_substream *substream)
+{
+	struct snd_soc_pcm_runtime *rtd = substream->private_data;
+	struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+	struct snd_soc_dai *codec_dai = rtd->codec_dai;
+	int ret = 0;
+
+	if (atomic_inc_return(&mi2s_ref_count) == 1) {
+		mdm9625_set_mi2s_gpio();
+		ret = mdm9625_mi2s_clk_ctl(rtd, true);
+		if (ret < 0) {
+			pr_err("set format for codec dai failed\n");
+			return ret;
+		}
+	}
+	/* This sets the CONFIG PARAMETER WS_SRC.
+	 * 1 means internal clock master mode.
+	 * 0 means external clock slave mode.
+	 */
+	ret = snd_soc_dai_set_fmt(cpu_dai, SND_SOC_DAIFMT_CBS_CFS);
+	if (ret < 0)
+		pr_err("set fmt cpu dai failed\n");
+
+	ret = snd_soc_dai_set_fmt(codec_dai, SND_SOC_DAIFMT_CBS_CFS);
+	if (ret < 0)
+		pr_err("set fmt for codec dai failed\n");
+
+	return ret;
+}
+
+static int set_codec_mclk(struct snd_soc_pcm_runtime *rtd)
+{
+	int ret = 0;
+	struct snd_soc_dai *codec_dai = rtd->codec_dai;
+	struct snd_soc_card *card = rtd->card;
+	struct mdm9625_machine_data *pdata = snd_soc_card_get_drvdata(card);
+
+	if (cdc_mclk_init == true)
+		return 0;
+	ret = snd_soc_dai_set_sysclk(codec_dai, TAIKO_MCLK_ID, pdata->mclk_freq,
+				     SND_SOC_CLOCK_IN);
+	if (ret < 0) {
+		pr_err("%s: Set codec sysclk failed %d\n", __func__, ret);
+		return ret;
+	}
+	cdc_mclk_init = true;
+	return 0;
+}
+
+static int mdm9625_mi2s_rx_be_hw_params_fixup(struct snd_soc_pcm_runtime *rtd,
+					     struct snd_pcm_hw_params *params)
+{
+	struct snd_interval *rate = hw_param_interval(params,
+						      SNDRV_PCM_HW_PARAM_RATE);
+	struct snd_interval *channels = hw_param_interval(params,
+					SNDRV_PCM_HW_PARAM_CHANNELS);
+	rate->min = rate->max = 48000;
+	channels->min = channels->max = mdm9625_mi2s_rx_ch;
+	set_codec_mclk(rtd);
+	return 0;
+}
+
+static int mdm9625_mi2s_tx_be_hw_params_fixup(struct snd_soc_pcm_runtime *rtd,
+					     struct snd_pcm_hw_params *params)
+{
+	struct snd_interval *rate = hw_param_interval(params,
+						      SNDRV_PCM_HW_PARAM_RATE);
+	struct snd_interval *channels = hw_param_interval(params,
+						SNDRV_PCM_HW_PARAM_CHANNELS);
+	rate->min = rate->max = 48000;
+	channels->min = channels->max = mdm9625_mi2s_tx_ch;
+	set_codec_mclk(rtd);
+	return 0;
+}
+
+
+static int mdm9625_mi2s_rx_ch_get(struct snd_kcontrol *kcontrol,
+				 struct snd_ctl_elem_value *ucontrol)
+{
+	pr_debug("%s: mdm9625_mi2s_rx_ch = %d\n", __func__,
+			mdm9625_mi2s_rx_ch);
+	ucontrol->value.integer.value[0] = mdm9625_mi2s_rx_ch - 1;
+	return 0;
+}
+
+static int mdm9625_mi2s_rx_ch_put(struct snd_kcontrol *kcontrol,
+				 struct snd_ctl_elem_value *ucontrol)
+{
+	mdm9625_mi2s_rx_ch = ucontrol->value.integer.value[0] + 1;
+	pr_debug("%s: mdm9625_mi2s_rx_ch = %d\n", __func__,
+			mdm9625_mi2s_rx_ch);
+	return 1;
+}
+
+static int mdm9625_mi2s_tx_ch_get(struct snd_kcontrol *kcontrol,
+				 struct snd_ctl_elem_value *ucontrol)
+{
+	pr_debug("%s: mdm9625_mi2s_tx_ch = %d\n", __func__,
+			mdm9625_mi2s_tx_ch);
+	ucontrol->value.integer.value[0] = mdm9625_mi2s_tx_ch - 1;
+	return 0;
+}
+
+static int mdm9625_mi2s_tx_ch_put(struct snd_kcontrol *kcontrol,
+				 struct snd_ctl_elem_value *ucontrol)
+{
+	mdm9625_mi2s_tx_ch = ucontrol->value.integer.value[0] + 1;
+	pr_debug("%s: mdm9625_mi2s_tx_ch = %d\n", __func__,
+			mdm9625_mi2s_tx_ch);
+	return 1;
+}
+
+
+static int mdm9625_mi2s_get_spk(struct snd_kcontrol *kcontrol,
+		       struct snd_ctl_elem_value *ucontrol)
+{
+	pr_debug("%s: msm_spk_control = %d", __func__, msm_spk_control);
+	ucontrol->value.integer.value[0] = msm_spk_control;
+	return 0;
+}
+
+static void mdm_ext_control(struct snd_soc_codec *codec)
+{
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
+	pr_debug("%s: msm_spk_control = %d", __func__, msm_spk_control);
+	mutex_lock(&dapm->codec->mutex);
+	if (msm_spk_control == MDM9625_SPK_ON) {
+		snd_soc_dapm_enable_pin(dapm, "Ext Spk Bottom Pos");
+		snd_soc_dapm_enable_pin(dapm, "Ext Spk Bottom Neg");
+		snd_soc_dapm_enable_pin(dapm, "Ext Spk Top Pos");
+		snd_soc_dapm_enable_pin(dapm, "Ext Spk Top Neg");
+	} else {
+		snd_soc_dapm_disable_pin(dapm, "Ext Spk Bottom Pos");
+		snd_soc_dapm_disable_pin(dapm, "Ext Spk Bottom Neg");
+		snd_soc_dapm_disable_pin(dapm, "Ext Spk Top Pos");
+		snd_soc_dapm_disable_pin(dapm, "Ext Spk Top Neg");
+	}
+	snd_soc_dapm_sync(dapm);
+	mutex_unlock(&dapm->codec->mutex);
+}
+
+static int mdm9625_mi2s_set_spk(struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
+	pr_debug("%s()\n", __func__);
+	if (msm_spk_control == ucontrol->value.integer.value[0])
+		return 0;
+	msm_spk_control = ucontrol->value.integer.value[0];
+	mdm_ext_control(codec);
+	return 1;
+}
+
+static int mdm9625_enable_codec_ext_clk(struct snd_soc_codec *codec,
+					int enable, bool dapm)
+{
+	int ret = 0;
+	pr_debug("%s: enable = %d  codec name %s\n", __func__,
+		enable, codec->name);
+	mutex_lock(&cdc_mclk_mutex);
+	if (enable)
+		taiko_mclk_enable(codec, 1, dapm);
+	else
+		taiko_mclk_enable(codec, 0, dapm);
+	mutex_unlock(&cdc_mclk_mutex);
+	return ret;
+}
+
+static int mdm9625_mclk_event(struct snd_soc_dapm_widget *w,
+			      struct snd_kcontrol *kcontrol, int event)
+{
+	pr_debug("%s: event = %d\n", __func__, event);
+	switch (event) {
+	case SND_SOC_DAPM_PRE_PMU:
+		return mdm9625_enable_codec_ext_clk(w->codec, 1, true);
+	case SND_SOC_DAPM_POST_PMD:
+		return mdm9625_enable_codec_ext_clk(w->codec, 0, true);
+	}
+	return 0;
+}
+
+
+static const struct snd_soc_dapm_widget mdm9625_dapm_widgets[] = {
+
+	SND_SOC_DAPM_SUPPLY("MCLK",  SND_SOC_NOPM, 0, 0,
+	mdm9625_mclk_event, SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+	SND_SOC_DAPM_SPK("Ext Spk Bottom Pos", NULL),
+	SND_SOC_DAPM_SPK("Ext Spk Bottom Neg", NULL),
+	SND_SOC_DAPM_SPK("Ext Spk Top Pos", NULL),
+	SND_SOC_DAPM_SPK("Ext Spk Top Neg", NULL),
+	SND_SOC_DAPM_MIC("Handset Mic", NULL),
+	SND_SOC_DAPM_MIC("Headset Mic", NULL),
+	SND_SOC_DAPM_MIC("ANCRight Headset Mic", NULL),
+	SND_SOC_DAPM_MIC("ANCLeft Headset Mic", NULL),
+	SND_SOC_DAPM_MIC("Digital Mic1", NULL),
+	SND_SOC_DAPM_MIC("Digital Mic2", NULL),
+	SND_SOC_DAPM_MIC("Digital Mic3", NULL),
+	SND_SOC_DAPM_MIC("Digital Mic4", NULL),
+	SND_SOC_DAPM_MIC("Digital Mic5", NULL),
+	SND_SOC_DAPM_MIC("Digital Mic6", NULL),
+};
+
+static const char *const spk_function[] = {"Off", "On"};
+static const char *const mi2s_rx_ch_text[] = {"One", "Two"};
+static const char *const mi2s_tx_ch_text[] = {"One", "Two"};
+
+static const struct soc_enum mdm9625_enum[] = {
+	SOC_ENUM_SINGLE_EXT(2, spk_function),
+	SOC_ENUM_SINGLE_EXT(2, mi2s_rx_ch_text),
+	SOC_ENUM_SINGLE_EXT(2, mi2s_tx_ch_text),
+};
+
+static const struct snd_kcontrol_new mdm_snd_controls[] = {
+	SOC_ENUM_EXT("Speaker Function", mdm9625_enum[0],
+				 mdm9625_mi2s_get_spk,
+				 mdm9625_mi2s_set_spk),
+	SOC_ENUM_EXT("MI2S_RX Channels", mdm9625_enum[1],
+				 mdm9625_mi2s_rx_ch_get,
+				 mdm9625_mi2s_rx_ch_put),
+	SOC_ENUM_EXT("MI2S_TX Channels", mdm9625_enum[2],
+				 mdm9625_mi2s_tx_ch_get,
+				 mdm9625_mi2s_tx_ch_put),
+};
+
+static int mdm9625_mi2s_audrx_init(struct snd_soc_pcm_runtime *rtd)
+{
+	int err;
+	struct snd_soc_codec *codec = rtd->codec;
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
+	struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+	pr_info("%s(), dev_name %s\n", __func__, dev_name(cpu_dai->dev));
+
+	rtd->pmdown_time = 0;
+	err = snd_soc_add_codec_controls(codec, mdm_snd_controls,
+					 ARRAY_SIZE(mdm_snd_controls));
+	if (err < 0)
+		return err;
+
+	snd_soc_dapm_new_controls(dapm, mdm9625_dapm_widgets,
+				  ARRAY_SIZE(mdm9625_dapm_widgets));
+
+	/* After enabling DAPM pins, DAPM sync
+	 * always needs to be called.
+	 */
+	snd_soc_dapm_enable_pin(dapm, "Ext Spk Bottom Pos");
+	snd_soc_dapm_enable_pin(dapm, "Ext Spk Bottom Neg");
+	snd_soc_dapm_enable_pin(dapm, "Ext Spk Top Pos");
+	snd_soc_dapm_enable_pin(dapm, "Ext Spk Top Neg");
+	snd_soc_dapm_sync(dapm);
+
+	/* start mbhc */
+	mdm9625_set_mi2s_gpio();
+	mdm9625_mi2s_clk_ctl(rtd, true);
+	mbhc_cfg.calibration = def_taiko_mbhc_cal();
+	if (mbhc_cfg.calibration)
+		err = taiko_hs_detect(codec, &mbhc_cfg);
+	else
+		err = -ENOMEM;
+	return err;
+}
+
+void *def_taiko_mbhc_cal(void)
+{
+	void *taiko_cal;
+	struct wcd9xxx_mbhc_btn_detect_cfg *btn_cfg;
+	u16 *btn_low, *btn_high;
+	u8 *n_ready, *n_cic, *gain;
+
+	taiko_cal = kzalloc(WCD9XXX_MBHC_CAL_SIZE(WCD9XXX_MBHC_DEF_BUTTONS,
+						WCD9XXX_MBHC_DEF_RLOADS),
+			    GFP_KERNEL);
+	if (!taiko_cal) {
+		pr_err("%s: out of memory\n", __func__);
+		return NULL;
+	}
+
+#define S(X, Y) ((WCD9XXX_MBHC_CAL_GENERAL_PTR(taiko_cal)->X) = (Y))
+	S(t_ldoh, 100);
+	S(t_bg_fast_settle, 100);
+	S(t_shutdown_plug_rem, 255);
+	S(mbhc_nsa, 4);
+	S(mbhc_navg, 4);
+#undef S
+#define S(X, Y) ((WCD9XXX_MBHC_CAL_PLUG_DET_PTR(taiko_cal)->X) = (Y))
+	S(mic_current, TAIKO_PID_MIC_5_UA);
+	S(hph_current, TAIKO_PID_MIC_5_UA);
+	S(t_mic_pid, 100);
+	S(t_ins_complete, 250);
+	S(t_ins_retry, 200);
+#undef S
+#define S(X, Y) ((WCD9XXX_MBHC_CAL_PLUG_TYPE_PTR(taiko_cal)->X) = (Y))
+	S(v_no_mic, 30);
+	S(v_hs_max, 2400);
+#undef S
+#define S(X, Y) ((WCD9XXX_MBHC_CAL_BTN_DET_PTR(taiko_cal)->X) = (Y))
+	S(c[0], 62);
+	S(c[1], 124);
+	S(nc, 1);
+	S(n_meas, 3);
+	S(mbhc_nsc, 11);
+	S(n_btn_meas, 1);
+	S(n_btn_con, 2);
+	S(num_btn, WCD9XXX_MBHC_DEF_BUTTONS);
+	S(v_btn_press_delta_sta, 100);
+	S(v_btn_press_delta_cic, 50);
+#undef S
+	btn_cfg = WCD9XXX_MBHC_CAL_BTN_DET_PTR(taiko_cal);
+	btn_low = wcd9xxx_mbhc_cal_btn_det_mp(btn_cfg, MBHC_BTN_DET_V_BTN_LOW);
+	btn_high = wcd9xxx_mbhc_cal_btn_det_mp(btn_cfg,
+					       MBHC_BTN_DET_V_BTN_HIGH);
+	btn_low[0] = -50;
+	btn_high[0] = 10;
+	btn_low[1] = 11;
+	btn_high[1] = 52;
+	btn_low[2] = 53;
+	btn_high[2] = 94;
+	btn_low[3] = 95;
+	btn_high[3] = 133;
+	btn_low[4] = 134;
+	btn_high[4] = 171;
+	btn_low[5] = 172;
+	btn_high[5] = 208;
+	btn_low[6] = 209;
+	btn_high[6] = 244;
+	btn_low[7] = 245;
+	btn_high[7] = 330;
+	n_ready = wcd9xxx_mbhc_cal_btn_det_mp(btn_cfg, MBHC_BTN_DET_N_READY);
+	n_ready[0] = 80;
+	n_ready[1] = 68;
+	n_cic = wcd9xxx_mbhc_cal_btn_det_mp(btn_cfg, MBHC_BTN_DET_N_CIC);
+	n_cic[0] = 60;
+	n_cic[1] = 47;
+	gain = wcd9xxx_mbhc_cal_btn_det_mp(btn_cfg, MBHC_BTN_DET_GAIN);
+	gain[0] = 11;
+	gain[1] = 9;
+
+	return taiko_cal;
+}
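
The S(X, Y) blocks in def_taiko_mbhc_cal() above use a deliberately short-lived macro: each #define scopes plain assignments to one calibration section and is #undef'd before the next section redefines it. A tiny sketch of the same pattern with a made-up structure:

struct demo_cal {
	int t_ldoh;
	int t_settle;
};

static void fill_demo_cal(struct demo_cal *cal)
{
	/* S() only exists for the next few assignments, then is retired */
#define S(X, Y) (cal->X = (Y))
	S(t_ldoh, 100);
	S(t_settle, 100);
#undef S
}
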
+
+
+static struct snd_soc_ops mdm9625_mi2s_be_ops = {
+	.startup = mdm9625_mi2s_startup,
+	.shutdown = mdm9625_mi2s_snd_shutdown,
+};
+
+/* Digital audio interface connects codec <---> CPU */
+static struct snd_soc_dai_link mdm9625_dai[] = {
+	/* FrontEnd DAI Links */
+	{
+		.name = "MDM9625 Media1",
+		.stream_name = "MultiMedia1",
+		.cpu_dai_name = "MultiMedia1",
+		.platform_name  = "msm-pcm-dsp",
+		.dynamic = 1,
+		.trigger = {SND_SOC_DPCM_TRIGGER_POST,
+			    SND_SOC_DPCM_TRIGGER_POST},
+		.codec_dai_name = "snd-soc-dummy-dai",
+		.codec_name = "snd-soc-dummy",
+		.ignore_suspend = 1,
+		/* This dailink has playback support */
+		.ignore_pmdown_time = 1,
+		.be_id = MSM_FRONTEND_DAI_MULTIMEDIA1
+	},
+	{
+		.name = "MSM VoIP",
+		.stream_name = "VoIP",
+		.cpu_dai_name = "VoIP",
+		.platform_name  = "msm-voip-dsp",
+		.dynamic = 1,
+		.trigger = {SND_SOC_DPCM_TRIGGER_POST,
+			    SND_SOC_DPCM_TRIGGER_POST},
+		.codec_dai_name = "snd-soc-dummy-dai",
+		.codec_name = "snd-soc-dummy",
+		.ignore_suspend = 1,
+		/* This dailink has VOIP support */
+		.ignore_pmdown_time = 1,
+		.be_id = MSM_FRONTEND_DAI_VOIP,
+	},
+	{
+		.name = "Circuit-Switch Voice",
+		.stream_name = "CS-Voice",
+		.cpu_dai_name   = "CS-VOICE",
+		.platform_name  = "msm-pcm-voice",
+		.dynamic = 1,
+		.codec_dai_name = "snd-soc-dummy-dai",
+		.codec_name = "snd-soc-dummy",
+		.trigger = {SND_SOC_DPCM_TRIGGER_POST,
+			    SND_SOC_DPCM_TRIGGER_POST},
+		.no_host_mode = SND_SOC_DAI_LINK_NO_HOST,
+		.ignore_suspend = 1,
+		/* This dailink has Voice support */
+		.ignore_pmdown_time = 1,
+		.be_id = MSM_FRONTEND_DAI_CS_VOICE,
+	},
+	{
+		.name = "MI2S Hostless",
+		.stream_name = "MI2S Hostless",
+		.cpu_dai_name = "MI2S_TX_HOSTLESS",
+		.platform_name  = "msm-pcm-hostless",
+		.dynamic = 1,
+		.trigger = {SND_SOC_DPCM_TRIGGER_POST,
+			    SND_SOC_DPCM_TRIGGER_POST},
+		.no_host_mode = SND_SOC_DAI_LINK_NO_HOST,
+		.ignore_suspend = 1,
+		.ignore_pmdown_time = 1,
+		/* This dailink has MI2S support */
+		.codec_dai_name = "snd-soc-dummy-dai",
+		.codec_name = "snd-soc-dummy",
+	},
+	/* Backend DAI Links */
+	{
+		.name = LPASS_BE_MI2S_RX,
+		.stream_name = "MI2S Playback",
+		.cpu_dai_name = "msm-dai-q6-mi2s.0",
+		.platform_name = "msm-pcm-routing",
+		.codec_name = "taiko_codec",
+		.codec_dai_name = "taiko_i2s_rx1",
+		.no_pcm = 1,
+		.be_id = MSM_BACKEND_DAI_MI2S_RX,
+		.init  = &mdm9625_mi2s_audrx_init,
+		.be_hw_params_fixup = &mdm9625_mi2s_rx_be_hw_params_fixup,
+		.ops = &mdm9625_mi2s_be_ops,
+	},
+	{
+		.name = LPASS_BE_MI2S_TX,
+		.stream_name = "MI2S Capture",
+		.cpu_dai_name = "msm-dai-q6-mi2s.0",
+		.platform_name = "msm-pcm-routing",
+		.codec_name = "taiko_codec",
+		.codec_dai_name = "taiko_i2s_tx1",
+		.no_pcm = 1,
+		.be_id = MSM_BACKEND_DAI_MI2S_TX,
+		.be_hw_params_fixup = &mdm9625_mi2s_tx_be_hw_params_fixup,
+		.ops = &mdm9625_mi2s_be_ops,
+	},
+};
+
+static struct snd_soc_card snd_soc_card_mdm9625 = {
+	.name = "mdm9625-taiko-i2s-snd-card",
+	.dai_link = mdm9625_dai,
+	.num_links = ARRAY_SIZE(mdm9625_dai),
+};
+
+static __devinit int mdm9625_asoc_machine_probe(struct platform_device *pdev)
+{
+	int ret;
+	struct snd_soc_card *card = &snd_soc_card_mdm9625;
+	struct mdm9625_machine_data *pdata;
+
+	mutex_init(&cdc_mclk_mutex);
+	gpio_enable = false;
+	cdc_mclk_init = false;
+	if (!pdev->dev.of_node) {
+		dev_err(&pdev->dev, "No platform data supplied from device tree\n");
+		return -EINVAL;
+	}
+	pdata = devm_kzalloc(&pdev->dev, sizeof(struct mdm9625_machine_data),
+			     GFP_KERNEL);
+	if (!pdata) {
+		dev_err(&pdev->dev, "Can't allocate mdm9625_machine_data\n");
+		ret = -ENOMEM;
+		goto err;
+	}
+	card->dev = &pdev->dev;
+	platform_set_drvdata(pdev, card);
+	snd_soc_card_set_drvdata(card, pdata);
+	ret = snd_soc_of_parse_card_name(card, "qcom,model");
+	if (ret)
+		goto err;
+	ret = snd_soc_of_parse_audio_routing(card, "qcom,audio-routing");
+	if (ret)
+		goto err;
+	ret = of_property_read_u32(pdev->dev.of_node,
+				   "qcom,taiko-mclk-clk-freq",
+				   &pdata->mclk_freq);
+	if (ret) {
+		dev_err(&pdev->dev,
+			"Looking up %s property in node %s failed",
+			"qcom,taiko-mclk-clk-freq",
+			pdev->dev.of_node->full_name);
+		goto err;
+	}
+	/* At present only 12.288MHz is supported on MDM. */
+	if (pdata->mclk_freq != MDM_MCLK_CLK_12P288MHZ) {
+		dev_err(&pdev->dev, "unsupported taiko mclk freq %u\n",
+			pdata->mclk_freq);
+		ret = -EINVAL;
+		goto err;
+	}
+	ret = snd_soc_register_card(card);
+	if (ret) {
+		dev_err(&pdev->dev, "snd_soc_register_card failed (%d)\n",
+				ret);
+		goto err;
+	}
+	return 0;
+err:
+	devm_kfree(&pdev->dev, pdata);
+	return ret;
+}
+
+static int __devexit mdm9625_asoc_machine_remove(struct platform_device *pdev)
+{
+	struct snd_soc_card *card = platform_get_drvdata(pdev);
+	struct mdm9625_machine_data *pdata = snd_soc_card_get_drvdata(card);
+	pdata->mclk_freq = 0;
+	snd_soc_unregister_card(card);
+	return 0;
+}
+
+static const struct of_device_id msm9625_asoc_machine_of_match[]  = {
+	{ .compatible = "qcom,mdm9625-audio-taiko", },
+	{},
+};
+
+static struct platform_driver msm9625_asoc_machine_driver = {
+	.driver = {
+		.name = MDM9625_MACHINE_DRV_NAME,
+		.owner = THIS_MODULE,
+		.pm = &snd_soc_pm_ops,
+		.of_match_table = msm9625_asoc_machine_of_match,
+	},
+	.probe = mdm9625_asoc_machine_probe,
+	.remove = __devexit_p(mdm9625_asoc_machine_remove),
+};
+
+
+module_platform_driver(msm9625_asoc_machine_driver);
+
+MODULE_DESCRIPTION("ALSA SoC msm");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:" MDM9625_MACHINE_DRV_NAME);
+MODULE_DEVICE_TABLE(of, msm9625_asoc_machine_of_match);
+
diff --git a/sound/soc/msm/mpq8064.c b/sound/soc/msm/mpq8064.c
index 90c96b4..d0bfb76 100644
--- a/sound/soc/msm/mpq8064.c
+++ b/sound/soc/msm/mpq8064.c
@@ -925,7 +925,6 @@
 		clk_put(mi2s_bit_clk);
 		mi2s_bit_clk = NULL;
 	}
-	msm_mi2s_free_gpios();
 }
 
 static int configure_mi2s_gpio(void)
@@ -958,7 +957,6 @@
 	int ret = 0;
 	struct snd_soc_pcm_runtime *rtd = substream->private_data;
 	struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
-	configure_mi2s_gpio();
 	mi2s_bit_clk = clk_get(cpu_dai->dev, "bit_clk");
 	if (IS_ERR(mi2s_bit_clk))
 		return PTR_ERR(mi2s_bit_clk);
@@ -1138,7 +1136,6 @@
 			clk_put(sec_i2s_rx_osr_clk);
 			sec_i2s_rx_osr_clk = NULL;
 		}
-		mpq8064_sec_i2s_rx_free_gpios();
 	}
 	pr_info("%s(): substream = %s  stream = %d\n", __func__,
 		 substream->name, substream->stream);
@@ -1177,7 +1174,6 @@
 	struct snd_soc_pcm_runtime *rtd = substream->private_data;
 	struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
 	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
-		configure_sec_i2s_rx_gpio();
 		sec_i2s_rx_osr_clk = clk_get(cpu_dai->dev, "osr_clk");
 		if (IS_ERR(sec_i2s_rx_osr_clk)) {
 			pr_err("Failed to get sec_i2s_rx_osr_clk\n");
@@ -1695,7 +1691,8 @@
 		kfree(mbhc_cfg.calibration);
 		return ret;
 	}
-
+	configure_sec_i2s_rx_gpio();
+	configure_mi2s_gpio();
 	return ret;
 
 }
@@ -1707,6 +1704,8 @@
 		pr_err("%s: Not the right machine type\n", __func__);
 		return ;
 	}
+	mpq8064_sec_i2s_rx_free_gpios();
+	msm_mi2s_free_gpios();
 	platform_device_unregister(msm_snd_device);
 	kfree(mbhc_cfg.calibration);
 }
diff --git a/sound/soc/msm/msm-dai-fe.c b/sound/soc/msm/msm-dai-fe.c
index 51c9ed7..4165254 100644
--- a/sound/soc/msm/msm-dai-fe.c
+++ b/sound/soc/msm/msm-dai-fe.c
@@ -498,15 +498,25 @@
 		.name = "VoLTE",
 	},
 	{
+		.playback = {
+			.stream_name = "MI2S_RX_HOSTLESS Playback",
+			.aif_name = "MI2S_DL_HL",
+			.rates = SNDRV_PCM_RATE_8000_48000,
+			.formats = SNDRV_PCM_FMTBIT_S16_LE,
+			.channels_min = 1,
+			.channels_max = 2,
+			.rate_min = 8000,
+			.rate_max = 48000,
+		},
 		.capture = {
 			.stream_name = "MI2S_TX Hostless Capture",
 			.aif_name = "MI2S_UL_HL",
 			.rates = SNDRV_PCM_RATE_8000_48000,
 			.formats = SNDRV_PCM_FMTBIT_S16_LE,
 			.channels_min = 1,
-			.channels_max = 8,
-			.rate_min =     8000,
-			.rate_max =    48000,
+			.channels_max = 2,
+			.rate_min = 8000,
+			.rate_max = 48000,
 		},
 		.ops = &msm_fe_dai_ops,
 		.name = "MI2S_TX_HOSTLESS",
diff --git a/sound/soc/msm/msm-pcm-dtmf.c b/sound/soc/msm/msm-pcm-dtmf.c
index 04ab7a9..94cc1ca 100644
--- a/sound/soc/msm/msm-pcm-dtmf.c
+++ b/sound/soc/msm/msm-pcm-dtmf.c
@@ -12,13 +12,86 @@
 
 #include <linux/init.h>
 #include <linux/module.h>
+#include <linux/time.h>
+#include <linux/wait.h>
 #include <linux/platform_device.h>
-#include <linux/of_device.h>
+#include <linux/slab.h>
+#include <linux/dma-mapping.h>
 #include <sound/core.h>
 #include <sound/soc.h>
 #include <sound/pcm.h>
 #include <sound/q6afe.h>
 
+#include "msm-pcm-q6.h"
+#include "msm-pcm-routing.h"
+#include "qdsp6/q6voice.h"
+
+enum {
+	DTMF_IN_RX,
+	DTMF_IN_TX,
+};
+
+enum format {
+	FORMAT_S16_LE = 2
+};
+
+struct dtmf_det_info {
+	char     session[MAX_SESSION_NAME_LEN];
+	uint8_t  dir;
+	uint16_t high_freq;
+	uint16_t low_freq;
+};
+
+struct dtmf_buf_node {
+	struct list_head list;
+	struct dtmf_det_info dtmf_det_pkt;
+};
+
+enum dtmf_state {
+	DTMF_GEN_RX_STOPPED,
+	DTMF_GEN_RX_STARTED,
+};
+
+#define DTMF_MAX_Q_LEN 10
+#define DTMF_PKT_SIZE sizeof(struct dtmf_det_info)
+
+struct dtmf_drv_info {
+	enum  dtmf_state state;
+	struct snd_pcm_substream *capture_substream;
+
+	struct list_head out_queue;
+	struct list_head free_out_queue;
+
+	wait_queue_head_t out_wait;
+
+	struct mutex lock;
+	spinlock_t dsp_lock;
+
+	uint8_t capture_start;
+	uint8_t capture_instance;
+
+	unsigned int pcm_capture_size;
+	unsigned int pcm_capture_count;
+	unsigned int pcm_capture_irq_pos;
+	unsigned int pcm_capture_buf_pos;
+};
+
+static struct snd_pcm_hardware msm_pcm_hardware = {
+	.info =                 (SNDRV_PCM_INFO_MMAP |
+				 SNDRV_PCM_INFO_BLOCK_TRANSFER |
+				 SNDRV_PCM_INFO_MMAP_VALID |
+				 SNDRV_PCM_INFO_INTERLEAVED),
+	.formats =              SNDRV_PCM_FMTBIT_S16_LE,
+	.channels_min =         1,
+	.channels_max =         1,
+	.buffer_bytes_max =	(sizeof(struct dtmf_buf_node) * DTMF_MAX_Q_LEN),
+	.period_bytes_min =	DTMF_PKT_SIZE,
+	.period_bytes_max =	DTMF_PKT_SIZE,
+	.periods_min =		DTMF_MAX_Q_LEN,
+	.periods_max =		DTMF_MAX_Q_LEN,
+	.fifo_size =            0,
+};
+
 static int msm_dtmf_rx_generate_put(struct snd_kcontrol *kcontrol,
 				    struct snd_ctl_elem_value *ucontrol)
 {
@@ -33,7 +106,7 @@
 	return 0;
 }
 
-static int msm_dtmf_rx_generate_get(struct snd_kcontrol *kcontrol,
+static int msm_dtmf_rx_generate_get(struct  snd_kcontrol *kcontrol,
 				    struct snd_ctl_elem_value *ucontrol)
 {
 	pr_debug("%s:\n", __func__);
@@ -41,11 +114,55 @@
 	return 0;
 }
 
+static int msm_dtmf_detect_voice_rx_put(struct snd_kcontrol *kcontrol,
+					struct snd_ctl_elem_value *ucontrol)
+{
+	int enable = ucontrol->value.integer.value[0];
+
+	pr_debug("%s: enable=%d\n", __func__, enable);
+	voc_enable_dtmf_rx_detection(voc_get_session_id(VOICE_SESSION_NAME),
+				     enable);
+
+	return 0;
+}
+
+static int msm_dtmf_detect_voice_rx_get(struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_value *ucontrol)
+{
+	ucontrol->value.integer.value[0] = 0;
+	return 0;
+}
+
+static int msm_dtmf_detect_volte_rx_put(struct snd_kcontrol *kcontrol,
+					struct snd_ctl_elem_value *ucontrol)
+{
+	int enable = ucontrol->value.integer.value[0];
+
+	pr_debug("%s: enable=%d\n", __func__, enable);
+	voc_enable_dtmf_rx_detection(voc_get_session_id(VOLTE_SESSION_NAME),
+				     enable);
+
+	return 0;
+}
+
+static int msm_dtmf_detect_volte_rx_get(struct snd_kcontrol *kcontrol,
+					struct snd_ctl_elem_value *ucontrol)
+{
+	ucontrol->value.integer.value[0] = 0;
+	return 0;
+}
+
 static struct snd_kcontrol_new msm_dtmf_controls[] = {
 	SOC_SINGLE_MULTI_EXT("DTMF_Generate Rx Low High Duration Gain",
 			     SND_SOC_NOPM, 0, 5000, 0, 4,
 			     msm_dtmf_rx_generate_get,
 			     msm_dtmf_rx_generate_put),
+	SOC_SINGLE_EXT("DTMF_Detect Rx Voice enable", SND_SOC_NOPM, 0, 1, 0,
+				msm_dtmf_detect_voice_rx_get,
+				msm_dtmf_detect_voice_rx_put),
+	SOC_SINGLE_EXT("DTMF_Detect Rx VoLTE enable", SND_SOC_NOPM, 0, 1, 0,
+				msm_dtmf_detect_volte_rx_get,
+				msm_dtmf_detect_volte_rx_put),
 };
 
 static int msm_pcm_dtmf_probe(struct snd_soc_platform *platform)
@@ -55,16 +172,384 @@
 	return 0;
 }
 
-static struct snd_pcm_ops msm_pcm_ops = {};
+static void dtmf_rx_detected_cb(uint8_t *pkt,
+				char *session,
+				void *private_data)
+{
+	struct dtmf_buf_node *buf_node = NULL;
+	struct vss_istream_evt_rx_dtmf_detected *dtmf_det_pkt =
+		(struct vss_istream_evt_rx_dtmf_detected *)pkt;
+	struct dtmf_drv_info *prtd = private_data;
+	unsigned long dsp_flags;
+
+	pr_debug("%s\n", __func__);
+	if (prtd->capture_substream == NULL)
+		return;
+
+	/* Copy dtmf detected info into out_queue. */
+	spin_lock_irqsave(&prtd->dsp_lock, dsp_flags);
+	/* discarding dtmf detection info till start is received */
+	if (!list_empty(&prtd->free_out_queue) && prtd->capture_start) {
+		buf_node = list_first_entry(&prtd->free_out_queue,
+					    struct dtmf_buf_node, list);
+		list_del(&buf_node->list);
+		buf_node->dtmf_det_pkt.high_freq = dtmf_det_pkt->high_freq;
+		buf_node->dtmf_det_pkt.low_freq = dtmf_det_pkt->low_freq;
+		if (session != NULL)
+			strlcpy(buf_node->dtmf_det_pkt.session,
+				session, MAX_SESSION_NAME_LEN);
+
+		buf_node->dtmf_det_pkt.dir = DTMF_IN_RX;
+		pr_debug("high =%d, low=%d session=%s\n",
+			 buf_node->dtmf_det_pkt.high_freq,
+			 buf_node->dtmf_det_pkt.low_freq,
+			 buf_node->dtmf_det_pkt.session);
+		list_add_tail(&buf_node->list, &prtd->out_queue);
+		prtd->pcm_capture_irq_pos += prtd->pcm_capture_count;
+		spin_unlock_irqrestore(&prtd->dsp_lock, dsp_flags);
+		snd_pcm_period_elapsed(prtd->capture_substream);
+	} else {
+		spin_unlock_irqrestore(&prtd->dsp_lock, dsp_flags);
+		pr_err("DTMF detection pkt in Rx  dropped, no free node available\n");
+	}
+
+	wake_up(&prtd->out_wait);
+}
+
+static int msm_pcm_capture_copy(struct snd_pcm_substream *substream,
+				int channel, snd_pcm_uframes_t hwoff,
+				void __user *buf, snd_pcm_uframes_t frames)
+{
+	int ret = 0;
+	int count = 0;
+	struct dtmf_buf_node *buf_node = NULL;
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct dtmf_drv_info *prtd = runtime->private_data;
+	unsigned long dsp_flags;
+
+	count = frames_to_bytes(runtime, frames);
+
+	ret = wait_event_interruptible_timeout(prtd->out_wait,
+				(!list_empty(&prtd->out_queue)),
+				1 * HZ);
+
+	if (ret > 0) {
+		if (count <= DTMF_PKT_SIZE) {
+			spin_lock_irqsave(&prtd->dsp_lock, dsp_flags);
+			buf_node = list_first_entry(&prtd->out_queue,
+					struct dtmf_buf_node, list);
+			list_del(&buf_node->list);
+			spin_unlock_irqrestore(&prtd->dsp_lock, dsp_flags);
+			ret = copy_to_user(buf,
+					   &buf_node->dtmf_det_pkt,
+					   count);
+			if (ret) {
+				pr_err("%s: Copy to user retuned %d\n",
+					__func__, ret);
+				ret = -EFAULT;
+			}
+			spin_lock_irqsave(&prtd->dsp_lock, dsp_flags);
+			list_add_tail(&buf_node->list,
+				      &prtd->free_out_queue);
+			spin_unlock_irqrestore(&prtd->dsp_lock, dsp_flags);
+
+		} else {
+			pr_err("%s: Read count %d > DTMF_PKT_SIZE\n",
+				__func__, count);
+			ret = -ENOMEM;
+		}
+	} else if (ret == 0) {
+		pr_err("%s: No UL data available\n", __func__);
+		ret = -ETIMEDOUT;
+	} else {
+		pr_err("%s: Read was interrupted\n", __func__);
+		ret = -ERESTARTSYS;
+	}
+	return ret;
+}
+
+static int msm_pcm_copy(struct snd_pcm_substream *substream, int a,
+	 snd_pcm_uframes_t hwoff, void __user *buf, snd_pcm_uframes_t frames)
+{
+	int ret = 0;
+	pr_debug("%s() DTMF\n", __func__);
+
+	if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
+		ret = msm_pcm_capture_copy(substream, a, hwoff, buf, frames);
+
+	return ret;
+}
+
+static int msm_pcm_open(struct snd_pcm_substream *substream)
+{
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct dtmf_drv_info *prtd = NULL;
+	int ret = 0;
+
+	if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) {
+		prtd = kzalloc(sizeof(struct dtmf_drv_info), GFP_KERNEL);
+
+		if (prtd == NULL) {
+			pr_err("Failed to allocate memory for msm_audio\n");
+			ret = -ENOMEM;
+			goto done;
+		}
+
+		mutex_init(&prtd->lock);
+		spin_lock_init(&prtd->dsp_lock);
+		init_waitqueue_head(&prtd->out_wait);
+		INIT_LIST_HEAD(&prtd->out_queue);
+		INIT_LIST_HEAD(&prtd->free_out_queue);
+
+		runtime->hw = msm_pcm_hardware;
+
+		ret = snd_pcm_hw_constraint_integer(runtime,
+						    SNDRV_PCM_HW_PARAM_PERIODS);
+		if (ret < 0)
+			pr_info("snd_pcm_hw_constraint_integer failed\n");
+
+		prtd->capture_substream = substream;
+		prtd->capture_instance++;
+		runtime->private_data = prtd;
+	}
+
+done:
+	return ret;
+}
+
+static int msm_pcm_close(struct snd_pcm_substream *substream)
+{
+	int ret = 0;
+	struct list_head *ptr = NULL;
+	struct list_head *next = NULL;
+	struct dtmf_buf_node *buf_node = NULL;
+	struct snd_dma_buffer *c_dma_buf;
+	struct snd_pcm_substream *c_substream;
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct dtmf_drv_info *prtd = runtime->private_data;
+	unsigned long dsp_flags;
+
+	pr_debug("%s() DTMF\n", __func__);
+
+	if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) {
+		mutex_lock(&prtd->lock);
+		wake_up(&prtd->out_wait);
+
+		if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
+			prtd->capture_instance--;
+
+		if (!prtd->capture_instance) {
+			if (prtd->state == DTMF_GEN_RX_STARTED) {
+				prtd->state = DTMF_GEN_RX_STOPPED;
+				voc_disable_dtmf_det_on_active_sessions();
+				voc_register_dtmf_rx_detection_cb(NULL, NULL);
+			}
+			/* release all buffer */
+			/* release out_queue and free_out_queue */
+			pr_debug("release all buffer\n");
+			c_substream = prtd->capture_substream;
+			if (c_substream == NULL) {
+				pr_debug("c_substream is NULL\n");
+				mutex_unlock(&prtd->lock);
+				return -EINVAL;
+			}
+
+			c_dma_buf = &c_substream->dma_buffer;
+			if (c_dma_buf == NULL) {
+				pr_debug("c_dma_buf is NULL.\n");
+				mutex_unlock(&prtd->lock);
+				return -EINVAL;
+			}
+
+			if (c_dma_buf->area != NULL) {
+				spin_lock_irqsave(&prtd->dsp_lock, dsp_flags);
+				list_for_each_safe(ptr, next,
+							&prtd->out_queue) {
+					buf_node = list_entry(ptr,
+						   struct dtmf_buf_node, list);
+					list_del(&buf_node->list);
+				}
+
+				list_for_each_safe(ptr, next,
+						   &prtd->free_out_queue) {
+					buf_node = list_entry(ptr,
+						   struct dtmf_buf_node, list);
+					list_del(&buf_node->list);
+				}
+
+				spin_unlock_irqrestore(&prtd->dsp_lock,
+						       dsp_flags);
+				dma_free_coherent(c_substream->pcm->card->dev,
+						  runtime->hw.buffer_bytes_max,
+						  c_dma_buf->area,
+						  c_dma_buf->addr);
+				c_dma_buf->area = NULL;
+			}
+		}
+		prtd->capture_substream = NULL;
+		mutex_unlock(&prtd->lock);
+	}
+
+	return ret;
+}
+
+static int msm_pcm_hw_params(struct snd_pcm_substream *substream,
+			     struct snd_pcm_hw_params *params)
+{
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct dtmf_drv_info *prtd = runtime->private_data;
+	struct snd_dma_buffer *dma_buf = &substream->dma_buffer;
+	struct dtmf_buf_node *buf_node = NULL;
+	int i = 0, offset = 0;
+	int ret = 0;
+
+	pr_debug("%s: DTMF\n", __func__);
+	if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) {
+		mutex_lock(&prtd->lock);
+		dma_buf->dev.type = SNDRV_DMA_TYPE_DEV;
+		dma_buf->dev.dev = substream->pcm->card->dev;
+		dma_buf->private_data = NULL;
+
+		dma_buf->area = dma_alloc_coherent(substream->pcm->card->dev,
+						runtime->hw.buffer_bytes_max,
+						&dma_buf->addr, GFP_KERNEL);
+		if (!dma_buf->area) {
+			pr_err("%s:MSM DTMF dma_alloc failed\n", __func__);
+			mutex_unlock(&prtd->lock);
+			return -ENOMEM;
+		}
+
+		dma_buf->bytes = runtime->hw.buffer_bytes_max;
+		memset(dma_buf->area, 0, runtime->hw.buffer_bytes_max);
+
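+		/* Carve the DMA buffer into DTMF packet nodes on the free queue. */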
+		for (i = 0; i < DTMF_MAX_Q_LEN; i++) {
+			pr_debug("node =%d\n", i);
+			buf_node = (void *) dma_buf->area + offset;
+			list_add_tail(&buf_node->list,
+				      &prtd->free_out_queue);
+			offset = offset + sizeof(struct dtmf_buf_node);
+		}
+
+		snd_pcm_set_runtime_buffer(substream, &substream->dma_buffer);
+		mutex_unlock(&prtd->lock);
+	}
+
+	return ret;
+}
+
+static int msm_pcm_capture_prepare(struct snd_pcm_substream *substream)
+{
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct dtmf_drv_info *prtd = runtime->private_data;
+
+	pr_debug("%s: DTMF\n", __func__);
+	prtd->pcm_capture_size  = snd_pcm_lib_buffer_bytes(substream);
+	prtd->pcm_capture_count = snd_pcm_lib_period_bytes(substream);
+	prtd->pcm_capture_irq_pos = 0;
+	prtd->pcm_capture_buf_pos = 0;
+	return 0;
+}
+
+static int msm_pcm_prepare(struct snd_pcm_substream *substream)
+{
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct dtmf_drv_info *prtd = runtime->private_data;
+
+	pr_debug("%s: DTMF\n", __func__);
+
+	if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) {
+		mutex_lock(&prtd->lock);
+
+		msm_pcm_capture_prepare(substream);
+
+		if (runtime->format != FORMAT_S16_LE) {
+			pr_err("format:%u doesnt match %d\n",
+			       (uint32_t)runtime->format, FORMAT_S16_LE);
+			mutex_unlock(&prtd->lock);
+			return -EINVAL;
+		}
+
+		if (prtd->capture_instance &&
+			(prtd->state != DTMF_GEN_RX_STARTED)) {
+			voc_register_dtmf_rx_detection_cb(dtmf_rx_detected_cb,
+							  prtd);
+			prtd->state = DTMF_GEN_RX_STARTED;
+		}
+		mutex_unlock(&prtd->lock);
+	}
+
+	return 0;
+}
+
+static int msm_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
+{
+	int ret = 0;
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct dtmf_drv_info *prtd = runtime->private_data;
+
+	switch (cmd) {
+	case SNDRV_PCM_TRIGGER_START:
+	case SNDRV_PCM_TRIGGER_RESUME:
+		pr_debug("%s: Trigger start\n", __func__);
+		if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
+			prtd->capture_start = 1;
+		break;
+	case SNDRV_PCM_TRIGGER_STOP:
+		pr_debug("SNDRV_PCM_TRIGGER_STOP\n");
+		if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
+			prtd->capture_start = 0;
+		break;
+	default:
+		ret = -EINVAL;
+		break;
+	}
+
+	return ret;
+}
+
+static snd_pcm_uframes_t msm_pcm_pointer(struct snd_pcm_substream *substream)
+{
+	snd_pcm_uframes_t ret = 0;
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct dtmf_drv_info *prtd = runtime->private_data;
+
+	if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) {
+		if (prtd->pcm_capture_irq_pos >= prtd->pcm_capture_size)
+			prtd->pcm_capture_irq_pos = 0;
+		ret = bytes_to_frames(runtime, (prtd->pcm_capture_irq_pos));
+	}
+
+	return ret;
+}
+
+static struct snd_pcm_ops msm_pcm_ops = {
+	.open           = msm_pcm_open,
+	.copy		= msm_pcm_copy,
+	.hw_params	= msm_pcm_hw_params,
+	.close          = msm_pcm_close,
+	.prepare        = msm_pcm_prepare,
+	.trigger        = msm_pcm_trigger,
+	.pointer        = msm_pcm_pointer,
+};
+
+static int msm_asoc_pcm_new(struct snd_soc_pcm_runtime *rtd)
+{
+	struct snd_card *card = rtd->card->snd_card;
+	int ret = 0;
+
+	if (!card->dev->coherent_dma_mask)
+		card->dev->coherent_dma_mask = DMA_BIT_MASK(32);
+	return ret;
+}
 
 static struct snd_soc_platform_driver msm_soc_platform = {
 	.ops		= &msm_pcm_ops,
+	.pcm_new	= msm_asoc_pcm_new,
 	.probe		= msm_pcm_dtmf_probe,
 };
 
 static __devinit int msm_pcm_probe(struct platform_device *pdev)
 {
-	pr_debug("%s: dev name %s\n", __func__, dev_name(&pdev->dev));
 	return snd_soc_register_platform(&pdev->dev,
 					 &msm_soc_platform);
 }
diff --git a/sound/soc/msm/msm-pcm-routing.c b/sound/soc/msm/msm-pcm-routing.c
index 1ddb6c9..79ce671 100644
--- a/sound/soc/msm/msm-pcm-routing.c
+++ b/sound/soc/msm/msm-pcm-routing.c
@@ -438,7 +438,6 @@
 
 	mutex_lock(&routing_lock);
 
-	adm_pseudo_close(PSEUDOPORT_01);
 	for (i = 0; i < MSM_BACKEND_DAI_MAX; i++) {
 		if (!is_be_dai_extproc(i) &&
 			(msm_bedais[i].active) &&
@@ -480,8 +479,12 @@
 		if (!is_be_dai_extproc(i) &&
 		   (afe_get_port_type(msm_bedais[i].port_id) == port_type) &&
 		   (msm_bedais[i].active) &&
-		   (test_bit(fedai_id, &msm_bedais[i].fe_sessions)))
-			adm_close(msm_bedais[i].port_id);
+		   (test_bit(fedai_id, &msm_bedais[i].fe_sessions))) {
+			if (msm_bedais[i].port_id == PSEUDOPORT_01)
+				adm_pseudo_close(msm_bedais[i].port_id);
+			else
+				adm_close(msm_bedais[i].port_id);
+		}
 	}
 
 	fe_dai_map[fedai_id][session_type] = INVALID_SESSION;
@@ -1597,8 +1600,8 @@
 	MSM_FRONTEND_DAI_VOICE_STUB, 1, 0, msm_routing_get_voice_stub_mixer,
 	msm_routing_put_voice_stub_mixer),
 	SOC_SINGLE_EXT("DTMF", MSM_BACKEND_DAI_PRI_I2S_RX,
-	MSM_FRONTEND_DAI_DTMF_RX, 1, 0, msm_routing_get_voice_stub_mixer,
-	msm_routing_put_voice_stub_mixer),
+	MSM_FRONTEND_DAI_DTMF_RX, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
 };
 
 static const struct snd_kcontrol_new sec_i2s_rx_voice_mixer_controls[] = {
diff --git a/sound/soc/msm/qdsp6/q6afe.c b/sound/soc/msm/qdsp6/q6afe.c
index 6cabc97..2d44a41 100644
--- a/sound/soc/msm/qdsp6/q6afe.c
+++ b/sound/soc/msm/qdsp6/q6afe.c
@@ -1790,6 +1790,9 @@
 		goto fail_cmd;
 	}
 	pr_debug("%s: port_id=%d\n", __func__, port_id);
+	if ((port_id == RT_PROXY_DAI_001_RX) ||
+		(port_id == RT_PROXY_DAI_002_TX))
+		return 0;
 	port_id = afe_convert_virtual_to_portid(port_id);
 
 	stop.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
diff --git a/sound/soc/msm/qdsp6/q6voice.c b/sound/soc/msm/qdsp6/q6voice.c
index cb2e39b..3b1e722 100644
--- a/sound/soc/msm/qdsp6/q6voice.c
+++ b/sound/soc/msm/qdsp6/q6voice.c
@@ -146,6 +146,21 @@
 	v->cvp_handle = cvp_handle;
 }
 
+char *voc_get_session_name(u16 session_id)
+{
+	char *session_name = NULL;
+
+	if (session_id == common.voice[VOC_PATH_PASSIVE].session_id) {
+		session_name = VOICE_SESSION_NAME;
+	} else if (session_id ==
+			common.voice[VOC_PATH_VOLTE_PASSIVE].session_id) {
+		session_name = VOLTE_SESSION_NAME;
+	} else if (session_id == common.voice[VOC_PATH_FULL].session_id) {
+		session_name = VOIP_SESSION_NAME;
+	}
+	return session_name;
+}
+
 uint16_t voc_get_session_id(char *name)
 {
 	u16 session_id = 0;
@@ -894,6 +909,105 @@
 	return 0;
 }
 
+static int voice_send_dtmf_rx_detection_cmd(struct voice_data *v,
+					    uint32_t enable)
+{
+	int ret = 0;
+	void *apr_cvs;
+	u16 cvs_handle;
+	struct cvs_set_rx_dtmf_detection_cmd cvs_dtmf_rx_detection;
+
+	if (v == NULL) {
+		pr_err("%s: v is NULL\n", __func__);
+		return -EINVAL;
+	}
+	apr_cvs = common.apr_q6_cvs;
+
+	if (!apr_cvs) {
+		pr_err("%s: apr_cvs is NULL.\n", __func__);
+		return -EINVAL;
+	}
+
+	cvs_handle = voice_get_cvs_handle(v);
+
+	/* Set SET_DTMF_RX_DETECTION */
+	cvs_dtmf_rx_detection.hdr.hdr_field =
+				APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+					      APR_HDR_LEN(APR_HDR_SIZE),
+					      APR_PKT_VER);
+	cvs_dtmf_rx_detection.hdr.pkt_size =
+				APR_PKT_SIZE(APR_HDR_SIZE,
+				sizeof(cvs_dtmf_rx_detection) - APR_HDR_SIZE);
+	cvs_dtmf_rx_detection.hdr.src_port = v->session_id;
+	cvs_dtmf_rx_detection.hdr.dest_port = cvs_handle;
+	cvs_dtmf_rx_detection.hdr.token = 0;
+	cvs_dtmf_rx_detection.hdr.opcode =
+					VSS_ISTREAM_CMD_SET_RX_DTMF_DETECTION;
+	cvs_dtmf_rx_detection.cvs_dtmf_det.enable = enable;
+
+	v->cvs_state = CMD_STATUS_FAIL;
+
+	ret = apr_send_pkt(apr_cvs, (uint32_t *) &cvs_dtmf_rx_detection);
+	if (ret < 0) {
+		pr_err("%s: Error %d sending SET_DTMF_RX_DETECTION\n",
+		       __func__,
+		       ret);
+		return -EINVAL;
+	}
+
+	ret = wait_event_timeout(v->cvs_wait,
+				 (v->cvs_state == CMD_STATUS_SUCCESS),
+				 msecs_to_jiffies(TIMEOUT_MS));
+
+	if (!ret) {
+		pr_err("%s: wait_event timeout\n", __func__);
+		return -EINVAL;
+	}
+
+	return ret;
+}
+
+void voc_disable_dtmf_det_on_active_sessions(void)
+{
+	struct voice_data *v = NULL;
+	int i;
+	for (i = 0; i < MAX_VOC_SESSIONS; i++) {
+		v = &common.voice[i];
+		if ((v->dtmf_rx_detect_en) &&
+			((v->voc_state == VOC_RUN) ||
+			 (v->voc_state == VOC_CHANGE) ||
+			 (v->voc_state == VOC_STANDBY))) {
+			pr_debug("disable dtmf det on ses_id=%d\n",
+				 v->session_id);
+			voice_send_dtmf_rx_detection_cmd(v, 0);
+		}
+	}
+}
+
+int voc_enable_dtmf_rx_detection(uint16_t session_id, uint32_t enable)
+{
+	struct voice_data *v = voice_get_session(session_id);
+	int ret = 0;
+
+	if (v == NULL) {
+		pr_err("%s: invalid session_id 0x%x\n", __func__, session_id);
+		return -EINVAL;
+	}
+
+	mutex_lock(&v->lock);
+	v->dtmf_rx_detect_en = enable;
+
+	if ((v->voc_state == VOC_RUN) ||
+	    (v->voc_state == VOC_CHANGE) ||
+	    (v->voc_state == VOC_STANDBY))
+		ret = voice_send_dtmf_rx_detection_cmd(v,
+						       v->dtmf_rx_detect_en);
+
+	mutex_unlock(&v->lock);
+
+	return ret;
+}
+
 static int voice_config_cvs_vocoder(struct voice_data *v)
 {
 	int ret = 0;
@@ -2207,6 +2321,9 @@
 	if (v->rec_info.rec_enable)
 		voice_cvs_start_record(v, v->rec_info.rec_mode);
 
+	if (v->dtmf_rx_detect_en)
+		voice_send_dtmf_rx_detection_cmd(v, v->dtmf_rx_detect_en);
+
 	rtac_add_voice(voice_get_cvs_handle(v),
 		voice_get_cvp_handle(v),
 		v->dev_rx.port_id, v->dev_tx.port_id,
@@ -2445,6 +2562,10 @@
 	/* send stop voice cmd */
 	voice_send_stop_voice_cmd(v);
 
+	/* send stop dtmf detecton cmd */
+	if (v->dtmf_rx_detect_en)
+		voice_send_dtmf_rx_detection_cmd(v, 0);
+
 	/* Clear mute setting */
 	v->dev_tx.mute = common.default_mute_val;
 
@@ -3700,6 +3821,13 @@
 	common.mvs_info.private_data = private_data;
 }
 
+void voc_register_dtmf_rx_detection_cb(dtmf_rx_det_cb_fn dtmf_rx_ul_cb,
+				       void *private_data)
+{
+	common.dtmf_info.dtmf_rx_ul_cb = dtmf_rx_ul_cb;
+	common.dtmf_info.private_data = private_data;
+}
+
 void voc_config_vocoder(uint32_t media_type,
 			  uint32_t rate,
 			  uint32_t network_type,
@@ -3876,6 +4004,7 @@
 			case VSS_ISTREAM_CMD_STOP_PLAYBACK:
 			case VSS_ISTREAM_CMD_START_RECORD:
 			case VSS_ISTREAM_CMD_STOP_RECORD:
+			case VSS_ISTREAM_CMD_SET_RX_DTMF_DETECTION:
 				pr_debug("%s: cmd = 0x%x\n", __func__, ptr[0]);
 				v->cvs_state = CMD_STATUS_SUCCESS;
 				wake_up(&v->cvs_wait);
@@ -3947,8 +4076,30 @@
 	} else if (data->opcode ==  VOICE_EVT_GET_PARAM_ACK) {
 		rtac_make_voice_callback(RTAC_CVS, data->payload,
 					data->payload_size);
-	} else
+	} else if (data->opcode == VSS_ISTREAM_EVT_RX_DTMF_DETECTED) {
+		struct vss_istream_evt_rx_dtmf_detected *dtmf_rx_detected;
+		uint32_t *voc_pkt = data->payload;
+		uint32_t pkt_len = data->payload_size;
+
+		if ((voc_pkt != NULL) &&
+		    (pkt_len ==
+			sizeof(struct vss_istream_evt_rx_dtmf_detected))) {
+
+			dtmf_rx_detected =
+			(struct vss_istream_evt_rx_dtmf_detected *) voc_pkt;
+			pr_debug("RX_DTMF_DETECTED low_freq=%d high_freq=%d\n",
+				 dtmf_rx_detected->low_freq,
+				 dtmf_rx_detected->high_freq);
+			if (c->dtmf_info.dtmf_rx_ul_cb)
+				c->dtmf_info.dtmf_rx_ul_cb((uint8_t *)voc_pkt,
+					voc_get_session_name(v->session_id),
+					c->dtmf_info.private_data);
+		} else {
+			pr_err("Invalid packet\n");
+		}
+	} else {
 		pr_debug("Unknown opcode 0x%x\n", data->opcode);
+	}
 
 fail:
 	return 0;
@@ -3971,7 +4122,6 @@
 	v = voice_get_session(data->dest_port);
 	if (v == NULL) {
 		pr_err("%s: v is NULL\n", __func__);
-
 		return -EINVAL;
 	}
 
@@ -4141,6 +4291,7 @@
 		common.voice[i].dev_tx.port_id = 1;
 		common.voice[i].dev_rx.port_id = 0;
 		common.voice[i].sidetone_gain = 0x512;
+		common.voice[i].dtmf_rx_detect_en = 0;
 
 		common.voice[i].voc_state = VOC_INIT;
 
diff --git a/sound/soc/msm/qdsp6/q6voice.h b/sound/soc/msm/qdsp6/q6voice.h
index 34b1b52..2fc2266 100644
--- a/sound/soc/msm/qdsp6/q6voice.h
+++ b/sound/soc/msm/qdsp6/q6voice.h
@@ -567,6 +567,55 @@
 	/* Reserved, set to 0. */
 };
 
+/*
+ * Event sent by the stream to the client that enables Rx DTMF
+ * detection whenever DTMF is detected in the Rx path.
+ *
+ * The DTMF detection feature can only be used to detect DTMF
+ * frequencies as listed in the vss_istream_evt_rx_dtmf_detected
+ * structure.
+ */
+
+#define VSS_ISTREAM_EVT_RX_DTMF_DETECTED (0x0001101A)
+
+struct vss_istream_cmd_set_rx_dtmf_detection {
+	/*
+	 * Enables/disables Rx DTMF detection
+	 *
+	 * Possible values are
+	 * 0 - disable
+	 * 1 - enable
+	 *
+	 */
+	uint32_t enable;
+};
+
+#define VSS_ISTREAM_CMD_SET_RX_DTMF_DETECTION (0x00011027)
+
+struct vss_istream_evt_rx_dtmf_detected {
+	uint16_t low_freq;
+	/*
+	 * Detected low frequency. Possible values:
+	 * 697 Hz
+	 * 770 Hz
+	 * 852 Hz
+	 * 941 Hz
+	 */
+	uint16_t high_freq;
+	/*
+	 * Detected high frequency. Possible values:
+	 * 1209 Hz
+	 * 1336 Hz
+	 * 1477 Hz
+	 * 1633 Hz
+	 */
+};
+
+struct cvs_set_rx_dtmf_detection_cmd {
+	struct apr_hdr hdr;
+	struct vss_istream_cmd_set_rx_dtmf_detection cvs_dtmf_det;
+} __packed;
+
 struct cvs_create_passive_ctl_session_cmd {
 	struct apr_hdr hdr;
 	struct vss_istream_cmd_create_passive_control_session_t cvs_session;
@@ -858,6 +907,10 @@
 			 uint32_t *pkt_len,
 			 void *private_data);
 
+/* CB for DTMF RX Detection */
+typedef void (*dtmf_rx_det_cb_fn)(uint8_t *pkt,
+				  char *session,
+				  void *private_data);
 
 struct mvs_driver_info {
 	uint32_t media_type;
@@ -869,6 +922,11 @@
 	void *private_data;
 };
 
+struct dtmf_driver_info {
+	dtmf_rx_det_cb_fn dtmf_rx_ul_cb;
+	void *private_data;
+};
+
 struct incall_rec_info {
 	uint32_t rec_enable;
 	uint32_t rec_mode;
@@ -915,6 +973,8 @@
 	/* FENC enable value */
 	uint32_t fens_enable;
 
+	uint32_t dtmf_rx_detect_en;
+
 	struct voice_dev_route_state voc_route_state;
 
 	u16 session_id;
@@ -961,6 +1021,8 @@
 
 	struct mvs_driver_info mvs_info;
 
+	struct dtmf_driver_info dtmf_info;
+
 	struct voice_data voice[MAX_VOC_SESSIONS];
 };
 
@@ -968,6 +1030,9 @@
 			dl_cb_fn dl_cb,
 			void *private_data);
 
+void voc_register_dtmf_rx_detection_cb(dtmf_rx_det_cb_fn dtmf_rx_ul_cb,
+				       void *private_data);
+
 void voc_config_vocoder(uint32_t media_type,
 			uint32_t rate,
 			uint32_t network_type,
@@ -1005,11 +1070,20 @@
 int voc_enable_cvp(uint16_t session_id);
 int voc_set_route_flag(uint16_t session_id, uint8_t path_dir, uint8_t set);
 uint8_t voc_get_route_flag(uint16_t session_id, uint8_t path_dir);
+int voc_enable_dtmf_rx_detection(uint16_t session_id, uint32_t enable);
+void voc_disable_dtmf_det_on_active_sessions(void);
 
+#define MAX_SESSION_NAME_LEN 32
 #define VOICE_SESSION_NAME "Voice session"
 #define VOIP_SESSION_NAME "VoIP session"
 #define VOLTE_SESSION_NAME "VoLTE session"
 #define SGLTE_SESSION_NAME "SGLTE session"
+
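+/* Indices into common.voice[] for the corresponding vocoder paths. */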
+#define VOC_PATH_PASSIVE 0
+#define VOC_PATH_FULL 1
+#define VOC_PATH_VOLTE_PASSIVE 2
+#define VOC_PATH_SGLTE_PASSIVE 3
+
 uint16_t voc_get_session_id(char *name);
 
 int voc_start_playback(uint32_t set);
diff --git a/sound/soc/msm/qdsp6v2/audio_ocmem.c b/sound/soc/msm/qdsp6v2/audio_ocmem.c
index 9e08be3..f151e51 100644
--- a/sound/soc/msm/qdsp6v2/audio_ocmem.c
+++ b/sound/soc/msm/qdsp6v2/audio_ocmem.c
@@ -30,7 +30,7 @@
 
 #define AUDIO_OCMEM_BUF_SIZE (512 * SZ_1K)
 
-static int enable_ocmem_audio_voice;
+static int enable_ocmem_audio_voice = 1;
 module_param(enable_ocmem_audio_voice, int,
 			S_IRUGO | S_IWUSR | S_IWGRP);
 MODULE_PARM_DESC(enable_ocmem_audio_voice, "control OCMEM usage for audio/voice");
@@ -423,10 +423,10 @@
 	struct voice_ocmem_workdata *workdata = NULL;
 
 	if (enable) {
-		if (enable_ocmem_audio_voice)
-			audio_ocmem_lcl.ocmem_en = true;
-		else
+		if (!enable_ocmem_audio_voice)
 			audio_ocmem_lcl.ocmem_en = false;
+		else
+			audio_ocmem_lcl.ocmem_en = true;
 	}
 	if (audio_ocmem_lcl.ocmem_en) {
 		if (audio_ocmem_lcl.voice_ocmem_workqueue == NULL) {
@@ -527,10 +527,10 @@
 	struct audio_ocmem_workdata *workdata = NULL;
 
 	if (enable) {
-		if (enable_ocmem_audio_voice)
-			audio_ocmem_lcl.ocmem_en = true;
-		else
+		if (!enable_ocmem_audio_voice)
 			audio_ocmem_lcl.ocmem_en = false;
+		else
+			audio_ocmem_lcl.ocmem_en = true;
 	}
 
 	if (audio_ocmem_lcl.ocmem_en) {
@@ -611,7 +611,7 @@
 	atomic_set(&audio_ocmem_lcl.audio_state, OCMEM_STATE_DEFAULT);
 	atomic_set(&audio_ocmem_lcl.audio_exit, 0);
 	spin_lock_init(&audio_ocmem_lcl.audio_lock);
-	audio_ocmem_lcl.ocmem_en = false;
+	audio_ocmem_lcl.ocmem_en = true;
 
 	/* populate platform data */
 	ret = audio_ocmem_platform_data_populate(pdev);
diff --git a/sound/soc/msm/qdsp6v2/msm-compr-q6-v2.c b/sound/soc/msm/qdsp6v2/msm-compr-q6-v2.c
index 7ba6514..94c1c85 100644
--- a/sound/soc/msm/qdsp6v2/msm-compr-q6-v2.c
+++ b/sound/soc/msm/qdsp6v2/msm-compr-q6-v2.c
@@ -457,6 +457,7 @@
 		pr_debug("%s: Trigger start\n", __func__);
 		q6asm_run_nowait(prtd->audio_client, 0, 0, 0);
 		atomic_set(&prtd->start, 1);
+		atomic_set(&prtd->pending_buffer, 1);
 		break;
 	case SNDRV_PCM_TRIGGER_STOP:
 		pr_debug("SNDRV_PCM_TRIGGER_STOP\n");
diff --git a/sound/soc/msm/qdsp6v2/msm-dai-q6-v2.c b/sound/soc/msm/qdsp6v2/msm-dai-q6-v2.c
index 7399053..621d24b 100644
--- a/sound/soc/msm/qdsp6v2/msm-dai-q6-v2.c
+++ b/sound/soc/msm/qdsp6v2/msm-dai-q6-v2.c
@@ -37,9 +37,39 @@
 	DECLARE_BITMAP(status_mask, STATUS_MAX);
 	u32 rate;
 	u32 channels;
+	u32 bitwidth;
 	union afe_port_config port_config;
 };
 
+struct msm_dai_q6_mi2s_dai_config {
+	u16 pdata_mi2s_lines;
+	struct msm_dai_q6_dai_data mi2s_dai_data;
+};
+
+struct msm_dai_q6_mi2s_dai_data {
+	struct msm_dai_q6_mi2s_dai_config tx_dai;
+	struct msm_dai_q6_mi2s_dai_config rx_dai;
+	struct snd_pcm_hw_constraint_list rate_constraint;
+	struct snd_pcm_hw_constraint_list bitwidth_constraint;
+};
+
+/* MI2S format field for AFE_PORT_CMD_I2S_CONFIG command
+ *  0: linear PCM
+ *  1: non-linear PCM
+ *  2: PCM data in IEC 60958 container
+ *  3: compressed data in IEC 60958 container
+ */
+static const char *const mi2s_format[] = {
+	"LPCM",
+	"Compr",
+	"LPCM-60958",
+	"Compr-60958"
+};
+
+static const struct soc_enum mi2s_config_enum[] = {
+	SOC_ENUM_SINGLE_EXT(4, mi2s_format),
+};
+
 static struct clk *pcm_src_clk;
 static struct clk *pcm_branch_clk;
 static struct clk *pcm_oe_src_clk;
@@ -1155,6 +1185,611 @@
 	.remove = msm_dai_q6_dai_remove,
 };
 
+static int msm_dai_q6_mi2s_format_put(struct snd_kcontrol *kcontrol,
+				      struct snd_ctl_elem_value *ucontrol)
+{
+	struct msm_dai_q6_dai_data *dai_data = kcontrol->private_data;
+	int value = ucontrol->value.integer.value[0];
+	dai_data->port_config.i2s.data_format = value;
+	pr_debug("%s: value = %d, channel = %d, line = %d\n",
+		 __func__, value, dai_data->port_config.i2s.mono_stereo,
+		 dai_data->port_config.i2s.channel_mode);
+	return 0;
+}
+
+static int msm_dai_q6_mi2s_format_get(struct snd_kcontrol *kcontrol,
+				      struct snd_ctl_elem_value *ucontrol)
+{
+	struct msm_dai_q6_dai_data *dai_data = kcontrol->private_data;
+	ucontrol->value.integer.value[0] =
+		dai_data->port_config.i2s.data_format;
+	return 0;
+}
+
+static const struct snd_kcontrol_new mi2s_config_controls[] = {
+	SOC_ENUM_EXT("PRI MI2S RX Format", mi2s_config_enum[0],
+		     msm_dai_q6_mi2s_format_get,
+		     msm_dai_q6_mi2s_format_put),
+	SOC_ENUM_EXT("SEC RX Format", mi2s_config_enum[0],
+		     msm_dai_q6_mi2s_format_get,
+		     msm_dai_q6_mi2s_format_put),
+	SOC_ENUM_EXT("PRI MI2S TX Format", mi2s_config_enum[0],
+		     msm_dai_q6_mi2s_format_get,
+		     msm_dai_q6_mi2s_format_put),
+};
+
+static int msm_dai_q6_dai_mi2s_probe(struct snd_soc_dai *dai)
+{
+	struct msm_dai_q6_mi2s_dai_data *mi2s_dai_data =
+			dev_get_drvdata(dai->dev);
+	struct snd_kcontrol *kcontrol = NULL;
+	int rc = 0;
+
+	if (mi2s_dai_data->rx_dai.mi2s_dai_data.port_config.i2s.channel_mode) {
+		kcontrol = snd_ctl_new1(&mi2s_config_controls[0],
+					&mi2s_dai_data->rx_dai.mi2s_dai_data);
+		rc = snd_ctl_add(dai->card->snd_card, kcontrol);
+
+		if (IS_ERR_VALUE(rc)) {
+			dev_err(dai->dev, "%s: err add RX fmt ctl\n", __func__);
+			goto rtn;
+		}
+	}
+	if (mi2s_dai_data->tx_dai.mi2s_dai_data.port_config.i2s.channel_mode) {
+		rc = snd_ctl_add(dai->card->snd_card,
+				snd_ctl_new1(&mi2s_config_controls[2],
+				&mi2s_dai_data->tx_dai.mi2s_dai_data));
+
+		if (IS_ERR_VALUE(rc)) {
+			if (kcontrol)
+				snd_ctl_remove(dai->card->snd_card, kcontrol);
+			dev_err(dai->dev, "%s: err add TX fmt ctl\n", __func__);
+		}
+	}
+rtn:
+	return rc;
+}
+
+
+static int msm_dai_q6_dai_mi2s_remove(struct snd_soc_dai *dai)
+{
+	struct msm_dai_q6_mi2s_dai_data *mi2s_dai_data =
+		dev_get_drvdata(dai->dev);
+	int rc;
+
+	/* If AFE port is still up, close it */
+	if (test_bit(STATUS_PORT_STARTED,
+		     mi2s_dai_data->rx_dai.mi2s_dai_data.status_mask)) {
+		rc = afe_close(MI2S_RX); /* can block */
+		if (IS_ERR_VALUE(rc))
+			dev_err(dai->dev, "fail to close MI2S_RX port\n");
+		clear_bit(STATUS_PORT_STARTED,
+			  mi2s_dai_data->rx_dai.mi2s_dai_data.status_mask);
+	}
+	if (test_bit(STATUS_PORT_STARTED,
+		     mi2s_dai_data->tx_dai.mi2s_dai_data.status_mask)) {
+		rc = afe_close(MI2S_TX); /* can block */
+		if (IS_ERR_VALUE(rc))
+			dev_err(dai->dev, "fail to close MI2S_TX port\n");
+		clear_bit(STATUS_PORT_STARTED,
+			  mi2s_dai_data->tx_dai.mi2s_dai_data.status_mask);
+	}
+	kfree(mi2s_dai_data);
+	snd_soc_unregister_dai(dai->dev);
+	return 0;
+}
+
+static int msm_dai_q6_mi2s_startup(struct snd_pcm_substream *substream,
+				   struct snd_soc_dai *dai)
+{
+	struct msm_dai_q6_mi2s_dai_data *mi2s_dai_data =
+		dev_get_drvdata(dai->dev);
+
+	dev_dbg(dai->dev, "%s: cnst list %p\n", __func__,
+		mi2s_dai_data->rate_constraint.list);
+
+	if (mi2s_dai_data->rate_constraint.list) {
+		snd_pcm_hw_constraint_list(substream->runtime, 0,
+				SNDRV_PCM_HW_PARAM_RATE,
+				&mi2s_dai_data->rate_constraint);
+		snd_pcm_hw_constraint_list(substream->runtime, 0,
+				SNDRV_PCM_HW_PARAM_SAMPLE_BITS,
+				&mi2s_dai_data->bitwidth_constraint);
+	}
+
+	return 0;
+}
+
+
+static int msm_mi2s_get_port_id(u32 mi2s_id, int stream, u16 *port_id)
+{
+	int ret = 0;
+
+	switch (stream) {
+	case SNDRV_PCM_STREAM_PLAYBACK:
+		switch (mi2s_id) {
+		case MSM_PRIM_MI2S:
+			*port_id = MI2S_RX;
+		break;
+		default:
+			ret = -1;
+		break;
+		}
+	break;
+	case SNDRV_PCM_STREAM_CAPTURE:
+		switch (mi2s_id) {
+		case MSM_PRIM_MI2S:
+			*port_id = MI2S_TX;
+		break;
+		default:
+			ret = -1;
+		break;
+		}
+	break;
+	default:
+		ret = -1;
+	break;
+	}
+	pr_debug("%s: port_id = %x\n", __func__, *port_id);
+	return ret;
+}
+
+static int msm_dai_q6_mi2s_prepare(struct snd_pcm_substream *substream,
+		struct snd_soc_dai *dai)
+{
+	struct msm_dai_q6_mi2s_dai_data *mi2s_dai_data =
+		dev_get_drvdata(dai->dev);
+	struct msm_dai_q6_dai_data *dai_data =
+		(substream->stream == SNDRV_PCM_STREAM_PLAYBACK ?
+		 &mi2s_dai_data->rx_dai.mi2s_dai_data :
+		 &mi2s_dai_data->tx_dai.mi2s_dai_data);
+	u16 port_id = 0;
+	int rc = 0;
+
+	dev_dbg(dai->dev, "%s: device name %s dai id %x,port id = %x\n",
+		__func__, dai->name, dai->id, port_id);
+
+	if (msm_mi2s_get_port_id(dai->id, substream->stream,
+				 &port_id) != 0) {
+		dev_err(dai->dev, "%s: Invalid Port ID\n", __func__);
+		return -EINVAL;
+	}
+
+	if (!test_bit(STATUS_PORT_STARTED, dai_data->status_mask)) {
+		/* PORT START should be set if prepare called
+		 * in active state.
+		 */
+		rc = afe_port_start(port_id, &dai_data->port_config,
+				    dai_data->rate);
+
+		if (IS_ERR_VALUE(rc))
+			dev_err(dai->dev, "fail to open AFE port %x\n",
+				dai->id);
+		else
+			set_bit(STATUS_PORT_STARTED,
+				dai_data->status_mask);
+	}
+	return rc;
+}
+
+static int msm_dai_q6_mi2s_hw_params(struct snd_pcm_substream *substream,
+				struct snd_pcm_hw_params *params,
+				struct snd_soc_dai *dai)
+{
+	struct msm_dai_q6_mi2s_dai_data *mi2s_dai_data =
+		dev_get_drvdata(dai->dev);
+	struct msm_dai_q6_mi2s_dai_config *mi2s_dai_config =
+		(substream->stream == SNDRV_PCM_STREAM_PLAYBACK ?
+		&mi2s_dai_data->rx_dai : &mi2s_dai_data->tx_dai);
+	struct msm_dai_q6_dai_data *dai_data = &mi2s_dai_config->mi2s_dai_data;
+
+	dai_data->channels = params_channels(params);
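+	/* Pick an SD-line mode supported by the platform data. */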
+	switch (dai_data->channels) {
+	case 8:
+	case 7:
+		if (mi2s_dai_config->pdata_mi2s_lines < AFE_PORT_I2S_8CHS)
+			goto error_invalid_data;
+		dai_data->port_config.i2s.channel_mode = AFE_PORT_I2S_8CHS;
+		break;
+	case 6:
+	case 5:
+		if (mi2s_dai_config->pdata_mi2s_lines < AFE_PORT_I2S_6CHS)
+			goto error_invalid_data;
+		dai_data->port_config.i2s.channel_mode = AFE_PORT_I2S_6CHS;
+		break;
+	case 4:
+	case 3:
+		if (mi2s_dai_config->pdata_mi2s_lines < AFE_PORT_I2S_QUAD01)
+			goto error_invalid_data;
+		if (mi2s_dai_config->pdata_mi2s_lines == AFE_PORT_I2S_QUAD23)
+			dai_data->port_config.i2s.channel_mode =
+				mi2s_dai_config->pdata_mi2s_lines;
+		else
+			dai_data->port_config.i2s.channel_mode =
+					AFE_PORT_I2S_QUAD01;
+		break;
+	case 2:
+	case 1:
+		if (mi2s_dai_config->pdata_mi2s_lines < AFE_PORT_I2S_SD0)
+			goto error_invalid_data;
+		switch (mi2s_dai_config->pdata_mi2s_lines) {
+		case AFE_PORT_I2S_SD0:
+		case AFE_PORT_I2S_SD1:
+		case AFE_PORT_I2S_SD2:
+		case AFE_PORT_I2S_SD3:
+			dai_data->port_config.i2s.channel_mode =
+				mi2s_dai_config->pdata_mi2s_lines;
+			break;
+		case AFE_PORT_I2S_QUAD01:
+		case AFE_PORT_I2S_6CHS:
+		case AFE_PORT_I2S_8CHS:
+			dai_data->port_config.i2s.channel_mode =
+						AFE_PORT_I2S_SD0;
+			break;
+		case AFE_PORT_I2S_QUAD23:
+			dai_data->port_config.i2s.channel_mode =
+						AFE_PORT_I2S_SD2;
+			break;
+		}
+		if (dai_data->channels == 2)
+			dai_data->port_config.i2s.mono_stereo =
+						MSM_AFE_CH_STEREO;
+		else
+			dai_data->port_config.i2s.mono_stereo = MSM_AFE_MONO;
+		break;
+	default:
+		goto error_invalid_data;
+	}
+	dai_data->rate = params_rate(params);
+	dai_data->port_config.i2s.bit_width = 16;
+	dai_data->bitwidth = 16;
+	dai_data->port_config.i2s.i2s_cfg_minor_version =
+			AFE_API_VERSION_I2S_CONFIG;
+	dai_data->port_config.i2s.sample_rate = dai_data->rate;
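+	/* Constrain the other MI2S direction to this rate and bit width. */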
+	if (!mi2s_dai_data->rate_constraint.list) {
+		mi2s_dai_data->rate_constraint.list = &dai_data->rate;
+		mi2s_dai_data->bitwidth_constraint.list = &dai_data->bitwidth;
+	}
+
+	pr_debug("%s: dai_data->channels = %d, line = %d\n"
+		 ",mono_stereo =%x sample rate = %x\n", __func__,
+		 dai_data->channels, dai_data->port_config.i2s.channel_mode,
+		 dai_data->port_config.i2s.mono_stereo, dai_data->rate);
+	return 0;
+error_invalid_data:
+	pr_debug("%s: dai_data->channels = %d, line = %d\n", __func__,
+		 dai_data->channels, dai_data->port_config.i2s.channel_mode);
+	return -EINVAL;
+}
+
+
+static int msm_dai_q6_mi2s_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
+{
+	struct msm_dai_q6_mi2s_dai_data *mi2s_dai_data =
+	dev_get_drvdata(dai->dev);
+
+	if (test_bit(STATUS_PORT_STARTED,
+	    mi2s_dai_data->rx_dai.mi2s_dai_data.status_mask) ||
+	    test_bit(STATUS_PORT_STARTED,
+	    mi2s_dai_data->tx_dai.mi2s_dai_data.status_mask)) {
+		dev_err(dai->dev, "%s: err chg i2s mode while dai running",
+			__func__);
+		return -EPERM;
+	}
+
+	switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
+	case SND_SOC_DAIFMT_CBS_CFS:
+		mi2s_dai_data->rx_dai.mi2s_dai_data.port_config.i2s.ws_src = 1;
+		mi2s_dai_data->tx_dai.mi2s_dai_data.port_config.i2s.ws_src = 1;
+		break;
+	case SND_SOC_DAIFMT_CBM_CFM:
+		mi2s_dai_data->rx_dai.mi2s_dai_data.port_config.i2s.ws_src = 0;
+		mi2s_dai_data->tx_dai.mi2s_dai_data.port_config.i2s.ws_src = 0;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static void msm_dai_q6_mi2s_shutdown(struct snd_pcm_substream *substream,
+				     struct snd_soc_dai *dai)
+{
+	struct msm_dai_q6_mi2s_dai_data *mi2s_dai_data =
+			dev_get_drvdata(dai->dev);
+	struct msm_dai_q6_dai_data *dai_data =
+		(substream->stream == SNDRV_PCM_STREAM_PLAYBACK ?
+		 &mi2s_dai_data->rx_dai.mi2s_dai_data :
+		 &mi2s_dai_data->tx_dai.mi2s_dai_data);
+	u16 port_id = 0;
+	int rc = 0;
+
+	if (msm_mi2s_get_port_id(dai->id, substream->stream,
+				 &port_id) != 0) {
+		dev_err(dai->dev, "%s: Invalid Port ID\n", __func__);
+	}
+
+	dev_dbg(dai->dev, "%s: device name %s port id = %x\n",
+		__func__, dai->name, port_id);
+
+	if (test_bit(STATUS_PORT_STARTED, dai_data->status_mask)) {
+		rc = afe_close(port_id);
+		if (IS_ERR_VALUE(rc))
+			dev_err(dai->dev, "fail to close AFE port\n");
+		clear_bit(STATUS_PORT_STARTED, dai_data->status_mask);
+	}
+
+	if (!test_bit(STATUS_PORT_STARTED,
+			mi2s_dai_data->rx_dai.mi2s_dai_data.status_mask) &&
+		!test_bit(STATUS_PORT_STARTED,
+			mi2s_dai_data->tx_dai.mi2s_dai_data.status_mask)) {
+		mi2s_dai_data->rate_constraint.list = NULL;
+		mi2s_dai_data->bitwidth_constraint.list = NULL;
+	}
+}
+
+static struct snd_soc_dai_ops msm_dai_q6_mi2s_ops = {
+	.startup	= msm_dai_q6_mi2s_startup,
+	.prepare	= msm_dai_q6_mi2s_prepare,
+	.hw_params	= msm_dai_q6_mi2s_hw_params,
+	.set_fmt	= msm_dai_q6_mi2s_set_fmt,
+	.shutdown	= msm_dai_q6_mi2s_shutdown,
+};
+
+/* Channel min and max are initialized based on platform data */
+static struct snd_soc_dai_driver msm_dai_q6_mi2s_dai = {
+	.playback = {
+		.rates = SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_8000 |
+		SNDRV_PCM_RATE_16000,
+		.formats = SNDRV_PCM_FMTBIT_S16_LE,
+		.rate_min =     8000,
+		.rate_max =     48000,
+	},
+	.capture = {
+		.rates = SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_8000 |
+		SNDRV_PCM_RATE_16000,
+		.formats = SNDRV_PCM_FMTBIT_S16_LE,
+		.rate_min =     8000,
+		.rate_max =     48000,
+	},
+	.ops = &msm_dai_q6_mi2s_ops,
+	.probe = msm_dai_q6_dai_mi2s_probe,
+	.remove = msm_dai_q6_dai_mi2s_remove,
+};
+
+
+static int msm_dai_q6_mi2s_get_lineconfig(u16 sd_lines, u16 *config_ptr,
+					  unsigned int *ch_cnt)
+{
+	u8 num_of_sd_lines;
+
+	num_of_sd_lines = num_of_bits_set(sd_lines);
+	switch (num_of_sd_lines) {
+	case 0:
+		pr_debug("%s: no line is assigned\n", __func__);
+		break;
+	case 1:
+		switch (sd_lines) {
+		case MSM_MI2S_SD0:
+			*config_ptr = AFE_PORT_I2S_SD0;
+			break;
+		case MSM_MI2S_SD1:
+			*config_ptr = AFE_PORT_I2S_SD1;
+			break;
+		case MSM_MI2S_SD2:
+			*config_ptr = AFE_PORT_I2S_SD2;
+			break;
+		case MSM_MI2S_SD3:
+			*config_ptr = AFE_PORT_I2S_SD3;
+			break;
+		default:
+			pr_err("%s: invalid SD line\n",
+				   __func__);
+			goto error_invalid_data;
+		}
+		break;
+	case 2:
+		switch (sd_lines) {
+		case MSM_MI2S_SD0 | MSM_MI2S_SD1:
+			*config_ptr = AFE_PORT_I2S_QUAD01;
+			break;
+		case MSM_MI2S_SD2 | MSM_MI2S_SD3:
+			*config_ptr = AFE_PORT_I2S_QUAD23;
+			break;
+		default:
+			pr_err("%s: invalid SD line\n",
+				   __func__);
+			goto error_invalid_data;
+		}
+		break;
+	case 3:
+		switch (sd_lines) {
+		case MSM_MI2S_SD0 | MSM_MI2S_SD1 | MSM_MI2S_SD2:
+			*config_ptr = AFE_PORT_I2S_6CHS;
+			break;
+		default:
+			pr_err("%s: invalid SD lines\n",
+				   __func__);
+			goto error_invalid_data;
+		}
+		break;
+	case 4:
+		switch (sd_lines) {
+		case MSM_MI2S_SD0 | MSM_MI2S_SD1 | MSM_MI2S_SD2 | MSM_MI2S_SD3:
+			*config_ptr = AFE_PORT_I2S_8CHS;
+			break;
+		default:
+			pr_err("%s: invalid SD lines\n",
+				   __func__);
+			goto error_invalid_data;
+		}
+		break;
+	default:
+		pr_err("%s: invalid SD lines\n", __func__);
+		goto error_invalid_data;
+	}
+	*ch_cnt = num_of_sd_lines;
+	return 0;
+
+error_invalid_data:
+	return -EINVAL;
+}
+
+static int msm_dai_q6_mi2s_platform_data_validation(
+	struct platform_device *pdev, struct snd_soc_dai_driver *dai_driver)
+{
+	struct msm_dai_q6_mi2s_dai_data *dai_data = dev_get_drvdata(&pdev->dev);
+	struct msm_mi2s_pdata *mi2s_pdata =
+			(struct msm_mi2s_pdata *) pdev->dev.platform_data;
+	unsigned int ch_cnt;
+	int rc = 0;
+	u16 sd_line;
+
+	if (mi2s_pdata == NULL) {
+		pr_err("%s: mi2s_pdata NULL", __func__);
+		return -EINVAL;
+	}
+
+	rc = msm_dai_q6_mi2s_get_lineconfig(mi2s_pdata->rx_sd_lines,
+					    &sd_line, &ch_cnt);
+
+	if (IS_ERR_VALUE(rc)) {
+		dev_err(&pdev->dev, "invalid MI2S RX sd line config\n");
+		goto rtn;
+	}
+
+	if (ch_cnt) {
+		dai_data->rx_dai.mi2s_dai_data.port_config.i2s.channel_mode =
+				mi2s_pdata->rx_sd_lines;
+		dai_data->rx_dai.pdata_mi2s_lines = mi2s_pdata->rx_sd_lines;
+		dai_driver->playback.channels_min = 1;
+		dai_driver->playback.channels_max = ch_cnt << 1;
+	} else {
+		dai_driver->playback.channels_min = 0;
+		dai_driver->playback.channels_max = 0;
+	}
+	rc = msm_dai_q6_mi2s_get_lineconfig(mi2s_pdata->tx_sd_lines,
+					    &sd_line, &ch_cnt);
+
+	if (IS_ERR_VALUE(rc)) {
+		dev_err(&pdev->dev, "invalid MI2S TX sd line config\n");
+		goto rtn;
+	}
+
+	if (ch_cnt) {
+		dai_data->tx_dai.mi2s_dai_data.port_config.i2s.channel_mode =
+			mi2s_pdata->tx_sd_lines;
+		dai_data->tx_dai.pdata_mi2s_lines = mi2s_pdata->tx_sd_lines;
+		dai_driver->capture.channels_min = 1;
+		dai_driver->capture.channels_max = ch_cnt << 1;
+	} else {
+		dai_driver->capture.channels_min = 0;
+		dai_driver->capture.channels_max = 0;
+	}
+
+	dev_dbg(&pdev->dev, "%s: playback sdline %x capture sdline %x\n",
+		__func__, dai_data->rx_dai.pdata_mi2s_lines,
+		dai_data->tx_dai.pdata_mi2s_lines);
+	dev_dbg(&pdev->dev, "%s: playback ch_max %d capture ch_mx %d\n",
+		__func__, dai_driver->playback.channels_max,
+		dai_driver->capture.channels_max);
+rtn:
+	return rc;
+}
+
+static __devinit int msm_dai_q6_mi2s_dev_probe(struct platform_device *pdev)
+{
+	struct msm_dai_q6_mi2s_dai_data *dai_data;
+	const char *q6_mi2s_dev_id = "qcom,msm-dai-q6-mi2s-dev-id";
+	u32 tx_line = 0;
+	u32  rx_line = 0;
+	u32 mi2s_intf = 0;
+	struct msm_mi2s_pdata *mi2s_pdata;
+	int rc = 0;
+
+
+	rc = of_property_read_u32(pdev->dev.of_node, q6_mi2s_dev_id,
+				  &mi2s_intf);
+	if (rc) {
+		dev_err(&pdev->dev,
+			"%s: missing %s in dt node\n", __func__,
+			q6_mi2s_dev_id);
+		return rc;
+	}
+
+	if (mi2s_intf > MSM_QUAD_MI2S) {
+		dev_err(&pdev->dev, "%s: Invalid MI2S ID from Device Tree\n",
+			 __func__);
+		return -EINVAL;
+	}
+
+	if (mi2s_intf == MSM_PRIM_MI2S) {
+		dev_set_name(&pdev->dev, "%s.%d", "msm-dai-q6-mi2s",
+			     MSM_PRIM_MI2S);
+		pdev->id = MSM_PRIM_MI2S;
+	}
+
+	mi2s_pdata = kzalloc(sizeof(struct msm_mi2s_pdata), GFP_KERNEL);
+	if (!mi2s_pdata) {
+		dev_err(&pdev->dev, "fail to allocate mi2s_pdata data\n");
+		rc = -ENOMEM;
+		goto rtn;
+	}
+
+	dev_dbg(&pdev->dev, "dev name %s dev id %x\n", dev_name(&pdev->dev),
+		pdev->id);
+
+	rc = of_property_read_u32(pdev->dev.of_node, "qcom,msm-mi2s-rx-lines",
+				  &rx_line);
+	if (rc) {
+		dev_err(&pdev->dev, "%s: Rx line from DT file %s\n", __func__,
+			"qcom,msm-mi2s-rx-lines");
+		return rc;
+	}
+
+	rc = of_property_read_u32(pdev->dev.of_node, "qcom,msm-mi2s-tx-lines",
+				  &tx_line);
+	if (rc) {
+		dev_err(&pdev->dev, "%s: Tx line from DT file %s\n", __func__,
+			"qcom,msm-mi2s-tx-lines");
+		return rc;
+	}
+	dev_dbg(&pdev->dev, "dev name %s Rx line %x , Tx ine %x\n",
+		dev_name(&pdev->dev), rx_line, tx_line);
+	mi2s_pdata->rx_sd_lines = rx_line;
+	mi2s_pdata->tx_sd_lines = tx_line;
+	dai_data = kzalloc(sizeof(struct msm_dai_q6_mi2s_dai_data),
+				GFP_KERNEL);
+	if (!dai_data) {
+		dev_err(&pdev->dev, "fail to allocate dai data\n");
+		rc = -ENOMEM;
+		goto rtn;
+	} else
+		dev_set_drvdata(&pdev->dev, dai_data);
+	pdev->dev.platform_data = mi2s_pdata;
+	rc = msm_dai_q6_mi2s_platform_data_validation(pdev,
+					&msm_dai_q6_mi2s_dai);
+	if (IS_ERR_VALUE(rc))
+		goto err_pdata;
+	dai_data->rate_constraint.count = 1;
+	dai_data->bitwidth_constraint.count = 1;
+	rc = snd_soc_register_dai(&pdev->dev, &msm_dai_q6_mi2s_dai);
+	if (IS_ERR_VALUE(rc))
+		goto err_pdata;
+	return 0;
+err_pdata:
+	dev_err(&pdev->dev, "fail to msm_dai_q6_mi2s_dev_probe\n");
+	kfree(dai_data);
+rtn:
+	return rc;
+}
+
+static __devexit int msm_dai_q6_mi2s_dev_remove(struct platform_device *pdev)
+{
+	snd_soc_unregister_dai(&pdev->dev);
+	return 0;
+}
+
 static int msm_dai_q6_dev_probe(struct platform_device *pdev)
 {
 	int rc, id;
@@ -1281,6 +1916,57 @@
 	},
 };
 
+static int msm_dai_mi2s_q6_probe(struct platform_device *pdev)
+{
+	int rc;
+	rc = of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);
+	if (rc) {
+		dev_err(&pdev->dev, "%s: failed to add child nodes, rc=%d\n",
+			__func__, rc);
+	} else
+		dev_dbg(&pdev->dev, "%s: added child node\n", __func__);
+	return rc;
+}
+
+static int msm_dai_mi2s_q6_remove(struct platform_device *pdev)
+{
+	return 0;
+}
+
+static const struct of_device_id msm_dai_mi2s_dt_match[] = {
+	{ .compatible = "qcom,msm-dai-mi2s", },
+	{ }
+};
+
+MODULE_DEVICE_TABLE(of, msm_dai_mi2s_dt_match);
+
+static struct platform_driver msm_dai_mi2s_q6 = {
+	.probe  = msm_dai_mi2s_q6_probe,
+	.remove = msm_dai_mi2s_q6_remove,
+	.driver = {
+		.name = "msm-dai-mi2s",
+		.owner = THIS_MODULE,
+		.of_match_table = msm_dai_mi2s_dt_match,
+	},
+};
+
+static const struct of_device_id msm_dai_q6_mi2s_dev_dt_match[] = {
+	{ .compatible = "qcom,msm-dai-q6-mi2s", },
+	{ }
+};
+
+MODULE_DEVICE_TABLE(of, msm_dai_q6_mi2s_dev_dt_match);
+
+static struct platform_driver msm_dai_q6_mi2s_driver = {
+	.probe  = msm_dai_q6_mi2s_dev_probe,
+	.remove  = __devexit_p(msm_dai_q6_mi2s_dev_remove),
+	.driver = {
+		.name = "msm-dai-q6-mi2s",
+		.owner = THIS_MODULE,
+		.of_match_table = msm_dai_q6_mi2s_dev_dt_match,
+	},
+};
+
 static int __init msm_dai_q6_init(void)
 {
 	int rc;
@@ -1293,26 +1979,44 @@
 
 	if (rc) {
 		pr_err("%s: fail to register cpu dai driver\n", __func__);
-		platform_driver_unregister(&msm_auxpcm_dev_driver);
-		goto fail;
+		goto aux_pcm_resource_fail;
 	}
 
 	rc = platform_driver_register(&msm_dai_q6);
 	if (rc) {
 		pr_err("%s: fail to register dai q6 driver", __func__);
-		platform_driver_unregister(&msm_auxpcm_dev_driver);
-		platform_driver_unregister(&msm_auxpcm_resource_driver);
-		goto fail;
+		goto dai_q6_fail;
 	}
 
 	rc = platform_driver_register(&msm_dai_q6_dev);
 	if (rc) {
 		pr_err("%s: fail to register dai q6 dev driver", __func__);
-		platform_driver_unregister(&msm_dai_q6);
-		platform_driver_unregister(&msm_auxpcm_dev_driver);
-		platform_driver_unregister(&msm_auxpcm_resource_driver);
-		goto fail;
+		goto dai_q6_dev_fail;
 	}
+
+	rc = platform_driver_register(&msm_dai_q6_mi2s_driver);
+	if (rc) {
+		pr_err("%s: fail to register dai MI2S dev drv\n", __func__);
+		goto dai_q6_mi2s_drv_fail;
+	}
+
+	rc = platform_driver_register(&msm_dai_mi2s_q6);
+	if (rc) {
+		pr_err("%s: fail to register dai MI2S\n", __func__);
+		goto dai_mi2s_q6_fail;
+	}
+	return rc;
+
+dai_mi2s_q6_fail:
+	platform_driver_unregister(&msm_dai_q6_mi2s_driver);
+dai_q6_mi2s_drv_fail:
+	platform_driver_unregister(&msm_dai_q6_dev);
+dai_q6_dev_fail:
+	platform_driver_unregister(&msm_dai_q6);
+dai_q6_fail:
+	platform_driver_unregister(&msm_auxpcm_resource_driver);
+aux_pcm_resource_fail:
+	platform_driver_unregister(&msm_auxpcm_dev_driver);
 fail:
 	return rc;
 }
diff --git a/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c b/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c
index 4ab3340..2e0c229 100644
--- a/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c
+++ b/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c
@@ -1352,6 +1352,9 @@
 	SOC_SINGLE_EXT("SLIM_1_TX", MSM_BACKEND_DAI_MI2S_RX,
 	MSM_BACKEND_DAI_SLIMBUS_1_TX, 1, 0, msm_routing_get_port_mixer,
 	msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("MI2S_TX", MSM_BACKEND_DAI_MI2S_RX,
+	MSM_BACKEND_DAI_MI2S_TX, 1, 0, msm_routing_get_port_mixer,
+	msm_routing_put_port_mixer),
 };
 
 static const struct snd_kcontrol_new fm_switch_mixer_controls =
@@ -1608,6 +1611,9 @@
 	SND_SOC_DAPM_AIF_OUT("MI2S_UL_HL", "MI2S_TX_HOSTLESS Capture",
 		0, 0, 0, 0),
 
+	SND_SOC_DAPM_AIF_OUT("MI2S_DL_HL", "MI2S_RX_HOSTLESS Playback",
+		0, 0, 0, 0),
+
 	/* Backend AIF */
 	/* Stream name equals to backend dai link stream name
 	*/
@@ -1933,6 +1939,8 @@
 	{"INTFM_UL_HL", NULL, "INT_FM_TX"},
 	{"AUX_PCM_RX", NULL, "AUXPCM_DL_HL"},
 	{"AUXPCM_UL_HL", NULL, "AUX_PCM_TX"},
+	{"MI2S_RX", NULL, "MI2S_DL_HL"},
+	{"MI2S_UL_HL", NULL, "MI2S_TX"},
 	{"PCM_RX_DL_HL", "Switch", "SLIM0_DL_HL"},
 	{"PCM_RX", NULL, "PCM_RX_DL_HL"},
 	{"MI2S_UL_HL", NULL, "MI2S_TX"},
@@ -1961,6 +1969,9 @@
 	{"SLIMBUS_1_RX Mixer", "Voice Stub", "VOICE_STUB_DL"},
 	{"SLIMBUS_1_RX", NULL, "SLIMBUS_1_RX Mixer"},
 	{"INTERNAL_BT_SCO_RX_Voice Mixer", "Voice Stub", "VOICE_STUB_DL"},
+	{"MI2S_RX_Voice Mixer", "CSVoice", "CS-VOICE_DL1"},
+	{"MI2S_RX_Voice Mixer", "Voip", "VOIP_DL"},
+	{"MI2S_RX", NULL, "MI2S_RX_Voice Mixer"},
 	{"MI2S_RX_Voice Mixer", "Voice Stub", "VOICE_STUB_DL"},
 	{"MI2S_RX", NULL, "MI2S_RX_Voice Mixer"},
 
@@ -1983,6 +1994,7 @@
 	{"SEC_I2S_RX", NULL, "SEC_I2S_RX Port Mixer"},
 
 	{"MI2S_RX Port Mixer", "SLIM_1_TX", "SLIMBUS_1_TX"},
+	{"MI2S_RX Port Mixer", "MI2S_TX", "MI2S_TX"},
 	{"MI2S_RX", NULL, "MI2S_RX Port Mixer"},
 	/* Backend Enablement */
 
diff --git a/sound/soc/msm/qdsp6v2/q6adm.c b/sound/soc/msm/qdsp6v2/q6adm.c
index 6acc136..cc69123 100644
--- a/sound/soc/msm/qdsp6v2/q6adm.c
+++ b/sound/soc/msm/qdsp6v2/q6adm.c
@@ -120,10 +120,10 @@
 			}
 			switch (payload[0]) {
 			case ADM_CMD_SET_PP_PARAMS_V5:
+				pr_debug("%s: ADM_CMD_SET_PP_PARAMS_V5\n",
+					__func__);
 				if (rtac_make_adm_callback(
 					payload, data->payload_size)) {
-					pr_debug("%s: payload[0]: 0x%x\n",
-						__func__, payload[0]);
 					break;
 				}
 			case ADM_CMD_DEVICE_CLOSE_V5:
@@ -148,6 +148,20 @@
 					wake_up(&this_adm.wait[index]);
 				}
 				break;
+			case ADM_CMD_GET_PP_PARAMS_V5:
+				pr_debug("%s: ADM_CMD_GET_PP_PARAMS_V5\n",
+					__func__);
+				/* Should only come here if there is an APR */
+				/* error or malformed APR packet. Otherwise */
+				/* response will be returned as */
+				/* ADM_CMDRSP_GET_PP_PARAMS_V5 */
+				if (payload[1] != 0) {
+					pr_err("%s: ADM get param error = %d, resuming\n",
+						__func__, payload[1]);
+					rtac_make_adm_callback(payload,
+						data->payload_size);
+				}
+				break;
 			default:
 				pr_err("%s: Unknown Cmd: 0x%x\n", __func__,
 								payload[0]);
@@ -174,8 +188,11 @@
 			wake_up(&this_adm.wait[index]);
 			}
 			break;
-		case ADM_CMD_GET_PP_PARAMS_V5:
-			pr_debug("%s: ADM_CMD_GET_PP_PARAMS_V5\n", __func__);
+		case ADM_CMDRSP_GET_PP_PARAMS_V5:
+			pr_debug("%s: ADM_CMDRSP_GET_PP_PARAMS_V5\n", __func__);
+			if (payload[0] != 0)
+				pr_err("%s: ADM_CMDRSP_GET_PP_PARAMS_V5 returned error = 0x%x\n",
+					__func__, payload[0]);
 			rtac_make_adm_callback(payload,
 				data->payload_size);
 			break;
@@ -669,6 +686,11 @@
 	for (i = 0; i < num_copps; i++)
 		send_adm_cal(port_id[i], path);
 
+	for (i = 0; i < num_copps; i++)
+		rtac_add_adm_device(port_id[i],	atomic_read(&this_adm.copp_id
+			[afe_get_port_index(port_id[i])]),
+			path, session_id);
+
 fail_cmd:
 	kfree(matrix_map);
 	return ret;
diff --git a/sound/soc/msm/qdsp6v2/q6afe.c b/sound/soc/msm/qdsp6v2/q6afe.c
index 4819e0a..8d8ff5d 100644
--- a/sound/soc/msm/qdsp6v2/q6afe.c
+++ b/sound/soc/msm/qdsp6v2/q6afe.c
@@ -630,6 +630,11 @@
 	int ret = 0;
 	int index = 0;
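+	/* Map legacy MI2S port IDs to the primary MI2S AFE ports. */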
 
+	if (rx_port == MI2S_RX)
+		rx_port = AFE_PORT_ID_PRIMARY_MI2S_RX;
+	if (tx_port == MI2S_TX)
+		tx_port = AFE_PORT_ID_PRIMARY_MI2S_TX;
+
 	ret = afe_q6_interface_prepare();
 	if (ret != 0)
 		return ret;
diff --git a/sound/soc/msm/qdsp6v2/q6asm.c b/sound/soc/msm/qdsp6v2/q6asm.c
index 2d52c43..0ddaabe 100644
--- a/sound/soc/msm/qdsp6v2/q6asm.c
+++ b/sound/soc/msm/qdsp6v2/q6asm.c
@@ -932,6 +932,10 @@
 			__func__, payload[0], payload[1]);
 	if (data->opcode == APR_BASIC_RSP_RESULT) {
 		token = data->token;
+		if (payload[1] != 0) {
+			pr_err("%s: cmd = 0x%x returned error = 0x%x\n",
+				__func__, payload[0], payload[1]);
+		}
 		switch (payload[0]) {
 		case ASM_STREAM_CMD_SET_PP_PARAMS_V2:
 			if (rtac_make_asm_callback(ac->session, payload,
@@ -965,6 +969,20 @@
 				ac->cb(data->opcode, data->token,
 					(uint32_t *)data->payload, ac->priv);
 			break;
+		case ASM_STREAM_CMD_GET_PP_PARAMS_V2:
+			pr_debug("%s: ASM_STREAM_CMD_GET_PP_PARAMS_V2\n",
+				__func__);
+			/* Should only come here if there is an APR */
+			/* error or malformed APR packet. Otherwise */
+			/* response will be returned as */
+			/* ASM_STREAM_CMDRSP_GET_PP_PARAMS_V2 */
+			if (payload[1] != 0) {
+				pr_err("%s: ASM get param error = %d, resuming\n",
+					__func__, payload[1]);
+				rtac_make_asm_callback(ac->session, payload,
+							data->payload_size);
+			}
+			break;
 		default:
 			pr_debug("%s:command[0x%x] not expecting rsp\n",
 							__func__, payload[0]);
@@ -1008,6 +1026,10 @@
 		break;
 	}
 	case ASM_STREAM_CMDRSP_GET_PP_PARAMS_V2:
+		pr_debug("%s: ASM_STREAM_CMDRSP_GET_PP_PARAMS_V2\n", __func__);
+		if (payload[0] != 0)
+			pr_err("%s: ASM_STREAM_CMDRSP_GET_PP_PARAMS_V2 returned error = 0x%x\n",
+				__func__, payload[0]);
 		rtac_make_asm_callback(ac->session, payload,
 			data->payload_size);
 		break;
@@ -2347,7 +2369,7 @@
 		buf_node = list_entry(ptr, struct asm_buffer_node,
 						list);
 		if (buf_node->buf_addr_lsw == buf_add) {
-			pr_info("%s: Found the element\n", __func__);
+			pr_debug("%s: Found the element\n", __func__);
 			mem_unmap.mem_map_handle = buf_node->mmap_hdl;
 			break;
 		}
@@ -2365,7 +2387,7 @@
 	rc = wait_event_timeout(ac->cmd_wait,
 			(atomic_read(&ac->cmd_state) == 0), 5 * HZ);
 	if (!rc) {
-		pr_err("timeout. waited for memory_map\n");
+		pr_err("timeout. waited for memory_unmap\n");
 		rc = -EINVAL;
 		goto fail_cmd;
 	}
@@ -2375,6 +2397,7 @@
 		if (buf_node->buf_addr_lsw == buf_add) {
 			list_del(&buf_node->list);
 			kfree(buf_node);
+			break;
 		}
 	}
 
diff --git a/sound/soc/msm/qdsp6v2/q6voice.c b/sound/soc/msm/qdsp6v2/q6voice.c
index 338cfe3..b799e59 100644
--- a/sound/soc/msm/qdsp6v2/q6voice.c
+++ b/sound/soc/msm/qdsp6v2/q6voice.c
@@ -1,4 +1,4 @@
-/*  Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+/*  Copyright (c) 2012, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -21,6 +21,7 @@
 #include <mach/qdsp6v2/audio_acdb.h>
 #include <mach/qdsp6v2/rtac.h>
 #include <mach/socinfo.h>
+#include <mach/qdsp6v2/apr_tal.h>
 
 #include "sound/apr_audio-v2.h"
 #include "sound/q6afe-v2.h"
@@ -208,6 +209,8 @@
 
 static int voice_apr_register(void)
 {
+	void *modem_mvm, *modem_cvs, *modem_cvp;
+
 	pr_debug("%s\n", __func__);
 
 	mutex_lock(&common.common_lock);
@@ -224,6 +227,18 @@
 			pr_err("%s: Unable to register MVM\n", __func__);
 			goto err;
 		}
+
+		/*
+		 * Register with modem for SSR callback. The APR handle
+		 * is not stored since it is used only to receive notifications
+		 * and not for communication
+		 */
+		modem_mvm = apr_register("MODEM", "MVM",
+						qdsp_mvm_callback,
+						0xFFFFFFFF, &common);
+		if (modem_mvm == NULL)
+			pr_err("%s: Unable to register MVM for MODEM\n",
+					__func__);
 	}
 
 	if (common.apr_q6_cvs == NULL) {
@@ -237,6 +252,18 @@
 			pr_err("%s: Unable to register CVS\n", __func__);
 			goto err;
 		}
+		rtac_set_voice_handle(RTAC_CVS, common.apr_q6_cvs);
+		/*
+		 * Register with modem for SSR callback. The APR handle
+		 * is not stored since it is used only to receive notifications
+		 * and not for communication
+		 */
+		modem_cvs = apr_register("MODEM", "CVS",
+						qdsp_cvs_callback,
+						0xFFFFFFFF, &common);
+		if (modem_cvs == NULL)
+			pr_err("%s: Unable to register CVS for MODEM\n",
+					__func__);
 
 	}
 
@@ -251,6 +278,18 @@
 			pr_err("%s: Unable to register CVP\n", __func__);
 			goto err;
 		}
+		rtac_set_voice_handle(RTAC_CVP, common.apr_q6_cvp);
+		/*
+		 * Register with modem for SSR callback. The APR handle
+		 * is not stored since it is used only to receive notifications
+		 * and not for communication
+		 */
+		modem_cvp = apr_register("MODEM", "CVP",
+						qdsp_cvp_callback,
+						0xFFFFFFFF, &common);
+		if (modem_cvp == NULL)
+			pr_err("%s: Unable to register CVP for MODEM\n",
+					__func__);
 
 	}
 
@@ -262,6 +301,7 @@
 	if (common.apr_q6_cvs != NULL) {
 		apr_deregister(common.apr_q6_cvs);
 		common.apr_q6_cvs = NULL;
+		rtac_set_voice_handle(RTAC_CVS, NULL);
 	}
 	if (common.apr_q6_mvm != NULL) {
 		apr_deregister(common.apr_q6_mvm);
@@ -605,8 +645,9 @@
 	cvs_handle = voice_get_cvs_handle(v);
 
 	/* MVM, CVS sessions are destroyed only for Full control sessions. */
-	if (is_voip_session(v->session_id)) {
-		pr_debug("%s: MVM detach stream\n", __func__);
+	if (is_voip_session(v->session_id) || v->voc_state == VOC_ERROR) {
+		pr_debug("%s: MVM detach stream, VOC_STATE: %d\n", __func__,
+				v->voc_state);
 
 		/* Detach voice stream. */
 		detach_stream.hdr.hdr_field =
@@ -2176,6 +2217,10 @@
 	if (v->rec_info.rec_enable)
 		voice_cvs_start_record(v, v->rec_info.rec_mode);
 
+	rtac_add_voice(voice_get_cvs_handle(v),
+		voice_get_cvp_handle(v),
+		v->dev_rx.port_id, v->dev_tx.port_id,
+		v->session_id);
 
 	return 0;
 
@@ -2526,6 +2571,7 @@
 		goto fail;
 	}
 
+	rtac_remove_voice(voice_get_cvs_handle(v));
 	cvp_handle = 0;
 	voice_set_cvp_handle(v, cvp_handle);
 	return 0;
@@ -3281,6 +3327,7 @@
 	mutex_lock(&v->lock);
 
 	if (v->voc_state == VOC_RUN) {
+		rtac_remove_voice(voice_get_cvs_handle(v));
 		/* send cmd to dsp to disable vocproc */
 		ret = voice_send_disable_vocproc_cmd(v);
 		if (ret < 0) {
@@ -3324,32 +3371,36 @@
 		voice_send_cvp_register_cal_cmd(v);
 		voice_send_cvp_register_vol_cal_cmd(v);
 
-	ret = voice_send_enable_vocproc_cmd(v);
-	if (ret < 0) {
-		pr_err("%s: enable vocproc failed %d\n", __func__, ret);
-		goto fail;
-	}
+		ret = voice_send_enable_vocproc_cmd(v);
+		if (ret < 0) {
+			pr_err("%s: enable vocproc failed %d\n", __func__, ret);
+			goto fail;
+		}
 
-	/* Send tty mode if tty device is used */
-	voice_send_tty_mode_cmd(v);
+		/* Send tty mode if tty device is used */
+		voice_send_tty_mode_cmd(v);
 
-	/* enable widevoice if wv_enable is set */
-	if (v->wv_enable)
-		voice_send_set_widevoice_enable_cmd(v);
+		/* enable widevoice if wv_enable is set */
+		if (v->wv_enable)
+			voice_send_set_widevoice_enable_cmd(v);
 
-	/* enable slowtalk */
-	if (v->st_enable)
-		voice_send_set_pp_enable_cmd(v,
+		/* enable slowtalk */
+		if (v->st_enable)
+			voice_send_set_pp_enable_cmd(v,
 					     MODULE_ID_VOICE_MODULE_ST,
 					     v->st_enable);
 
-	/* enable FENS */
-	if (v->fens_enable)
-		voice_send_set_pp_enable_cmd(v,
+		/* enable FENS */
+		if (v->fens_enable)
+			voice_send_set_pp_enable_cmd(v,
 					     MODULE_ID_VOICE_MODULE_FENS,
 					     v->fens_enable);
 
-	v->voc_state = VOC_RUN;
+		rtac_add_voice(voice_get_cvs_handle(v),
+			voice_get_cvp_handle(v),
+			v->dev_rx.port_id, v->dev_tx.port_id,
+			v->session_id);
+		v->voc_state = VOC_RUN;
 	}
 
 fail:
@@ -3702,7 +3753,9 @@
 
 	mutex_lock(&v->lock);
 
-	if (v->voc_state == VOC_RUN) {
+	if (v->voc_state == VOC_RUN || v->voc_state == VOC_ERROR) {
+		pr_debug("%s: VOC_STATE: %d\n", __func__, v->voc_state);
+
 		ret = voice_destroy_vocproc(v);
 		if (ret < 0)
 			pr_err("%s:  destroy voice failed\n", __func__);
@@ -3727,6 +3780,13 @@
 
 	mutex_lock(&v->lock);
 
+	if (v->voc_state == VOC_ERROR) {
+		pr_debug("%s: VOC in ERR state\n", __func__);
+
+		voice_destroy_mvm_cvs_session(v);
+		v->voc_state = VOC_INIT;
+	}
+
 	if ((v->voc_state == VOC_INIT) ||
 		(v->voc_state == VOC_RELEASE)) {
 		ret = voice_apr_register();
@@ -3817,6 +3877,7 @@
 	struct common_data *c = NULL;
 	struct voice_data *v = NULL;
 	int i = 0;
+	uint16_t session_id = 0;
 
 	if ((data == NULL) || (priv == NULL)) {
 		pr_err("%s: data or priv is NULL\n", __func__);
@@ -3825,6 +3886,36 @@
 
 	c = priv;
 
+	pr_debug("%s: Payload Length = %d, opcode=%x\n", __func__,
+		data->payload_size, data->opcode);
+
+	if (data->opcode == RESET_EVENTS) {
+
+		if (data->reset_proc == APR_DEST_MODEM) {
+			pr_debug("%s: Received MODEM reset event\n", __func__);
+
+			session_id = voc_get_session_id(VOICE_SESSION_NAME);
+			v = voice_get_session(session_id);
+			if (v != NULL)
+				v->voc_state = VOC_ERROR;
+
+			session_id = voc_get_session_id(VOLTE_SESSION_NAME);
+			v = voice_get_session(session_id);
+			if (v != NULL)
+				v->voc_state = VOC_ERROR;
+		} else {
+			pr_debug("%s: Reset event received in Voice service\n",
+				__func__);
+			apr_reset(c->apr_q6_mvm);
+			c->apr_q6_mvm = NULL;
+
+			/* Sub-system restart is applicable to all sessions. */
+			for (i = 0; i < MAX_VOC_SESSIONS; i++)
+				c->voice[i].mvm_handle = 0;
+		}
+		return 0;
+	}
+
 	pr_debug("%s: session_id 0x%x\n", __func__, data->dest_port);
 
 	v = voice_get_session(data->dest_port);
@@ -3834,23 +3925,6 @@
 		return -EINVAL;
 	}
 
-	pr_debug("%s: Payload Length = %d, opcode=%x\n", __func__,
-		data->payload_size, data->opcode);
-
-	if (data->opcode == RESET_EVENTS) {
-		pr_debug("%s: Reset event received in Voice service\n",
-			 __func__);
-
-		apr_reset(c->apr_q6_mvm);
-		c->apr_q6_mvm = NULL;
-
-		/* Sub-system restart is applicable to all sessions. */
-		for (i = 0; i < MAX_VOC_SESSIONS; i++)
-			c->voice[i].mvm_handle = 0;
-
-		return 0;
-	}
-
 	if (data->opcode == APR_BASIC_RSP_RESULT) {
 		if (data->payload_size) {
 			ptr = data->payload;
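
This hunk (and its CVS/CVP counterparts below) moves the RESET_EVENTS handling ahead of the voice_get_session() lookup: a restart notification must not be dropped by the NULL-session bail-out, a modem-side restart flags both the voice and VoLTE sessions no matter which port the event arrived on, and only a DSP-side reset tears down the local APR handle. A compressed, standalone sketch of that ordering (types, opcodes and values are illustrative, not the driver's):

#include <stddef.h>
#include <stdio.h>

#define RESET_EVENTS	1	/* illustrative opcode values */
#define DEST_MODEM	2

enum voc_state { VOC_INIT, VOC_RUN, VOC_ERROR };

struct msg {
	int opcode;
	int reset_proc;
	int dest_port;
};

struct session {
	int port;
	enum voc_state state;
};

static struct session sessions[] = { { 1, VOC_RUN }, { 2, VOC_RUN } };
#define NUM_SESSIONS (sizeof(sessions) / sizeof(sessions[0]))

static struct session *get_session(int port)
{
	for (size_t i = 0; i < NUM_SESSIONS; i++)
		if (sessions[i].port == port)
			return &sessions[i];
	return NULL;
}

static int callback(const struct msg *m)
{
	/* Handle subsystem restart before resolving the session: the port
	 * may no longer map to anything while the remote side is down. */
	if (m->opcode == RESET_EVENTS) {
		if (m->reset_proc == DEST_MODEM) {
			/* Modem restart: keep local handles, flag sessions. */
			for (size_t i = 0; i < NUM_SESSIONS; i++)
				sessions[i].state = VOC_ERROR;
		} else {
			/* DSP restart: the local APR handle would be reset
			 * and the per-session handles cleared here. */
		}
		return 0;
	}

	struct session *s = get_session(m->dest_port);

	if (!s)
		return -1;
	/* ... normal command/event handling ... */
	return 0;
}

int main(void)
{
	struct msg reset = { RESET_EVENTS, DEST_MODEM, 0 };

	callback(&reset);
	printf("session state: %d (%d == VOC_ERROR)\n",
	       sessions[0].state, VOC_ERROR);
	return 0;
}
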
@@ -3935,6 +4009,7 @@
 	struct common_data *c = NULL;
 	struct voice_data *v = NULL;
 	int i = 0;
+	uint16_t session_id = 0;
 
 	if ((data == NULL) || (priv == NULL)) {
 		pr_err("%s: data or priv is NULL\n", __func__);
@@ -3944,6 +4019,35 @@
 	c = priv;
 
 	pr_debug("%s: session_id 0x%x\n", __func__, data->dest_port);
+	pr_debug("%s: Payload Length = %d, opcode=%x\n", __func__,
+		data->payload_size, data->opcode);
+
+	if (data->opcode == RESET_EVENTS) {
+		if (data->reset_proc == APR_DEST_MODEM) {
+			pr_debug("%s: Received Modem reset event\n", __func__);
+
+			session_id = voc_get_session_id(VOICE_SESSION_NAME);
+			v = voice_get_session(session_id);
+			if (v != NULL)
+				v->voc_state = VOC_ERROR;
+
+			session_id = voc_get_session_id(VOLTE_SESSION_NAME);
+			v = voice_get_session(session_id);
+			if (v != NULL)
+				v->voc_state = VOC_ERROR;
+		} else {
+			pr_debug("%s: Reset event received in Voice service\n",
+				 __func__);
+
+			apr_reset(c->apr_q6_cvs);
+			c->apr_q6_cvs = NULL;
+
+			/* Sub-system restart is applicable to all sessions. */
+			for (i = 0; i < MAX_VOC_SESSIONS; i++)
+				c->voice[i].cvs_handle = 0;
+		}
+		return 0;
+	}
 
 	v = voice_get_session(data->dest_port);
 	if (v == NULL) {
@@ -3952,28 +4056,15 @@
 		return -EINVAL;
 	}
 
-	pr_debug("%s: Payload Length = %d, opcode=%x\n", __func__,
-		data->payload_size, data->opcode);
-
-	if (data->opcode == RESET_EVENTS) {
-		pr_debug("%s: Reset event received in Voice service\n",
-			 __func__);
-
-		apr_reset(c->apr_q6_cvs);
-		c->apr_q6_cvs = NULL;
-
-		/* Sub-system restart is applicable to all sessions. */
-		for (i = 0; i < MAX_VOC_SESSIONS; i++)
-			c->voice[i].cvs_handle = 0;
-
-		return 0;
-	}
-
 	if (data->opcode == APR_BASIC_RSP_RESULT) {
 		if (data->payload_size) {
 			ptr = data->payload;
 
 			pr_info("%x %x\n", ptr[0], ptr[1]);
+			if (ptr[1] != 0) {
+				pr_err("%s: cmd = 0x%x returned error = 0x%x\n",
+					__func__, ptr[0], ptr[1]);
+			}
 			/*response from  CVS */
 			switch (ptr[0]) {
 			case VSS_ISTREAM_CMD_CREATE_PASSIVE_CONTROL_SESSION:
@@ -4010,6 +4101,24 @@
 				wake_up(&v->cvs_wait);
 				break;
 			case VOICE_CMD_SET_PARAM:
+				pr_debug("%s: VOICE_CMD_SET_PARAM\n", __func__);
+				rtac_make_voice_callback(RTAC_CVS, ptr,
+							data->payload_size);
+				break;
+			case VOICE_CMD_GET_PARAM:
+				pr_debug("%s: VOICE_CMD_GET_PARAM\n",
+					__func__);
+				/* Should only come here if there is an APR */
+				/* error or malformed APR packet. Otherwise */
+				/* response will be returned as */
+				/* VOICE_EVT_GET_PARAM_ACK */
+				if (ptr[1] != 0) {
+					pr_err("%s: CVS get param error = %d, resuming\n",
+						__func__, ptr[1]);
+					rtac_make_voice_callback(RTAC_CVS,
+						data->payload,
+						data->payload_size);
+				}
 				break;
 			default:
 				pr_debug("%s: cmd = 0x%x\n", __func__, ptr[0]);
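
The comment block above spells out the protocol assumption: a GET_PARAM normally completes through VOICE_EVT_GET_PARAM_ACK, and a basic command result for it only appears on an APR error, in which case the payload is still forwarded to RTAC so the waiting reader resumes instead of timing out. A small standalone sketch of those two completion paths (opcodes and names illustrative only):

#include <stdio.h>

/* Illustrative opcodes only, not the APR values. */
#define BASIC_RSP_RESULT	0x1
#define GET_PARAM		0x2
#define GET_PARAM_ACK		0x3

/* Stand-in for the RTAC callback: wakes whoever is waiting for the data. */
static void resume_rtac(const char *why)
{
	printf("rtac resumed: %s\n", why);
}

/*
 * Two ways a GET_PARAM completes: the normal ack event (which may itself
 * carry an error status), or an APR error reported through the basic
 * command result. Either way the waiter is resumed rather than left to
 * time out.
 */
static void handle_msg(int opcode, int cmd, int status)
{
	if (opcode == GET_PARAM_ACK)
		resume_rtac(status ? "ack carrying an error status" : "ack");
	else if (opcode == BASIC_RSP_RESULT && cmd == GET_PARAM && status)
		resume_rtac("APR error on GET_PARAM");
}

int main(void)
{
	handle_msg(BASIC_RSP_RESULT, GET_PARAM, 5);	/* error path */
	handle_msg(GET_PARAM_ACK, 0, 0);		/* normal path */
	return 0;
}
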
@@ -4125,7 +4234,16 @@
 		pr_debug("Recd VSS_ISTREAM_EVT_NOT_READY\n");
 	} else if (data->opcode == VSS_ISTREAM_EVT_READY) {
 		pr_debug("Recd VSS_ISTREAM_EVT_READY\n");
-	} else
+	} else if (data->opcode == VOICE_EVT_GET_PARAM_ACK) {
+		pr_debug("%s: VOICE_EVT_GET_PARAM_ACK\n", __func__);
+		ptr = data->payload;
+		if (ptr[0] != 0) {
+			pr_err("%s: VOICE_EVT_GET_PARAM_ACK returned error = 0x%x\n",
+				__func__, ptr[0]);
+		}
+		rtac_make_voice_callback(RTAC_CVS, data->payload,
+					data->payload_size);
+	} else
 		pr_err("Unknown opcode 0x%x\n", data->opcode);
 
 fail:
@@ -4138,6 +4256,7 @@
 	struct common_data *c = NULL;
 	struct voice_data *v = NULL;
 	int i = 0;
+	uint16_t session_id = 0;
 
 	if ((data == NULL) || (priv == NULL)) {
 		pr_err("%s: data or priv is NULL\n", __func__);
@@ -4146,6 +4265,33 @@
 
 	c = priv;
 
+	if (data->opcode == RESET_EVENTS) {
+		if (data->reset_proc == APR_DEST_MODEM) {
+			pr_debug("%s: Received Modem reset event\n", __func__);
+
+			session_id = voc_get_session_id(VOICE_SESSION_NAME);
+			v = voice_get_session(session_id);
+			if (v != NULL)
+				v->voc_state = VOC_ERROR;
+
+			session_id = voc_get_session_id(VOLTE_SESSION_NAME);
+			v = voice_get_session(session_id);
+			if (v != NULL)
+				v->voc_state = VOC_ERROR;
+		} else {
+			pr_debug("%s: Reset event received in Voice service\n",
+				 __func__);
+
+			apr_reset(c->apr_q6_cvp);
+			c->apr_q6_cvp = NULL;
+
+			/* Sub-system restart is applicable to all sessions. */
+			for (i = 0; i < MAX_VOC_SESSIONS; i++)
+				c->voice[i].cvp_handle = 0;
+		}
+		return 0;
+	}
+
 	v = voice_get_session(data->dest_port);
 	if (v == NULL) {
 		pr_err("%s: v is NULL\n", __func__);
@@ -4153,28 +4299,15 @@
 		return -EINVAL;
 	}
 
-	pr_debug("%s: Payload Length = %d, opcode=%x\n", __func__,
-		data->payload_size, data->opcode);
-
-	if (data->opcode == RESET_EVENTS) {
-		pr_debug("%s: Reset event received in Voice service\n",
-			 __func__);
-
-		apr_reset(c->apr_q6_cvp);
-		c->apr_q6_cvp = NULL;
-
-		/* Sub-system restart is applicable to all sessions. */
-		for (i = 0; i < MAX_VOC_SESSIONS; i++)
-			c->voice[i].cvp_handle = 0;
-
-		return 0;
-	}
-
 	if (data->opcode == APR_BASIC_RSP_RESULT) {
 		if (data->payload_size) {
 			ptr = data->payload;
 
 			pr_info("%x %x\n", ptr[0], ptr[1]);
+			if (ptr[1] != 0) {
+				pr_err("%s: cmd = 0x%x returned error = 0x%x\n",
+					__func__, ptr[0], ptr[1]);
+			}
 			switch (ptr[0]) {
 			case VSS_IVOCPROC_CMD_CREATE_FULL_CONTROL_SESSION_V2:
 			/*response from  CVP */
@@ -4206,6 +4339,24 @@
 				wake_up(&v->cvp_wait);
 				break;
 			case VOICE_CMD_SET_PARAM:
+				pr_debug("%s: VOICE_CMD_SET_PARAM\n", __func__);
+				rtac_make_voice_callback(RTAC_CVP, ptr,
+							data->payload_size);
+				break;
+			case VOICE_CMD_GET_PARAM:
+				pr_debug("%s: VOICE_CMD_GET_PARAM\n",
+					__func__);
+				/* Should only come here if there is an APR */
+				/* error or malformed APR packet. Otherwise */
+				/* response will be returned as */
+				/* VOICE_EVT_GET_PARAM_ACK */
+				if (ptr[1] != 0) {
+					pr_err("%s: CVP get param error = %d, resuming\n",
+						__func__, ptr[1]);
+					rtac_make_voice_callback(RTAC_CVP,
+						data->payload,
+						data->payload_size);
+				}
 				break;
 			default:
 				pr_debug("%s: not match cmd = 0x%x\n",
@@ -4213,6 +4364,15 @@
 				break;
 			}
 		}
+	} else if (data->opcode == VOICE_EVT_GET_PARAM_ACK) {
+		pr_debug("%s: VOICE_EVT_GET_PARAM_ACK\n", __func__);
+		ptr = data->payload;
+		if (ptr[0] != 0) {
+			pr_err("%s: VOICE_EVT_GET_PARAM_ACK returned error = 0x%x\n",
+				__func__, ptr[0]);
+		}
+		rtac_make_voice_callback(RTAC_CVP, data->payload,
+			data->payload_size);
 	}
 	return 0;
 }
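
Taken together, the q6voice.c changes add a recovery path around the new VOC_ERROR state: a modem RESET_EVENTS only flags the affected sessions; the teardown path now accepts VOC_ERROR as well as VOC_RUN; and the next setup call, on finding VOC_ERROR, destroys the stale MVM/CVS session and drops back to VOC_INIT before re-registering. A compressed, standalone sketch of those transitions (function names hypothetical, not the driver's entry points):

#include <stdio.h>

/* Mirrors the states in q6voice.h; values here are only illustrative. */
enum voc_state { VOC_INIT, VOC_RUN, VOC_CHANGE, VOC_RELEASE, VOC_ERROR };

struct voice { enum voc_state state; };

/* Modem restart notification: only flag the session. */
static void on_modem_reset(struct voice *v)
{
	v->state = VOC_ERROR;
}

/* Teardown path: accepts an errored session as well as a running one. */
static void voice_end(struct voice *v)
{
	if (v->state == VOC_RUN || v->state == VOC_ERROR) {
		/* ... destroy vocproc, detach stream, destroy MVM/CVS ... */
		v->state = VOC_RELEASE;
	}
}

/* Setup path: recover from VOC_ERROR before the normal bring-up. */
static int voice_start(struct voice *v)
{
	if (v->state == VOC_ERROR) {
		/* ... drop the stale MVM/CVS session state ... */
		v->state = VOC_INIT;
	}
	if (v->state == VOC_INIT || v->state == VOC_RELEASE) {
		/* ... re-register APR, create sessions, enable vocproc ... */
		v->state = VOC_RUN;
		return 0;
	}
	return -1;
}

int main(void)
{
	struct voice v = { VOC_RUN };

	on_modem_reset(&v);		/* RESET_EVENTS from the modem */
	voice_start(&v);		/* next call setup recovers */
	printf("state after recovery: %d (%d == VOC_RUN)\n", v.state, VOC_RUN);

	voice_end(&v);
	printf("state after teardown: %d (%d == VOC_RELEASE)\n",
	       v.state, VOC_RELEASE);
	return 0;
}
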
diff --git a/sound/soc/msm/qdsp6v2/q6voice.h b/sound/soc/msm/qdsp6v2/q6voice.h
index 9f82694..aef463f 100644
--- a/sound/soc/msm/qdsp6v2/q6voice.h
+++ b/sound/soc/msm/qdsp6v2/q6voice.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -67,6 +67,7 @@
 	VOC_RUN,
 	VOC_CHANGE,
 	VOC_RELEASE,
+	VOC_ERROR,
 };
 
 struct mem_buffer {
@@ -884,10 +885,6 @@
 #define VSS_MEDIA_ID_4GV_WB_MODEM	0x00010FC4
 /*CDMA EVRC-WB vocoder modem format */
 
-#define VOICE_CMD_SET_PARAM				0x00011006
-#define VOICE_CMD_GET_PARAM				0x00011007
-#define VOICE_EVT_GET_PARAM_ACK				0x00011008
-
 #define VSS_IVOCPROC_CMD_CREATE_FULL_CONTROL_SESSION_V2	0x000112BF
 
 struct vss_ivocproc_cmd_create_full_control_session_v2_t {
diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c
index 4c6a5a4..f02e5c5 100644
--- a/tools/perf/util/parse-events.c
+++ b/tools/perf/util/parse-events.c
@@ -297,9 +297,37 @@
 {
 	u64 config = evsel->attr.config;
 	int type = evsel->attr.type;
+	char *buf;
+	size_t buf_sz;
 
-	if (evsel->name)
+	if (evsel->name) {
+		/* Make new space for the modifier bits. */
+		buf_sz = strlen(evsel->name) + 3;
+		buf = malloc(buf_sz);
+		if (!buf)
+			/*
+			 * Always return what was already in 'name'.
+			 */
+			return evsel->name;
+
+		strlcpy(buf, evsel->name, buf_sz);
+
+		free(evsel->name);
+
+		evsel->name = buf;
+
+		/* User mode profiling. */
+		if (!evsel->attr.exclude_user && evsel->attr.exclude_kernel)
+			strlcpy(&evsel->name[strlen(evsel->name)], ":u",
+					buf_sz);
+		/* Kernel mode profiling. */
+		else if (!evsel->attr.exclude_kernel &&
+				evsel->attr.exclude_user)
+			strlcpy(&evsel->name[strlen(evsel->name)], ":k",
+					buf_sz);
+
 		return evsel->name;
+	}
 
 	return __event_name(type, config, NULL);
 }
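
The perf change rebuilds evsel->name in a slightly larger buffer and appends ":u" or ":k" based on the exclude_user/exclude_kernel attribute bits. Below is a standalone sketch of the same string handling, using snprintf (strlcpy is a BSD/libbsd helper rather than standard C) and passing the remaining space explicitly; the patch's strlcpy calls pass the full buffer size when appending, which is safe here only because the three extra bytes reserved always hold the two-character suffix plus the terminator.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/*
 * Illustrative only: build "<name>:u" or "<name>:k" the way the perf
 * patch does, but with snprintf and the remaining space computed
 * explicitly. The caller frees the result.
 */
static char *event_name_with_modifier(const char *name,
				      int exclude_user, int exclude_kernel)
{
	size_t buf_sz = strlen(name) + 3;	/* ":u"/":k" plus terminator */
	char *buf = malloc(buf_sz);

	if (!buf)
		return NULL;

	snprintf(buf, buf_sz, "%s", name);

	if (!exclude_user && exclude_kernel)		/* user mode only */
		snprintf(buf + strlen(buf), buf_sz - strlen(buf), ":u");
	else if (!exclude_kernel && exclude_user)	/* kernel mode only */
		snprintf(buf + strlen(buf), buf_sz - strlen(buf), ":k");

	return buf;
}

int main(void)
{
	char *u = event_name_with_modifier("cycles", 0, 1);
	char *k = event_name_with_modifier("cycles", 1, 0);
	char *plain = event_name_with_modifier("cycles", 0, 0);

	if (!u || !k || !plain)
		return 1;

	printf("%s %s %s\n", u, k, plain);	/* cycles:u cycles:k cycles */
	free(u);
	free(k);
	free(plain);
	return 0;
}
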