Merge "msm: camera: Add support for autofocus driver for ov8825" into msm-3.4
diff --git a/Documentation/block/test-iosched.txt b/Documentation/block/test-iosched.txt
new file mode 100644
index 0000000..75d8134
--- /dev/null
+++ b/Documentation/block/test-iosched.txt
@@ -0,0 +1,39 @@
+Test IO scheduler
+==================
+
+The test scheduler allows testing a block device by dispatching
+specific requests according to the test case and declaring PASS/FAIL
+based on the completion error codes of those requests.
+
+The test IO scheduler implements the no-op scheduler operations, and uses
+them to dispatch the non-test requests when no test is running.
+This allows normal FS operation to continue in parallel with the test
+capability.
+The test IO scheduler keeps two different queues, one for real-world requests
+(inserted by the FS) and the other for test requests.
+The test IO scheduler chooses which queue to dispatch from according to the
+test state (IDLE/RUNNING).
+
+The test IO scheduler is compiled by default as a dynamic module and is
+enabled only if CONFIG_DEBUG_FS is defined.
+
+Each block device test utility that would like to use the test-iosched test
+services should register as a blk_dev_test_type and supply init and exit
+callbacks. These callbacks are called upon selection (or removal) of
+test-iosched as the active scheduler. From that point on, the block device test
+can start a test and supply its own callbacks for preparing, running, result
+checking and cleanup of the test.
+
+Each test is exposed via debugfs and can be triggered by writing to
+its debugfs file. In order to add a new test, one should expose a new
+debugfs file for that test (see the usage sketch below).
+
+Selecting IO schedulers
+-----------------------
+Refer to Documentation/block/switching-sched.txt for information on
+selecting an io scheduler on a per-device basis.
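+
+Example usage
+-------------
+The following sketch is illustrative only; the block device name and the
+debugfs entry names are assumptions that depend on the platform and on the
+tests exposed by the registered test utility (debugfs assumed to be mounted
+at /sys/kernel/debug):
+
+	# select the test IO scheduler for the block device
+	echo test-iosched > /sys/block/mmcblk0/queue/scheduler
+
+	# start one of the exposed tests by writing to its debugfs file
+	# (the exact file name and value depend on the test utility)
+	echo 1 > /sys/kernel/debug/test-iosched/<test_name>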
+
+
+May 10, 2012, Maya Erez <merez@codeaurora.org>
+
+
diff --git a/Documentation/devicetree/bindings/arm/msm/msm_watchdog.txt b/Documentation/devicetree/bindings/arm/msm/msm_watchdog.txt
new file mode 100644
index 0000000..9f0c922
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/msm/msm_watchdog.txt
@@ -0,0 +1,33 @@
+* Qualcomm MSM Watchdog
+
+The watchdog timer is configured with a bark and a bite time.
+If the watchdog is not "pet" at regular intervals, the system
+is assumed to have become non-responsive and needs to be reset.
+A warning in the form of a bark timeout leads to a bark interrupt
+and a kernel panic. If the watchdog timer is still not reset,
+a bite timeout occurs, which is an interrupt in the secure mode
+that leads to a reset of the SoC via the secure watchdog. The
+driver needs the petting time and the bark timeout to be programmed
+into the watchdog, as well as the bark and bite IRQs.
+
+The device tree parameters for the watchdog are:
+
+Required parameters:
+
+- compatible : "qcom,msm-watchdog"
+- reg : offset and length of the register set for the watchdog block.
+- interrupts : Should contain the bark and bite IRQ numbers.
+- qcom,pet-time : Non-zero interval, in ms, at which the watchdog should be pet.
+- qcom,bark-time : Non-zero timeout value, in ms, for a watchdog bark.
+- qcom,ipi-ping : Set to 1 to send a keep-alive ping to other CPUs, 0 otherwise.
+
+Example:
+
+	qcom,wdt@f9017000 {
+		compatible = "qcom,msm-watchdog";
+		reg = <0xf9017000 0x1000>;
+		interrupts = <0 3 0 0 4 0>;
+		qcom,bark-time = <11000>;
+		qcom,pet-time = <10000>;
+		qcom,ipi-ping = <1>;
+	};
diff --git a/Documentation/devicetree/bindings/arm/msm/rpm-regulator-smd.txt b/Documentation/devicetree/bindings/arm/msm/rpm-regulator-smd.txt
index 786635f..82935ed 100644
--- a/Documentation/devicetree/bindings/arm/msm/rpm-regulator-smd.txt
+++ b/Documentation/devicetree/bindings/arm/msm/rpm-regulator-smd.txt
@@ -54,6 +54,14 @@
 - parent-supply:               phandle to the parent supply/regulator node
 - qcom,system-load:            Load in uA present on regulator that is not
 				captured by any consumer request
+- qcom,use-voltage-corner:     Flag that signifies that regulator_set_voltage
+				calls should modify the corner parameter instead
+				of the voltage parameter.  When used, voltages
+				specified inside of the regulator framework
+				represent corners that have been incremented by
+				1.  This value shift is necessary to work around
+				a limitation of the regulator framework, which
+				treats 0 uV as an error.
 The following properties specify initial values for parameters to be sent to the
 RPM in regulator requests.
 - qcom,init-enable:            0 = regulator disabled
@@ -120,6 +128,24 @@
 					2 = GPS
 					4 = WLAN
 					8 = WAN
+- qcom,init-voltage-corner:    Performance corner to use in order to determine
+				voltage set point.  This value corresponds to
+				the actual value that will be sent and is not
+				incremented by 1 like the values used inside of
+				the regulator framework.  The meaning of corner
+				values is set by the RPM.  It is possible that
+				different regulators on a given platform or
+				similar regulators on different platforms will
+				utilize different corner values.  These are
+				corner values supported on MSM8974 for PMIC
+				PM8841 SMPS 2 (VDD_Dig); nominal voltages for
+				these corners are also shown:
+					0 = Retention    (0.5000 V)
+					1 = SVS Krait    (0.7250 V)
+					2 = SVS SOC      (0.8125 V)
+					3 = Normal       (0.9000 V)
+					4 = Turbo        (0.9875 V)
+					5 = Super Turbo  (1.0500 V)
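+				For example, for a regulator that also sets
+				qcom,use-voltage-corner, a consumer needing
+				the Normal corner (3) would request 3 + 1 = 4
+				through the regulator framework, e.g. with an
+				illustrative call regulator_set_voltage(reg, 4, 4).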
 
 All properties specified within the core regulator framework can also be used in
 second level nodes.  These bindings can be found in:
@@ -150,4 +176,13 @@
 		regulator-max-microvolt = <1150000>;
 		compatible = "qcom,rpm-regulator-smd";
 	};
+	pm8841_s1_corner: regulator-s1-corner {
+		regulator-name = "8841_s1_corner";
+		qcom,set = <3>;
+		regulator-min-microvolt = <1>;
+		regulator-max-microvolt = <6>;
+		qcom,init-voltage-corner = <3>;
+		qcom,use-voltage-corner;
+		compatible = "qcom,rpm-regulator-smd";
+	};
 };
diff --git a/Documentation/devicetree/bindings/arm/msm/tz-log.txt b/Documentation/devicetree/bindings/arm/msm/tz-log.txt
new file mode 100644
index 0000000..6928611
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/msm/tz-log.txt
@@ -0,0 +1,16 @@
+* TZLOG (Trust Zone Log)
+
+The tz_log driver is a platform device driver that exposes a debugfs
+interface for accessing and displaying diagnostic information
+related to secure code (Trustzone/QSEE).
+
+Required properties:
+- compatible : Should be "qcom,tz-log"
+- reg        : Offset and size of the register set for the device
+
+Example:
+
+	qcom,tz-log@fe805720 {
+		compatible = "qcom,tz-log";
+		reg = <0xfe805720 0x1000>;
+	};
diff --git a/Documentation/devicetree/bindings/gpio/qpnp-gpio.txt b/Documentation/devicetree/bindings/gpio/qpnp-gpio.txt
deleted file mode 100644
index 7cab09b..0000000
--- a/Documentation/devicetree/bindings/gpio/qpnp-gpio.txt
+++ /dev/null
@@ -1,143 +0,0 @@
-* msm-qpnp-gpio
-
-msm-qpnp-gpio is a GPIO chip driver for the MSM SPMI implementation.
-It creates a spmi_device for every spmi-dev-container block of device_nodes.
-These device_nodes contained within specify the PMIC GPIO number associated
-with each GPIO chip. The driver will map these to Linux GPIO numbers.
-
-[PMIC GPIO Device Declarations]
-
--Root Node-
-
-Required properties :
- - spmi-dev-container : Used to specify the following child nodes as part of the
-   same SPMI device.
- - gpio-controller : Specify as gpio-contoller. All child nodes will belong to this
-   gpio_chip.
- - #gpio-cells: We encode a PMIC GPIO number and a 32-bit flag field to
-   specify the gpio configuration. This must be set to '2'.
- - #address-cells: Specify one address field. This must be set to '1'.
- - #size-cells: Specify one size-cell. This must be set to '1'.
- - compatible = "qcom,qpnp-gpio" : Specify driver matching for this driver.
-
--Child Nodes-
-
-Required properties :
- - reg : Specify the spmi offset and size for this gpio device.
- - qcom,gpio-num : Specify the PMIC GPIO number for this gpio device.
-
-Optional configuration properties :
- -  qcom,direction:	indicates whether the gpio should be input, output, or
-			both.
-			QPNP_GPIO_DIR_IN   = 0,
-			QPNP_GPIO_DIR_OUT  = 1,
-			QPNP_GPIO_DIR_BOTH = 2
-
- - qcom,output-type:	indicates gpio should be configured as CMOS or open
-			drain.
-			QPNP_GPIO_OUT_BUF_CMOS = 0
-			QPNP_GPIO_OUT_BUF_OPEN_DRAIN_NMOS = 1,
-			QPNP_GPIO_OUT_BUF_OPEN_DRAIN_PMOS = 2,
-
- - qcom,invert:		Invert the signal of the gpio line -
-			QPNP_GPIO_INVERT_DISABLE = 0
-			QPNP_GPIO_INVERT_ENABLE = 1
-
- - qcom,pull:		Indicates whether a pull up or pull down should be
-			applied. If a pullup is required the current strength
-			needs to be specified. Current values of 30uA, 1.5uA,
-			31.5uA, 1.5uA with 30uA boost are supported.
-			QPNP_GPIO_PULL_UP_30	 = 0,
-			QPNP_GPIO_PULL_UP_1P5	 = 1,
-			QPNP_GPIO_PULL_UP_31P5	 = 2,
-			QPNP_GPIO_PULL_UP_1P5_30 = 3,
-			QPNP_GPIO_PULL_DN	 = 4,
-			QPNP_GPIO_PULL_NO	 = 5
-
-  - qcom,vin-sel:	specifies the voltage level when the output is set to 1.
-			For an input gpio specifies the voltage level at which
-			the input is interpreted as a logical 1.
-			QPNP_GPIO_VIN0 = 0,
-			QPNP_GPIO_VIN1 = 1,
-			QPNP_GPIO_VIN2 = 2,
-			QPNP_GPIO_VIN3 = 3,
-			QPNP_GPIO_VIN4 = 4,
-			QPNP_GPIO_VIN5 = 5,
-			QPNP_GPIO_VIN6 = 6,
-			QPNP_GPIO_VIN7 = 7
-
-  - qcom,out-strength:	the amount of current supplied for an output gpio.
-			QPNP_GPIO_OUT_STRENGTH_LOW  = 1
-			QPNP_GPIO_OUT_STRENGTH_MED  = 2,
-			QPNP_GPIO_OUT_STRENGTH_HIGH = 3,
-
-  - qcom,source-sel:	choose alternate function for the gpio. Certain gpios
-			can be paired (shorted) with each other. Some gpio pin
-			can act as alternate functions.
-			QPNP_GPIO_FUNC_NORMAL   = 0,
-			QPNP_GPIO_FUNC_PAIRED   = 1
-			QPNP_GPIO_FUNC_1	= 2,
-			QPNP_GPIO_FUNC_2	= 3,
-			QPNP_GPIO_DTEST1	= 4,
-			QPNP_GPIO_DTEST2	= 5,
-			QPNP_GPIO_DTEST3	= 6,
-			QPNP_GPIO_DTEST4	= 7
-
- - qcom,master-en:	1 = Enable features within the
-			GPIO block based on configurations.
-			0 = Completely disable the GPIO
-			block and let the pin float with high impedance
-			regardless of other settings.
-
-*Note: If any of the configuration properties are not specified, then the
-       qpnp-gpio driver will not modify that respective configuration in
-       hardware.
-
-[PMIC GPIO clients]
-
-Required properties :
- - gpios : Contains 3 fields of the form <&gpio_controller pmic_gpio_num flags>
-
-[Example]
-
-qpnp: qcom,spmi@fc4c0000 {
-		#address-cells = <1>;
-		#size-cells = <0>;
-		interrupt-controller;
-		#interrupt-cells = <3>;
-
-		qcom,pm8941@0 {
-			spmi-slave-container;
-			reg = <0x0>;
-			#address-cells = <1>;
-			#size-cells = <1>;
-
-			pm8941_gpios: gpios {
-				spmi-dev-container;
-				compatible = "qcom,qpnp-gpio";
-				gpio-controller;
-				#gpio-cells = <2>;
-				#address-cells = <1>;
-				#size-cells = <1>;
-
-				gpio@c000 {
-					reg = <0xc000 0x100>;
-					qcom,gpio-num = <62>;
-				};
-
-				gpio@c100 {
-					reg = <0xc100 0x100>;
-					qcom,gpio-num = <20>;
-					qcom,source_sel = <2>;
-					qcom,pull = <5>;
-				};
-			};
-
-			qcom,testgpio@1000 {
-				compatible = "qcom,qpnp-testgpio";
-				reg = <0x1000 0x1000>;
-				gpios = <&pm8941_gpios 62 0x0 &pm8941_gpios 20 0x1>;
-			};
-		};
-	};
-};
diff --git a/Documentation/devicetree/bindings/gpio/qpnp-pin.txt b/Documentation/devicetree/bindings/gpio/qpnp-pin.txt
new file mode 100644
index 0000000..c58e073
--- /dev/null
+++ b/Documentation/devicetree/bindings/gpio/qpnp-pin.txt
@@ -0,0 +1,198 @@
+* msm-qpnp-pin
+
+msm-qpnp-pin is a GPIO chip driver for the MSM SPMI implementation.
+It creates an spmi_device for every spmi-dev-container block of device_nodes.
+The device_nodes contained within specify the PMIC pin number associated
+with each gpio chip. The driver will map these to Linux GPIO numbers.
+
+[PMIC GPIO Device Declarations]
+
+-Root Node-
+
+Required properties :
+ - spmi-dev-container : Used to specify the following child nodes as part of the
+   same SPMI device.
+ - gpio-controller : Specify as gpio-controller. All child nodes will belong to
+   this gpio_chip.
+ - #gpio-cells: We encode a PMIC pin number and a 32-bit flag field to
+   specify the gpio configuration. This must be set to '2'.
+ - #address-cells: Specify one address field. This must be set to '1'.
+ - #size-cells: Specify one size-cell. This must be set to '1'.
+ - compatible = "qcom,qpnp-pin" : Specify driver matching for this driver.
+ - label: String giving the name for the gpio_chip device. This name
+   should be unique on the system and portray the specifics of the device.
+
+-Child Nodes-
+
+Required properties :
+ - reg : Specify the spmi offset and size for this pin device.
+ - qcom,pin-num : Specify the PMIC pin number for this device.
+
+Optional configuration properties :
+ -  qcom,mode:		indicates whether the pin should be input, output, or
+			both for GPIOs. MPP pins also support bidirectional,
+			analog in, analog out and current sink modes.
+			QPNP_PIN_MODE_DIG_IN	 = 0, (GPIO/MPP)
+			QPNP_PIN_MODE_DIG_OUT	 = 1, (GPIO/MPP)
+			QPNP_PIN_MODE_DIG_IN_OUT = 2, (GPIO/MPP)
+			QPNP_PIN_MODE_BIDIR	 = 3, (MPP)
+			QPNP_PIN_MODE_AIN	 = 4, (MPP)
+			QPNP_PIN_MODE_AOUT	 = 5, (MPP)
+			QPNP_PIN_MODE_SINK	 = 6  (MPP)
+
+ - qcom,output-type:	indicates gpio should be configured as CMOS or open
+			drain.
+			QPNP_PIN_OUT_BUF_CMOS		 = 0, (GPIO)
+			QPNP_PIN_OUT_BUF_OPEN_DRAIN_NMOS = 1, (GPIO)
+			QPNP_PIN_OUT_BUF_OPEN_DRAIN_PMOS = 2  (GPIO)
+
+ - qcom,invert:		Invert the signal of the gpio line -
+			QPNP_PIN_INVERT_DISABLE = 0 (GPIO/MPP)
+			QPNP_PIN_INVERT_ENABLE	= 1 (GPIO/MPP)
+
+ - qcom,pull:		This parameter should be programmed to different values
+			depending on whether it's GPIO or MPP.
+			For GPIO, it indicates whether a pull up or pull down
+			should be applied. If a pullup is required the
+			current strength needs to be specified.
+			Current values of 30uA, 1.5uA, 31.5uA, 1.5uA with 30uA
+			boost are supported. This value should be one of
+			the QPNP_PIN_GPIO_PULL_*. Note that the hardware ignores
+			this configuration if the GPIO is not set to input or
+			output open-drain mode.
+			QPNP_PIN_PULL_UP_30	 = 0, (GPIO)
+			QPNP_PIN_PULL_UP_1P5	 = 1, (GPIO)
+			QPNP_PIN_PULL_UP_31P5	 = 2, (GPIO)
+			QPNP_PIN_PULL_UP_1P5_30  = 3, (GPIO)
+			QPNP_PIN_PULL_DN	 = 4, (GPIO)
+			QPNP_PIN_PULL_NO	 = 5  (GPIO)
+
+			For MPP, it indicates whether a pullup should be
+			applied in bidirectional mode only. The hardware
+			ignores the configuration when operating in other modes.
+			This value should be one of the QPNP_PIN_MPP_PULL_*.
+
+			QPNP_PIN_MPP_PULL_UP_0P6KOHM = 0, (MPP)
+			QPNP_PIN_MPP_PULL_UP_OPEN    = 1, (MPP)
+			QPNP_PIN_MPP_PULL_UP_10KOHM  = 2, (MPP)
+			QPNP_PIN_MPP_PULL_UP_30KOHM  = 3  (MPP)
+
+  - qcom,vin-sel:	specifies the voltage level when the output is set to 1.
+			For an input gpio specifies the voltage level at which
+			the input is interpreted as a logical 1.
+			QPNP_PIN_VIN0 = 0, (GPIO/MPP)
+			QPNP_PIN_VIN1 = 1, (GPIO/MPP)
+			QPNP_PIN_VIN2 = 2, (GPIO/MPP)
+			QPNP_PIN_VIN3 = 3, (GPIO/MPP)
+			QPNP_PIN_VIN4 = 4, (GPIO/MPP)
+			QPNP_PIN_VIN5 = 5, (GPIO/MPP)
+			QPNP_PIN_VIN6 = 6, (GPIO/MPP)
+			QPNP_PIN_VIN7 = 7  (GPIO/MPP)
+
+  - qcom,out-strength:	the amount of current supplied for an output gpio.
+			QPNP_PIN_OUT_STRENGTH_LOW  = 1, (GPIO)
+			QPNP_PIN_OUT_STRENGTH_MED  = 2, (GPIO)
+			QPNP_PIN_OUT_STRENGTH_HIGH = 3  (GPIO)
+
+  - qcom,select:	select a function for the pin. Certain pins
+			can be paired (shorted) with each other. Some gpio pins
+			can take on alternate functions.
+			In the context of a gpio, this acts as a source select.
+			For MPPs, this is an enable select.
+			QPNP_PIN_SEL_FUNC_CONSTANT = 0, (GPIO/MPP)
+			QPNP_PIN_SEL_FUNC_PAIRED   = 1, (GPIO/MPP)
+			QPNP_PIN_SEL_FUNC_1	   = 2, (GPIO/MPP)
+			QPNP_PIN_SEL_FUNC_2	   = 3, (GPIO/MPP)
+			QPNP_PIN_SEL_DTEST1	   = 4, (GPIO/MPP)
+			QPNP_PIN_SEL_DTEST2	   = 5, (GPIO/MPP)
+			QPNP_PIN_SEL_DTEST3	   = 6, (GPIO/MPP)
+			QPNP_PIN_SEL_DTEST4	   = 7  (GPIO/MPP)
+
+ - qcom,master-en:	1 = Enable features within the
+			pin block based on configurations. (GPIO/MPP)
+			0 = Completely disable the block and
+			let the pin float with high impedance
+			regardless of other settings. (GPIO/MPP)
+ - qcom,aout-ref:	set the analog output reference.
+
+			QPNP_PIN_AOUT_1V25    = 0, (MPP)
+			QPNP_PIN_AOUT_0V625   = 1, (MPP)
+			QPNP_PIN_AOUT_0V3125  = 2, (MPP)
+			QPNP_PIN_AOUT_MPP     = 3, (MPP)
+			QPNP_PIN_AOUT_ABUS1   = 4, (MPP)
+			QPNP_PIN_AOUT_ABUS2   = 5, (MPP)
+			QPNP_PIN_AOUT_ABUS3   = 6, (MPP)
+			QPNP_PIN_AOUT_ABUS4   = 7  (MPP)
+
+ - qcom,ain-route:	Set the destination for analog input.
+			QPNP_PIN_AIN_AMUX_CH5   = 0, (MPP)
+			QPNP_PIN_AIN_AMUX_CH6   = 1, (MPP)
+			QPNP_PIN_AIN_AMUX_CH7   = 2, (MPP)
+			QPNP_PIN_AIN_AMUX_CH8   = 3, (MPP)
+			QPNP_PIN_AIN_AMUX_ABUS1 = 4, (MPP)
+			QPNP_PIN_AIN_AMUX_ABUS2 = 5, (MPP)
+			QPNP_PIN_AIN_AMUX_ABUS3 = 6, (MPP)
+			QPNP_PIN_AIN_AMUX_ABUS4 = 7  (MPP)
+
+ - qcom,cs-out:		Set the amount of current to sink, in mA.
+			QPNP_PIN_CS_OUT_5MA  = 0, (MPP)
+			QPNP_PIN_CS_OUT_10MA = 1, (MPP)
+			QPNP_PIN_CS_OUT_15MA = 2, (MPP)
+			QPNP_PIN_CS_OUT_20MA = 3, (MPP)
+			QPNP_PIN_CS_OUT_25MA = 4, (MPP)
+			QPNP_PIN_CS_OUT_30MA = 5, (MPP)
+			QPNP_PIN_CS_OUT_35MA = 6, (MPP)
+			QPNP_PIN_CS_OUT_40MA = 7  (MPP)
+
+*Note: If any of the configuration properties are not specified, then the
+       qpnp-pin driver will not modify that respective configuration in
+       hardware.
+
+[PMIC GPIO clients]
+
+Required properties :
+ - gpios : Contains 3 fields of the form <&gpio_controller pmic_pin_num flags>
+
+[Example]
+
+qpnp: qcom,spmi@fc4c0000 {
+		#address-cells = <1>;
+		#size-cells = <0>;
+		interrupt-controller;
+		#interrupt-cells = <3>;
+
+		qcom,pm8941@0 {
+			spmi-slave-container;
+			reg = <0x0>;
+			#address-cells = <1>;
+			#size-cells = <1>;
+
+			pm8941_gpios: gpios {
+				spmi-dev-container;
+				compatible = "qcom,qpnp-pin";
+				gpio-controller;
+				#gpio-cells = <2>;
+				#address-cells = <1>;
+				#size-cells = <1>;
+
+				gpio@c000 {
+					reg = <0xc000 0x100>;
+					qcom,pin-num = <62>;
+				};
+
+				gpio@c100 {
+					reg = <0xc100 0x100>;
+					qcom,pin-num = <20>;
+					qcom,source_sel = <2>;
+					qcom,pull = <5>;
+				};
+			};
+
+			qcom,testgpio@1000 {
+				compatible = "qcom,qpnp-testgpio";
+				reg = <0x1000 0x1000>;
+				gpios = <&pm8941_gpios 62 0x0 &pm8941_gpios 20 0x1>;
+			};
+		};
+	};
+};
diff --git a/Documentation/devicetree/bindings/mmc/msm_sdcc.txt b/Documentation/devicetree/bindings/mmc/msm_sdcc.txt
index 0c1762d..35ac0ec 100644
--- a/Documentation/devicetree/bindings/mmc/msm_sdcc.txt
+++ b/Documentation/devicetree/bindings/mmc/msm_sdcc.txt
@@ -10,6 +10,8 @@
   - qcom,sdcc-clk-rates : specifies supported SDCC clock frequencies, Units - Hz.
   - qcom,sdcc-sup-voltages: specifies supported voltage ranges for card. Should always be
 			specified in pairs (min, max), Units - mV.
+  - <supply-name>-supply: phandle to the regulator device tree node.
+  "supply-name" examples are "vdd" and "vdd-io".
 
 Optional Properties:
 	- cell-index - defines slot ID.
@@ -21,7 +23,17 @@
 	- qcom,sdcc-nonremovable - specifies whether the card in slot is
 				hot pluggable or hard wired.
 	- qcom,sdcc-disable_cmd23 - disable sending CMD23 to card when controller can't support it.
-	- qcom,sdcc-hs200 - enable eMMC4.5 HS200 bus speed mode
+	- qcom,sdcc-bus-speed-mode - specifies the bus speed modes supported by the host.
+	- qcom,sdcc-current-limit - specifies the maximum current the host can drive.
+	- qcom,sdcc-xpc - specifies whether the host can supply more than 150 mA for SDXC cards.
+
+In the following, <supply> can be vdd (flash core voltage) or vdd-io (I/O voltage); an illustrative snippet follows this list.
+	- qcom,sdcc-<supply>-always_on - specifies whether the supply should always be kept "on".
+	- qcom,sdcc-<supply>-lpm_sup - specifies whether the supply can be kept in low power mode (lpm).
+	- qcom,sdcc-<supply>-voltage_level - specifies voltage levels for the supply. Should be
+	specified in pairs (min, max), units - uV.
+	- qcom,sdcc-<supply>-current_level - specifies load levels for the supply in lpm or
+	high power mode (hpm). Should be specified in pairs (lpm, hpm), units - uA.
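+
+	For example (illustrative only; the regulator phandles and the voltage,
+	current and bus speed values shown will differ per board), a slot node
+	may contain:
+
+		vdd-supply = <&pm8941_l20>;
+		vdd-io-supply = <&pm8941_s3>;
+		qcom,sdcc-vdd-voltage_level = <2950000 2950000>;
+		qcom,sdcc-vdd-current_level = <800 500000>;
+		qcom,sdcc-vdd-io-always_on;
+		qcom,sdcc-vdd-io-voltage_level = <1800000 1800000>;
+		qcom,sdcc-vdd-io-current_level = <250 154000>;
+		qcom,sdcc-bus-speed-mode = "HS200_1p8v", "DDR_1p8v";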
 
 Example:
 
diff --git a/Documentation/devicetree/bindings/pil/pil-venus.txt b/Documentation/devicetree/bindings/pil/pil-venus.txt
new file mode 100644
index 0000000..93cba32
--- /dev/null
+++ b/Documentation/devicetree/bindings/pil/pil-venus.txt
@@ -0,0 +1,28 @@
+* Qualcomm Venus Video Subsystem Peripheral Image Loader
+
+pil-venus is a peripheral image loading (PIL) driver. It is used for loading
+the Venus firmware images for the video codec into memory and for preparing
+the subsystem's processor to execute code. It is also used to shut down the
+processor when it is not needed.
+
+Required properties:
+- compatible: "qcom,pil-venus"
+- reg: offset and length of the register set for the device. The first pair
+       corresponds to VENUS_WRAPPER, the second pair corresponds to VENUS_VBIF.
+- vdd-supply: regulator to supply venus.
+- qcom,firmware-name: Base name of the firmware image. Ex. "venus"
+- qcom,firmware-min-paddr: The lowest addr boundary for firmware image in DDR
+- qcom,firmware-max-paddr: The highest addr boundary for firmware image in DDR
+
+Example:
+        qcom,venus@fdce0000 {
+                compatible = "qcom,pil-venus";
+                reg = <0xfdce0000 0x4000>,
+                      <0xfdc80208 0x8>;
+                vdd-supply = <&gdsc_venus>;
+
+                qcom,firmware-name = "venus";
+                qcom,firmware-min-paddr = <0xF500000>;
+                qcom,firmware-max-paddr = <0xFA00000>;
+
+        };
diff --git a/Documentation/devicetree/bindings/regulator/gdsc-regulator.txt b/Documentation/devicetree/bindings/regulator/gdsc-regulator.txt
index cd7bdce..30d34f6 100644
--- a/Documentation/devicetree/bindings/regulator/gdsc-regulator.txt
+++ b/Documentation/devicetree/bindings/regulator/gdsc-regulator.txt
@@ -2,7 +2,7 @@
 
 The GDSC driver, implemented under the regulator framework, is responsible for
 safely collapsing and restoring power to peripheral cores on chipsets like
-msm-copper for power savings.
+msm8974 for power savings.
 
 Required properties:
  - compatible:      Must be "qcom,gdsc"
diff --git a/Documentation/devicetree/bindings/sound/qcom-audio-dev.txt b/Documentation/devicetree/bindings/sound/qcom-audio-dev.txt
new file mode 100644
index 0000000..b6086e7
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/qcom-audio-dev.txt
@@ -0,0 +1,135 @@
+Qualcomm audio devices for ALSA sound soc
+
+* msm-pcm
+
+Required properties:
+
+ - compatible : "qcom,msm-pcm-dsp"
+
+* msm-pcm-routing
+
+Required properties:
+
+ - compatible : "qcom,msm-pcm-routing"
+
+* msm-pcm-lpa
+
+Required properties:
+
+ - compatible : "qcom,msm-pcm-lpa"
+
+* msm-voip-dsp
+
+Required properties:
+
+ - compatible : "qcom,msm-voip-dsp"
+
+* msm-stub-codec
+
+Required properties:
+
+ - compatible : "qcom,msm-stub-codec"
+
+* msm-dai-fe
+
+Required properties:
+
+ - compatible : "qcom,msm-dai-fe"
+
+* msm-auxpcm
+
+[First Level Nodes]
+
+Required properties:
+
+ - compatible :                           "qcom,msm-auxpcm-resource"
+
+ - qcom,msm-cpudai-auxpcm-clk:            clock for auxpcm
+
+ - qcom,msm-cpudai-auxpcm-mode:           mode information
+                                          0 - for PCM
+
+ - qcom,msm-cpudai-auxpcm-sync:           sync information
+
+ - qcom,msm-cpudai-auxpcm-frame:          No. of bytes per frame
+                                          5 - 256BPF
+
+ - qcom,msm-cpudai-auxpcm-quant:          Type of quantization
+                                          2 - Linear quantization
+
+ - qcom,msm-cpudai-auxpcm-slot:           Slot number for multichannel scenario
+                                          Value is 1
+
+ - qcom,msm-cpudai-auxpcm-data:           Data field - 0
+
+ - qcom,msm-cpudai-auxpcm-pcm-clk-rate:   Clock rate for pcm - 2048000
+
+[Second Level Nodes]
+
+Required Properties:
+
+ - qcom,msm-auxpcm-dev-id:                This property specifies the device
+                                          port id.
+                                          For Rx device, the port id is 4106
+                                          and for Tx device, the port id is 4107
+
+ - compatible:                            "qcom,msm-auxpcm-dev"
+
+* msm-pcm-hostless
+
+Required properties:
+
+ - compatible : "qcom,msm-pcm-hostless"
+
+Example:
+
+        qcom,msm-pcm {
+                compatible = "qcom,msm-pcm-dsp";
+        };
+
+        qcom,msm-pcm-routing {
+                compatible = "qcom,msm-pcm-routing";
+        };
+
+        qcom,msm-pcm-lpa {
+                compatible = "qcom,msm-pcm-lpa";
+        };
+
+        qcom,msm-voip-dsp {
+                compatible = "qcom,msm-voip-dsp";
+        };
+
+        qcom,msm-stub-codec {
+                compatible = "qcom,msm-stub-codec";
+        };
+
+        qcom,msm-dai-fe {
+                compatible = "qcom,msm-dai-fe";
+        };
+
+        qcom,msm-auxpcm {
+                compatible = "qcom,msm-auxpcm-resource";
+                qcom,msm-cpudai-auxpcm-clk = "pcm_clk";
+                qcom,msm-cpudai-auxpcm-mode = <0>;
+                qcom,msm-cpudai-auxpcm-sync = <1>;
+                qcom,msm-cpudai-auxpcm-frame = <5>;
+                qcom,msm-cpudai-auxpcm-quant = <2>;
+                qcom,msm-cpudai-auxpcm-slot = <1>;
+                qcom,msm-cpudai-auxpcm-data = <0>;
+                qcom,msm-cpudai-auxpcm-pcm-clk-rate = <2048000>;
+
+                qcom,msm-auxpcm-rx {
+			qcom,msm-auxpcm-dev-id = <4106>;
+                        compatible = "qcom,msm-auxpcm-dev";
+                };
+
+                qcom,msm-auxpcm-tx {
+			qcom,msm-auxpcm-dev-id = <4107>;
+                        compatible = "qcom,msm-auxpcm-dev";
+                };
+        };
+
+        qcom,msm-pcm-hostless {
+                compatible = "qcom,msm-pcm-hostless";
+        };
+
diff --git a/Documentation/devicetree/bindings/spmi/msm-spmi.txt b/Documentation/devicetree/bindings/spmi/msm-spmi.txt
index d50037f..5e43ea6 100644
--- a/Documentation/devicetree/bindings/spmi/msm-spmi.txt
+++ b/Documentation/devicetree/bindings/spmi/msm-spmi.txt
@@ -36,6 +36,10 @@
    number of interrupts.
  - interrupt-parent : the phandle for the interrupt controller that
    services interrupts for this device.
+ - reg-names : a list of strings that map in order to the list of addresses
+   specified above in the 'reg' property.
+ - interrupt-names : a list of strings that map in order to the list of
+   interrupts specified in the 'interrupts' property.
 
 [Second Level Nodes]
 
@@ -60,6 +64,10 @@
  - spmi-dev-container: This specifies that all the device nodes specified for
    this slave_id should have their resources coalesced into only one
    spmi_device.
+ - reg-names : a list of strings that map in order to the list of addresses
+   specified above in the 'reg' property.
+ - interrupt-names : a list of strings that map in order to the list of
+   interrupts specified in the 'interrupts' property.
 
 [Third Level Nodes]
 
@@ -79,7 +87,14 @@
    number of interrupts.
  - interrupt-parent : the phandle for the interrupt controller that
    services interrupts for this device.
-
+ - reg-names : a list of strings that map in order to the list of addresses
+   specified above in the 'reg' property.
+ - interrupt-names : a list of strings that map in order to the list of
+   interrupts specified in the 'interrupts' property.
+ - label: A single name that identifies the device. This name can be looked up
+   with spmi_get_node_byname(). This is mostly useful in spmi-dev-container
+   configurations where multiple device_nodes are associated with one spmi
+   device.
+
 Notes :
  - It is considered an error to include spmi-slave-dev at this level.
 
@@ -97,7 +112,7 @@
 			compatible = "qcom,qpnp-testint";
 			reg = <0xf>;
 			interrupts = <0x3 0x15 0x0 0x3 0x15 0x02 0x1 0x47 0x0>;
-
+			interrupt-names = "testint_0", "testint_1", "testint_err";
 		};
 
 		pm8941@0 {
@@ -108,22 +123,23 @@
 
 			pm8941_gpios: gpios {
 				spmi-dev-container;
-				compatible = "qcom,qpnp-gpio";
+				compatible = "qcom,qpnp-pin";
 				gpio-controller;
 				#gpio-cells = <1>;
 				#address-cells = <1>;
 				#size-cells = <1>;
 
 				pm8941_gpio1@0xc000 {
-					compatible = "qcom,qpnp-gpio";
+					compatible = "qcom,qpnp-pin";
 					reg = <0xc000 0x100>;
 					qcom,qpnp_gpio = <1>;
 					interrupt-parent = <&qpnp>;
 					interrupts = <0x3 0x15 0x02 0x1 0x47 0x0>;
+					label = "foo-dev";
 				};
 
 				pm8941_gpio2@0xc100 {
-					compatible = "qcom,qpnp-gpio";
+					compatible = "qcom,qpnp-pin";
 					reg = <0xc100 0x100>;
 					qcom,qpnp_gpio = <2>;
 					interrupt-parent = <&qpnp>;
@@ -133,7 +149,8 @@
 
 			testgpio@0x1000 {
 				compatible = "qcom,qpnp-testgpio";
-				reg = <0x1000 0x1000>;
+				reg = <0x1000 0x1000 0x2000 0x1000>;
+				reg-names = "foo", "bar";
 				qpnp-gpios = <&pm8941_gpios 0x0>;
 			};
 		};
@@ -143,7 +160,7 @@
 			#address-cells = <1>;
 			#size-cells = <1>;
 			spmi-dev-container;
-			compatible = "qcom,qpnp-gpio";
+			compatible = "qcom,qpnp-pin";
 
 			pm8841_gpio1@0xc000 {
 				reg = <0xc000 0x100>;
diff --git a/Documentation/devicetree/bindings/usb/msm-ssusb.txt b/Documentation/devicetree/bindings/usb/msm-ssusb.txt
index 40b3bc3..a2b7dfc 100644
--- a/Documentation/devicetree/bindings/usb/msm-ssusb.txt
+++ b/Documentation/devicetree/bindings/usb/msm-ssusb.txt
@@ -3,7 +3,13 @@
 Required properties :
 - compatible : should be "qcom,dwc-usb3-msm"
 - reg : offset and length of the register set in the memory map
-- interrupts: IRQ line
+- interrupts: IRQ lines used by this controller
+- interrupt-names : Required interrupt resource entries are:
+	"irq" : Interrupt for DWC3 core
+	"otg_irq" : Interrupt for DWC3 core's OTG Events
+- <supply-name>-supply: phandle to the regulator device tree node
+  Required "supply-name" examples are "SSUSB_VDDCX", "SSUSB_1p8",
+  "HSUSB_VDDCX", "HSUSB_1p8", "HSUSB_3p3".
 - qcom,dwc-usb3-msm-dbm-eps: Number of endpoints available for
   the DBM (Device Bus Manager). The DBM is HW unit which is part of
   the MSM USB3.0 core (which also includes the Synopsys DesignWare
@@ -12,7 +18,13 @@
 Example MSM USB3.0 controller device node :
 	usb@f9200000 {
 		compatible = "qcom,dwc-usb3-msm";
-		reg = <0xf9200000 0xCCFF>;
-		interrupts = <0 131 0>
+		reg = <0xF9200000 0xFA000>;
+		interrupts = <0 131 0 0 179 0>;
+		interrupt-names = "irq", "otg_irq";
+		SSUSB_VDDCX-supply = <&pm8841_s2>;
+		SSUSB_1p8-supply = <&pm8941_l6>;
+		HSUSB_VDDCX-supply = <&pm8841_s2>;
+		HSUSB_1p8-supply = <&pm8941_l6>;
+		HSUSB_3p3-supply = <&pm8941_l24>;
 		qcom,dwc-usb3-msm-dbm-eps = <4>
 	};
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 64349f0..9a1c759 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -508,6 +508,11 @@
 			Also note the kernel might malfunction if you disable
 			some critical bits.
 
+	cma=nn[MG]	[ARM,KNL]
+			Sets the size of kernel global memory area for contiguous
+			memory allocations. For more information, see
+			include/linux/dma-contiguous.h
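+			Example: cma=64M reserves a 64 MiB area (the size
+			shown here is only illustrative).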
+
 	cmo_free_hint=	[PPC] Format: { yes | no }
 			Specify whether pages are marked as being inactive
 			when they are freed.  This is used in CMO environments
@@ -515,6 +520,10 @@
 			a hypervisor.
 			Default: yes
 
+	coherent_pool=nn[KMG]	[ARM,KNL]
+			Sets the size of memory pool for coherent, atomic dma
+			allocations if Contiguous Memory Allocator (CMA) is used.
+
 	code_bytes	[X86] How many bytes of object code to print
 			in an oops report.
 			Range: 0 - 8192
diff --git a/arch/Kconfig b/arch/Kconfig
index bba59d1..0d88760 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -142,6 +142,9 @@
 config HAVE_DMA_ATTRS
 	bool
 
+config HAVE_DMA_CONTIGUOUS
+	bool
+
 config USE_GENERIC_SMP_HELPERS
 	bool
 
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 5e1cd06..8fb7a8e 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -4,6 +4,7 @@
 	select HAVE_AOUT
 	select HAVE_DMA_API_DEBUG
 	select HAVE_IDE if PCI || ISA || PCMCIA
+	select HAVE_DMA_CONTIGUOUS if (CPU_V6 || CPU_V6K || CPU_V7)
 	select HAVE_MEMBLOCK
 	select RTC_LIB
 	select SYS_SUPPORTS_APM_EMULATION
diff --git a/arch/arm/boot/dts/msm-pm8841.dtsi b/arch/arm/boot/dts/msm-pm8841.dtsi
index b157e95..a586a90 100644
--- a/arch/arm/boot/dts/msm-pm8841.dtsi
+++ b/arch/arm/boot/dts/msm-pm8841.dtsi
@@ -17,6 +17,47 @@
 		interrupt-controller;
 		#interrupt-cells = <3>;
 
+		qcom,pm8841@4 {
+			spmi-slave-container;
+			reg = <0x4>;
+			#address-cells = <1>;
+			#size-cells = <1>;
+
+			pm8841_mpps {
+				spmi-dev-container;
+				compatible = "qcom,qpnp-pin";
+				gpio-controller;
+				#gpio-cells = <2>;
+				#address-cells = <1>;
+				#size-cells = <1>;
+				label = "pm8841-mpp";
+
+				mpp@a000 {
+					reg = <0xa000 0x100>;
+					qcom,pin-num = <1>;
+					status = "disabled";
+				};
+
+				mpp@a100 {
+					reg = <0xa100 0x100>;
+					qcom,pin-num = <2>;
+					status = "disabled";
+				};
+
+				mpp@a200 {
+					reg = <0xa200 0x100>;
+					qcom,pin-num = <3>;
+					status = "disabled";
+				};
+
+				mpp@a300 {
+					reg = <0xa300 0x100>;
+					qcom,pin-num = <4>;
+					status = "disabled";
+				};
+			};
+		};
+
 		qcom,pm8841@5 {
 			spmi-slave-container;
 			reg = <0x5>;
diff --git a/arch/arm/boot/dts/msm-pm8941.dtsi b/arch/arm/boot/dts/msm-pm8941.dtsi
index e62dfbd..2714d9e 100644
--- a/arch/arm/boot/dts/msm-pm8941.dtsi
+++ b/arch/arm/boot/dts/msm-pm8941.dtsi
@@ -25,225 +25,284 @@
 
 			pm8941_gpios {
 				spmi-dev-container;
-				compatible = "qcom,qpnp-gpio";
+				compatible = "qcom,qpnp-pin";
 				gpio-controller;
 				#gpio-cells = <2>;
 				#address-cells = <1>;
 				#size-cells = <1>;
+				label = "pm8941-gpio";
 
 				gpio@c000 {
 					reg = <0xc000 0x100>;
-					qcom,gpio-num = <1>;
+					qcom,pin-num = <1>;
 					status = "disabled";
 				};
 
 				gpio@c100 {
 					reg = <0xc100 0x100>;
-					qcom,gpio-num = <2>;
+					qcom,pin-num = <2>;
 					status = "disabled";
 				};
 
 				gpio@c200 {
 					reg = <0xc200 0x100>;
-					qcom,gpio-num = <3>;
+					qcom,pin-num = <3>;
 					status = "disabled";
 				};
 
 				gpio@c300 {
 					reg = <0xc300 0x100>;
-					qcom,gpio-num = <4>;
+					qcom,pin-num = <4>;
 					status = "disabled";
 				};
 
 				gpio@c400 {
 					reg = <0xc400 0x100>;
-					qcom,gpio-num = <5>;
+					qcom,pin-num = <5>;
 					status = "disabled";
 				};
 
 				gpio@c500 {
 					reg = <0xc500 0x100>;
-					qcom,gpio-num = <6>;
+					qcom,pin-num = <6>;
 					status = "disabled";
 				};
 
 				gpio@c600 {
 					reg = <0xc600 0x100>;
-					qcom,gpio-num = <7>;
+					qcom,pin-num = <7>;
 					status = "disabled";
 				};
 
 				gpio@c700 {
 					reg = <0xc700 0x100>;
-					qcom,gpio-num = <8>;
+					qcom,pin-num = <8>;
 					status = "disabled";
 				};
 
 				gpio@c800 {
 					reg = <0xc800 0x100>;
-					qcom,gpio-num = <9>;
+					qcom,pin-num = <9>;
 					status = "disabled";
 				};
 
 				gpio@c900 {
 					reg = <0xc900 0x100>;
-					qcom,gpio-num = <10>;
+					qcom,pin-num = <10>;
 					status = "disabled";
 				};
 
 				gpio@ca00 {
 					reg = <0xca00 0x100>;
-					qcom,gpio-num = <11>;
+					qcom,pin-num = <11>;
 					status = "disabled";
 				};
 
 				gpio@cb00 {
 					reg = <0xcb00 0x100>;
-					qcom,gpio-num = <12>;
+					qcom,pin-num = <12>;
 					status = "disabled";
 				};
 
 				gpio@cc00 {
 					reg = <0xcc00 0x100>;
-					qcom,gpio-num = <13>;
+					qcom,pin-num = <13>;
 					status = "disabled";
 				};
 
 				gpio@cd00 {
 					reg = <0xcd00 0x100>;
-					qcom,gpio-num = <14>;
+					qcom,pin-num = <14>;
 					status = "disabled";
 				};
 
 				gpio@ce00 {
 					reg = <0xce00 0x100>;
-					qcom,gpio-num = <15>;
+					qcom,pin-num = <15>;
 					status = "disabled";
 				};
 
 				gpio@cf00 {
 					reg = <0xcf00 0x100>;
-					qcom,gpio-num = <16>;
+					qcom,pin-num = <16>;
 					status = "disabled";
 				};
 
 				gpio@d000 {
 					reg = <0xd000 0x100>;
-					qcom,gpio-num = <17>;
+					qcom,pin-num = <17>;
 					status = "disabled";
 				};
 
 				gpio@d100 {
 					reg = <0xd100 0x100>;
-					qcom,gpio-num = <18>;
+					qcom,pin-num = <18>;
 					status = "disabled";
 				};
 
 				gpio@d200 {
 					reg = <0xd200 0x100>;
-					qcom,gpio-num = <19>;
+					qcom,pin-num = <19>;
 					status = "disabled";
 				};
 
 				gpio@d300 {
 					reg = <0xd300 0x100>;
-					qcom,gpio-num = <20>;
+					qcom,pin-num = <20>;
 					status = "disabled";
 				};
 
 				gpio@d400 {
 					reg = <0xd400 0x100>;
-					qcom,gpio-num = <21>;
+					qcom,pin-num = <21>;
 					status = "disabled";
 				};
 
 				gpio@d500 {
 					reg = <0xd500 0x100>;
-					qcom,gpio-num = <22>;
+					qcom,pin-num = <22>;
 					status = "disabled";
 				};
 
 				gpio@d600 {
 					reg = <0xd600 0x100>;
-					qcom,gpio-num = <23>;
+					qcom,pin-num = <23>;
 					status = "disabled";
 				};
 
 				gpio@d700 {
 					reg = <0xd700 0x100>;
-					qcom,gpio-num = <24>;
+					qcom,pin-num = <24>;
 					status = "disabled";
 				};
 
 				gpio@d800 {
 					reg = <0xd800 0x100>;
-					qcom,gpio-num = <25>;
+					qcom,pin-num = <25>;
 					status = "disabled";
 				};
 
 				gpio@d900 {
 					reg = <0xd900 0x100>;
-					qcom,gpio-num = <26>;
+					qcom,pin-num = <26>;
 					status = "disabled";
 				};
 
 				gpio@da00 {
 					reg = <0xda00 0x100>;
-					qcom,gpio-num = <27>;
+					qcom,pin-num = <27>;
 					status = "disabled";
 				};
 
 				gpio@db00 {
 					reg = <0xdb00 0x100>;
-					qcom,gpio-num = <28>;
+					qcom,pin-num = <28>;
 					status = "disabled";
 				};
 
 				gpio@dc00 {
 					reg = <0xdc00 0x100>;
-					qcom,gpio-num = <29>;
+					qcom,pin-num = <29>;
 					status = "disabled";
 				};
 
 				gpio@dd00 {
 					reg = <0xdd00 0x100>;
-					qcom,gpio-num = <30>;
+					qcom,pin-num = <30>;
 					status = "disabled";
 				};
 
 				gpio@de00 {
 					reg = <0xde00 0x100>;
-					qcom,gpio-num = <31>;
+					qcom,pin-num = <31>;
 					status = "disabled";
 				};
 
 				gpio@df00 {
 					reg = <0xdf00 0x100>;
-					qcom,gpio-num = <32>;
+					qcom,pin-num = <32>;
 					status = "disabled";
 				};
 
 				gpio@e000 {
 					reg = <0xe000 0x100>;
-					qcom,gpio-num = <33>;
+					qcom,pin-num = <33>;
 					status = "disabled";
 				};
 
 				gpio@e100 {
 					reg = <0xe100 0x100>;
-					qcom,gpio-num = <34>;
+					qcom,pin-num = <34>;
 					status = "disabled";
 				};
 
 				gpio@e200 {
 					reg = <0xe200 0x100>;
-					qcom,gpio-num = <35>;
+					qcom,pin-num = <35>;
 					status = "disabled";
 				};
 
 				gpio@e300 {
 					reg = <0xe300 0x100>;
-					qcom,gpio-num = <36>;
+					qcom,pin-num = <36>;
+					status = "disabled";
+				};
+			};
+
+			pm8941_mpps {
+				spmi-dev-container;
+				compatible = "qcom,qpnp-pin";
+				gpio-controller;
+				#gpio-cells = <2>;
+				#address-cells = <1>;
+				#size-cells = <1>;
+				label = "pm8941-mpp";
+
+				mpp@a000 {
+					reg = <0xa000 0x100>;
+					qcom,pin-num = <1>;
+					status = "disabled";
+				};
+
+				mpp@a100 {
+					reg = <0xa100 0x100>;
+					qcom,pin-num = <2>;
+					status = "disabled";
+				};
+
+				mpp@a200 {
+					reg = <0xa200 0x100>;
+					qcom,pin-num = <3>;
+					status = "disabled";
+				};
+
+				mpp@a300 {
+					reg = <0xa300 0x100>;
+					qcom,pin-num = <4>;
+					status = "disabled";
+				};
+
+				mpp@a400 {
+					reg = <0xa400 0x100>;
+					qcom,pin-num = <5>;
+					status = "disabled";
+				};
+
+				mpp@a500 {
+					reg = <0xa500 0x100>;
+					qcom,pin-num = <6>;
+					status = "disabled";
+				};
+
+				mpp@a600 {
+					reg = <0xa600 0x100>;
+					qcom,pin-num = <7>;
+					status = "disabled";
+				};
+
+				mpp@a700 {
+					reg = <0xa700 0x100>;
+					qcom,pin-num = <8>;
 					status = "disabled";
 				};
 			};
diff --git a/arch/arm/boot/dts/msmcopper-gpio.dtsi b/arch/arm/boot/dts/msm8974-gpio.dtsi
similarity index 72%
rename from arch/arm/boot/dts/msmcopper-gpio.dtsi
rename to arch/arm/boot/dts/msm8974-gpio.dtsi
index 7c3f5ce..59ad8db 100644
--- a/arch/arm/boot/dts/msmcopper-gpio.dtsi
+++ b/arch/arm/boot/dts/msm8974-gpio.dtsi
@@ -18,197 +18,218 @@
 			pm8941_gpios: pm8941_gpios {
 
 				gpio@c000 {
-					qcom,gpio-num = <1>;
 					status = "ok";
 				};
 
 				gpio@c100 {
-					qcom,gpio-num = <2>;
 					status = "ok";
 				};
 
 				gpio@c200 {
-					qcom,gpio-num = <3>;
 					status = "ok";
 				};
 
 				gpio@c300 {
-					qcom,gpio-num = <4>;
 					status = "ok";
 				};
 
 				gpio@c400 {
-					qcom,gpio-num = <5>;
 					status = "ok";
 				};
 
 				gpio@c500 {
-					qcom,gpio-num = <6>;
 					status = "ok";
 				};
 
 				gpio@c600 {
-					qcom,gpio-num = <7>;
 					status = "ok";
 				};
 
 				gpio@c700 {
-					qcom,gpio-num = <8>;
 					status = "ok";
 				};
 
 				gpio@c800 {
-					qcom,gpio-num = <9>;
 					status = "ok";
 				};
 
 				gpio@c900 {
-					qcom,gpio-num = <10>;
 					status = "ok";
 				};
 
 				gpio@ca00 {
-					qcom,gpio-num = <11>;
 					status = "ok";
 				};
 
 				gpio@cb00 {
-					qcom,gpio-num = <12>;
 					status = "ok";
 				};
 
 				gpio@cc00 {
-					qcom,gpio-num = <13>;
 					status = "ok";
 				};
 
 				gpio@cd00 {
-					qcom,gpio-num = <14>;
 					status = "ok";
 				};
 
 				gpio@ce00 {
-					qcom,gpio-num = <15>;
 					status = "ok";
 				};
 
 				gpio@cf00 {
-					qcom,gpio-num = <16>;
 					status = "ok";
 				};
 
 				gpio@d000 {
-					qcom,gpio-num = <17>;
 					status = "ok";
 				};
 
 				gpio@d100 {
-					qcom,gpio-num = <18>;
 					status = "ok";
 				};
 
 				gpio@d200 {
-					qcom,gpio-num = <19>;
 					status = "ok";
 				};
 
 				gpio@d300 {
-					qcom,gpio-num = <20>;
 					status = "ok";
 				};
 
 				gpio@d400 {
-					qcom,gpio-num = <21>;
 					status = "ok";
 				};
 
 				gpio@d500 {
-					qcom,gpio-num = <22>;
 					status = "ok";
 				};
 
 				gpio@d600 {
-					qcom,gpio-num = <23>;
 					status = "ok";
 				};
 
 				gpio@d700 {
-					qcom,gpio-num = <24>;
 					status = "ok";
 				};
 
 				gpio@d800 {
-					qcom,gpio-num = <25>;
 					qcom,out-strength = <1>;
 					status = "ok";
 				};
 
 				gpio@d900 {
-					qcom,gpio-num = <26>;
 					qcom,out-strength = <1>;
 					status = "ok";
 				};
 
 				gpio@da00 {
-					qcom,gpio-num = <27>;
 					qcom,out-strength = <1>;
 					status = "ok";
 				};
 
 				gpio@db00 {
-					qcom,gpio-num = <28>;
 					qcom,out-strength = <1>;
 					status = "ok";
 				};
 
 				gpio@dc00 {
-					qcom,gpio-num = <29>;
 					qcom,out-strength = <1>;
 					status = "ok";
 				};
 
 				gpio@dd00 {
-					qcom,gpio-num = <30>;
 					qcom,out-strength = <1>;
 					status = "ok";
 				};
 
 				gpio@de00 {
-					qcom,gpio-num = <31>;
 					qcom,out-strength = <1>;
 					status = "ok";
 				};
 
 				gpio@df00 {
-					qcom,gpio-num = <32>;
 					qcom,out-strength = <1>;
 					status = "ok";
 				};
 
 				gpio@e000 {
-					qcom,gpio-num = <33>;
 					qcom,out-strength = <1>;
 					status = "ok";
 				};
 
 				gpio@e100 {
-					qcom,gpio-num = <34>;
 					qcom,out-strength = <1>;
 					status = "ok";
 				};
 
 				gpio@e200 {
-					qcom,gpio-num = <35>;
 					qcom,out-strength = <1>;
 					status = "ok";
 				};
 
 				gpio@e300 {
-					qcom,gpio-num = <36>;
 					qcom,out-strength = <1>;
 					status = "ok";
 				};
 			};
+
+			pm8941_mpps: pm8941_mpps {
+
+				mpp@a000 {
+					status = "ok";
+				};
+
+				mpp@a100 {
+					status = "ok";
+				};
+
+				mpp@a200 {
+					status = "ok";
+				};
+
+				mpp@a300 {
+					status = "ok";
+				};
+
+				mpp@a400 {
+					status = "ok";
+				};
+
+				mpp@a500 {
+					status = "ok";
+				};
+
+				mpp@a600 {
+					status = "ok";
+				};
+
+				mpp@a700 {
+					status = "ok";
+				};
+			};
+		};
+
+		qcom,pm8841@4 {
+
+			pm8841_mpps: pm8841_mpps {
+
+				mpp@a000 {
+					status = "ok";
+				};
+
+				mpp@a100 {
+					status = "ok";
+				};
+
+				mpp@a200 {
+					status = "ok";
+				};
+
+				mpp@a300 {
+					status = "ok";
+				};
+			};
 		};
 	};
 };
diff --git a/arch/arm/boot/dts/msmcopper-iommu.dtsi b/arch/arm/boot/dts/msm8974-iommu.dtsi
similarity index 100%
rename from arch/arm/boot/dts/msmcopper-iommu.dtsi
rename to arch/arm/boot/dts/msm8974-iommu.dtsi
diff --git a/arch/arm/boot/dts/msm8974-regulator.dtsi b/arch/arm/boot/dts/msm8974-regulator.dtsi
new file mode 100644
index 0000000..7b3893e
--- /dev/null
+++ b/arch/arm/boot/dts/msm8974-regulator.dtsi
@@ -0,0 +1,512 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/ {
+
+/* QPNP controlled regulators: */
+
+	qcom,spmi@fc4c0000 {
+
+		qcom,pm8941@1 {
+
+			pm8941_boost: regulator@a000 {
+				regulator-min-microvolt = <5000000>;
+				regulator-max-microvolt = <5000000>;
+				qcom,enable-time = <500>;
+				status = "okay";
+			};
+
+			pm8941_mvs1: regulator@8300 {
+				parent-supply = <&pm8941_boost>;
+				qcom,enable-time = <200>;
+				qcom,pull-down-enable = <1>;
+				status = "okay";
+			};
+
+			pm8941_mvs2: regulator@8400 {
+				parent-supply = <&pm8941_boost>;
+				qcom,enable-time = <200>;
+				qcom,pull-down-enable = <1>;
+				status = "okay";
+			};
+		};
+
+		qcom,pm8841@5 {
+
+			pm8841_s5: regulator@2000 {
+				regulator-min-microvolt = <850000>;
+				regulator-max-microvolt = <1100000>;
+				qcom,enable-time = <500>;
+				qcom,pull-down-enable = <1>;
+				regulator-always-on;
+				status = "okay";
+			};
+
+			pm8841_s6: regulator@2300 {
+				regulator-min-microvolt = <850000>;
+				regulator-max-microvolt = <1100000>;
+				qcom,enable-time = <500>;
+				qcom,pull-down-enable = <1>;
+				status = "okay";
+			};
+
+			pm8841_s7: regulator@2600 {
+				regulator-min-microvolt = <850000>;
+				regulator-max-microvolt = <1100000>;
+				qcom,enable-time = <500>;
+				qcom,pull-down-enable = <1>;
+				status = "okay";
+			};
+
+			pm8841_s8: regulator@2900 {
+				regulator-min-microvolt = <850000>;
+				regulator-max-microvolt = <1100000>;
+				qcom,enable-time = <500>;
+				qcom,pull-down-enable = <1>;
+				status = "okay";
+			};
+		};
+	};
+
+
+/* RPM controlled regulators: */
+
+	qcom,rpm-smd {
+		rpm-regulator-smpb1 {
+			status = "okay";
+			pm8841_s1: regulator-s1 {
+				regulator-min-microvolt = <675000>;
+				regulator-max-microvolt = <1050000>;
+				status = "okay";
+			};
+			pm8841_s1_ao: regulator-s1-ao {
+				regulator-name = "8841_s1_ao";
+				qcom,set = <1>;
+				regulator-min-microvolt = <675000>;
+				regulator-max-microvolt = <1050000>;
+				status = "okay";
+				compatible = "qcom,rpm-regulator-smd";
+			};
+		};
+
+		rpm-regulator-smpb2 {
+			status = "okay";
+			qcom,allow-atomic = <1>;
+			pm8841_s2: regulator-s2 {
+				regulator-min-microvolt = <500000>;
+				regulator-max-microvolt = <1050000>;
+				status = "okay";
+			};
+			pm8841_s2_corner: regulator-s2-corner {
+				regulator-name = "8841_s2_corner";
+				qcom,set = <3>;
+				regulator-min-microvolt = <1>;
+				regulator-max-microvolt = <6>;
+				qcom,use-voltage-corner;
+				compatible = "qcom,rpm-regulator-smd";
+				qcom,consumer-supplies = "vdd_dig", "";
+			};
+			pm8841_s2_corner_ao: regulator-s2-corner-ao {
+				regulator-name = "8841_s2_corner_ao";
+				qcom,set = <1>;
+				regulator-min-microvolt = <1>;
+				regulator-max-microvolt = <6>;
+				qcom,use-voltage-corner;
+				compatible = "qcom,rpm-regulator-smd";
+			};
+		};
+
+		rpm-regulator-smpb3 {
+			status = "okay";
+			pm8841_s3: regulator-s3 {
+				regulator-min-microvolt = <1150000>;
+				regulator-max-microvolt = <1150000>;
+				qcom,init-voltage = <1150000>;
+				status = "okay";
+			};
+		};
+
+		rpm-regulator-smpb4 {
+			status = "okay";
+			pm8841_s4: regulator-s4 {
+				regulator-min-microvolt = <900000>;
+				regulator-max-microvolt = <900000>;
+				qcom,init-voltage = <900000>;
+				status = "okay";
+			};
+		};
+
+		rpm-regulator-smpa1 {
+			status = "okay";
+			pm8941_s1: regulator-s1 {
+				regulator-min-microvolt = <1300000>;
+				regulator-max-microvolt = <1300000>;
+				qcom,init-voltage = <1300000>;
+				qcom,init-current = <100>;
+				qcom,system-load = <100000>;
+				regulator-always-on;
+				status = "okay";
+			};
+		};
+
+		rpm-regulator-smpa2 {
+			status = "okay";
+			qcom,allow-atomic = <1>;
+			pm8941_s2: regulator-s2 {
+				regulator-min-microvolt = <2150000>;
+				regulator-max-microvolt = <2150000>;
+				qcom,init-voltage = <2150000>;
+				status = "okay";
+			};
+			pm8941_s2_ao: regulator-s2-ao {
+				regulator-name = "8941_s2_ao";
+				qcom,set = <1>;
+				regulator-min-microvolt = <2150000>;
+				regulator-max-microvolt = <2150000>;
+				status = "okay";
+				compatible = "qcom,rpm-regulator-smd";
+			};
+		};
+
+		rpm-regulator-smpa3 {
+			status = "okay";
+			pm8941_s3: regulator-s3 {
+				regulator-min-microvolt = <1800000>;
+				regulator-max-microvolt = <1800000>;
+				qcom,init-voltage = <1800000>;
+				qcom,init-current = <100>;
+				qcom,system-load = <100000>;
+				regulator-always-on;
+				status = "okay";
+			};
+		};
+
+		rpm-regulator-ldoa1 {
+			status = "okay";
+			pm8941_l1: regulator-l1 {
+				parent-supply = <&pm8941_s1>;
+				regulator-min-microvolt = <1225000>;
+				regulator-max-microvolt = <1225000>;
+				qcom,init-voltage = <1225000>;
+				qcom,init-current = <10>;
+				qcom,system-load = <10000>;
+				regulator-always-on;
+				status = "okay";
+			};
+		};
+
+		rpm-regulator-ldoa2 {
+			status = "okay";
+			pm8941_l2: regulator-l2 {
+				parent-supply = <&pm8941_s3>;
+				regulator-min-microvolt = <1200000>;
+				regulator-max-microvolt = <1200000>;
+				qcom,init-voltage = <1200000>;
+				status = "okay";
+			};
+		};
+
+		rpm-regulator-ldoa3 {
+			status = "okay";
+			pm8941_l3: regulator-l3 {
+				parent-supply = <&pm8941_s1>;
+				regulator-min-microvolt = <1200000>;
+				regulator-max-microvolt = <1200000>;
+				qcom,init-voltage = <1200000>;
+				status = "okay";
+			};
+		};
+
+		rpm-regulator-ldoa4 {
+			status = "okay";
+			pm8941_l4: regulator-l4 {
+				parent-supply = <&pm8941_s1>;
+				regulator-min-microvolt = <1150000>;
+				regulator-max-microvolt = <1150000>;
+				qcom,init-voltage = <1150000>;
+				status = "okay";
+			};
+		};
+
+		rpm-regulator-ldoa5 {
+			status = "okay";
+			pm8941_l5: regulator-l5 {
+				parent-supply = <&pm8941_s2>;
+				regulator-min-microvolt = <1800000>;
+				regulator-max-microvolt = <1800000>;
+				qcom,init-voltage = <1800000>;
+				status = "okay";
+			};
+		};
+
+		rpm-regulator-ldoa6 {
+			status = "okay";
+			pm8941_l6: regulator-l6 {
+				parent-supply = <&pm8941_s2>;
+				regulator-min-microvolt = <1800000>;
+				regulator-max-microvolt = <1800000>;
+				qcom,init-voltage = <1800000>;
+				status = "okay";
+			};
+		};
+
+		rpm-regulator-ldoa7 {
+			status = "okay";
+			pm8941_l7: regulator-l7 {
+				parent-supply = <&pm8941_s2>;
+				regulator-min-microvolt = <1800000>;
+				regulator-max-microvolt = <1800000>;
+				qcom,init-voltage = <1800000>;
+				status = "okay";
+			};
+		};
+
+		rpm-regulator-ldoa8 {
+			status = "okay";
+			pm8941_l8: regulator-l8 {
+				regulator-min-microvolt = <1800000>;
+				regulator-max-microvolt = <1800000>;
+				qcom,init-voltage = <1800000>;
+				status = "okay";
+			};
+		};
+
+		rpm-regulator-ldoa9 {
+			status = "okay";
+			pm8941_l9: regulator-l9 {
+				regulator-min-microvolt = <1800000>;
+				regulator-max-microvolt = <2950000>;
+				qcom,init-voltage = <2950000>;
+				status = "okay";
+			};
+		};
+
+		rpm-regulator-ldoa10 {
+			status = "okay";
+			pm8941_l10: regulator-l10 {
+				regulator-min-microvolt = <1800000>;
+				regulator-max-microvolt = <2950000>;
+				qcom,init-voltage = <2950000>;
+				status = "okay";
+			};
+		};
+
+		rpm-regulator-ldoa11 {
+			status = "okay";
+			pm8941_l11: regulator-l11 {
+				parent-supply = <&pm8941_s1>;
+				regulator-min-microvolt = <1250000>;
+				regulator-max-microvolt = <1250000>;
+				qcom,init-voltage = <1250000>;
+				status = "okay";
+			};
+		};
+
+		rpm-regulator-ldoa12 {
+			status = "okay";
+			qcom,allow-atomic = <1>;
+			pm8941_l12: regulator-l12 {
+				parent-supply = <&pm8941_s2>;
+				regulator-min-microvolt = <1800000>;
+				regulator-max-microvolt = <1800000>;
+				status = "okay";
+			};
+			pm8941_l12_ao: regulator-l12-ao {
+				regulator-name = "8941_l12_ao";
+				parent-supply = <&pm8941_s2_ao>;
+				qcom,set = <1>;
+				regulator-min-microvolt = <1800000>;
+				regulator-max-microvolt = <1800000>;
+				status = "okay";
+				compatible = "qcom,rpm-regulator-smd";
+			};
+		};
+
+		rpm-regulator-ldoa13 {
+			status = "okay";
+			pm8941_l13: regulator-l13 {
+				regulator-min-microvolt = <1800000>;
+				regulator-max-microvolt = <2950000>;
+				qcom,init-voltage = <2950000>;
+				status = "okay";
+			};
+		};
+
+		rpm-regulator-ldoa14 {
+			status = "okay";
+			pm8941_l14: regulator-l14 {
+				parent-supply = <&pm8941_s2>;
+				regulator-min-microvolt = <1800000>;
+				regulator-max-microvolt = <1800000>;
+				qcom,init-voltage = <1800000>;
+				status = "okay";
+			};
+		};
+
+		rpm-regulator-ldoa15 {
+			status = "okay";
+			pm8941_l15: regulator-l15 {
+				parent-supply = <&pm8941_s2>;
+				regulator-min-microvolt = <2050000>;
+				regulator-max-microvolt = <2050000>;
+				qcom,init-voltage = <2050000>;
+				status = "okay";
+			};
+		};
+
+		rpm-regulator-ldoa16 {
+			status = "okay";
+			pm8941_l16: regulator-l16 {
+				regulator-min-microvolt = <2700000>;
+				regulator-max-microvolt = <2700000>;
+				qcom,init-voltage = <2700000>;
+				status = "okay";
+			};
+		};
+
+		rpm-regulator-ldoa17 {
+			status = "okay";
+			pm8941_l17: regulator-l17 {
+				regulator-min-microvolt = <2850000>;
+				regulator-max-microvolt = <2850000>;
+				qcom,init-voltage = <2850000>;
+				status = "okay";
+			};
+		};
+
+		rpm-regulator-ldoa18 {
+			status = "okay";
+			pm8941_l18: regulator-l18 {
+				regulator-min-microvolt = <2850000>;
+				regulator-max-microvolt = <2850000>;
+				qcom,init-voltage = <2850000>;
+				status = "okay";
+			};
+		};
+
+		rpm-regulator-ldoa19 {
+			status = "okay";
+			pm8941_l19: regulator-l19 {
+				regulator-min-microvolt = <2900000>;
+				regulator-max-microvolt = <2900000>;
+				qcom,init-voltage = <2900000>;
+				status = "okay";
+			};
+		};
+
+		rpm-regulator-ldoa20 {
+			status = "okay";
+			pm8941_l20: regulator-l20 {
+				regulator-min-microvolt = <2950000>;
+				regulator-max-microvolt = <2950000>;
+				qcom,init-voltage = <2950000>;
+				status = "okay";
+			};
+		};
+
+		rpm-regulator-ldoa21 {
+			status = "okay";
+			pm8941_l21: regulator-l21 {
+				regulator-min-microvolt = <2950000>;
+				regulator-max-microvolt = <2950000>;
+				qcom,init-voltage = <2950000>;
+				status = "okay";
+			};
+		};
+
+		rpm-regulator-ldoa22 {
+			status = "okay";
+			pm8941_l22: regulator-l22 {
+				regulator-min-microvolt = <3000000>;
+				regulator-max-microvolt = <3000000>;
+				qcom,init-voltage = <3000000>;
+				status = "okay";
+			};
+		};
+
+		rpm-regulator-ldoa23 {
+			status = "okay";
+			pm8941_l23: regulator-l23 {
+				regulator-min-microvolt = <3000000>;
+				regulator-max-microvolt = <3000000>;
+				qcom,init-voltage = <3000000>;
+				status = "okay";
+			};
+		};
+
+		rpm-regulator-ldoa24 {
+			status = "okay";
+			pm8941_l24: regulator-l24 {
+				regulator-min-microvolt = <3075000>;
+				regulator-max-microvolt = <3075000>;
+				qcom,init-voltage = <3075000>;
+				status = "okay";
+			};
+		};
+
+		rpm-regulator-vsa1 {
+			status = "okay";
+			pm8941_lvs1: regulator-lvs1 {
+				parent-supply = <&pm8941_s3>;
+				status = "okay";
+			};
+		};
+
+		rpm-regulator-vsa2 {
+			status = "okay";
+			pm8941_lvs2: regulator-lvs2 {
+				parent-supply = <&pm8941_s3>;
+				status = "okay";
+			};
+		};
+
+		rpm-regulator-vsa3 {
+			status = "okay";
+			pm8941_lvs3: regulator-lvs3 {
+				parent-supply = <&pm8941_s3>;
+				status = "okay";
+			};
+		};
+	};
+
+	krait0_vreg: regulator@f9088000 {
+		compatible = "qcom,krait-regulator";
+		regulator-name = "krait0";
+		reg = <0xf9088000 0x1000>;
+		regulator-min-microvolt = <500000>;
+		regulator-max-microvolt = <1100000>;
+	};
+
+	krait1_vreg: regulator@f9098000 {
+		compatible = "qcom,krait-regulator";
+		regulator-name = "krait1";
+		reg = <0xf9098000 0x1000>;
+		regulator-min-microvolt = <500000>;
+		regulator-max-microvolt = <1100000>;
+	};
+
+	krait2_vreg: regulator@f90a8000 {
+		compatible = "qcom,krait-regulator";
+		regulator-name = "krait2";
+		reg = <0xf90a8000 0x1000>;
+		regulator-min-microvolt = <500000>;
+		regulator-max-microvolt = <1100000>;
+	};
+
+	krait3_vreg: regulator@f90b8000 {
+		compatible = "qcom,krait-regulator";
+		regulator-name = "krait3";
+		reg = <0xf90b8000 0x1000>;
+		regulator-min-microvolt = <500000>;
+		regulator-max-microvolt = <1100000>;
+	};
+};
diff --git a/arch/arm/boot/dts/msmcopper-rumi.dts b/arch/arm/boot/dts/msm8974-rumi.dts
similarity index 94%
rename from arch/arm/boot/dts/msmcopper-rumi.dts
rename to arch/arm/boot/dts/msm8974-rumi.dts
index 3f1a2ac..b179d94 100644
--- a/arch/arm/boot/dts/msmcopper-rumi.dts
+++ b/arch/arm/boot/dts/msm8974-rumi.dts
@@ -12,11 +12,11 @@
 
 /dts-v1/;
 
-/include/ "msmcopper.dtsi"
+/include/ "msm8974.dtsi"
 
 / {
-	model = "Qualcomm MSM Copper RUMI";
-	compatible = "qcom,msmcopper-rumi", "qcom,msmcopper";
+	model = "Qualcomm MSM 8974 RUMI";
+	compatible = "qcom,msm8974-rumi", "qcom,msm8974";
 
 	timer {
 		clock-frequency = <5000000>;
diff --git a/arch/arm/boot/dts/msmcopper-sim.dts b/arch/arm/boot/dts/msm8974-sim.dts
similarity index 81%
rename from arch/arm/boot/dts/msmcopper-sim.dts
rename to arch/arm/boot/dts/msm8974-sim.dts
index ab6b8ba..b6044a6 100644
--- a/arch/arm/boot/dts/msmcopper-sim.dts
+++ b/arch/arm/boot/dts/msm8974-sim.dts
@@ -12,9 +12,9 @@
 
 /dts-v1/;
 
-/include/ "msmcopper.dtsi"
+/include/ "msm8974.dtsi"
 
 / {
-	model = "Qualcomm MSM Copper Simulator";
-	compatible = "qcom,msmcopper-sim", "qcom,msmcopper";
+	model = "Qualcomm MSM 8974 Simulator";
+	compatible = "qcom,msm8974-sim", "qcom,msm8974";
 };
diff --git a/arch/arm/boot/dts/msmcopper.dtsi b/arch/arm/boot/dts/msm8974.dtsi
similarity index 72%
rename from arch/arm/boot/dts/msmcopper.dtsi
rename to arch/arm/boot/dts/msm8974.dtsi
index 79d6814..8f8deac 100644
--- a/arch/arm/boot/dts/msmcopper.dtsi
+++ b/arch/arm/boot/dts/msm8974.dtsi
@@ -11,18 +11,18 @@
  */
 
 /include/ "skeleton.dtsi"
-/include/ "msmcopper_pm.dtsi"
+/include/ "msm8974_pm.dtsi"
 /include/ "msm-pm8x41-rpm-regulator.dtsi"
 /include/ "msm-pm8841.dtsi"
 /include/ "msm-pm8941.dtsi"
-/include/ "msmcopper-regulator.dtsi"
-/include/ "msmcopper-gpio.dtsi"
-/include/ "msmcopper-iommu.dtsi"
+/include/ "msm8974-regulator.dtsi"
+/include/ "msm8974-gpio.dtsi"
+/include/ "msm8974-iommu.dtsi"
 /include/ "msm-gdsc.dtsi"
 
 / {
-	model = "Qualcomm MSM Copper";
-	compatible = "qcom,msmcopper";
+	model = "Qualcomm MSM 8974";
+	compatible = "qcom,msm8974";
 	interrupt-parent = <&intc>;
 
 	intc: interrupt-controller@F9000000 {
@@ -83,12 +83,21 @@
 		compatible = "qcom,msm-sdcc";
 		reg = <0xf9824000 0x1000>;
 		interrupts = <0 123 0>;
+		vdd-supply = <&pm8941_l20>;
+		vdd-io-supply = <&pm8941_s3>;
+
+		qcom,sdcc-vdd-voltage_level = <2950000 2950000>;
+		qcom,sdcc-vdd-current_level = <800 500000>;
+
+		qcom,sdcc-vdd-io-always_on;
+		qcom,sdcc-vdd-io-voltage_level = <1800000 1800000>;
+		qcom,sdcc-vdd-io-current_level = <250 154000>;
 
 		qcom,sdcc-clk-rates = <400000 25000000 50000000 100000000 200000000>;
 		qcom,sdcc-sup-voltages = <2950 2950>;
 		qcom,sdcc-bus-width = <8>;
-		qcom,sdcc-hs200;
 		qcom,sdcc-nonremovable;
+		qcom,sdcc-bus-speed-mode = "HS200_1p8v", "DDR_1p8v";
 	};
 
 	qcom,sdcc@f98a4000 {
@@ -96,10 +105,23 @@
 		compatible = "qcom,msm-sdcc";
 		reg = <0xf98a4000 0x1000>;
 		interrupts = <0 125 0>;
+		vdd-supply = <&pm8941_l21>;
+		vdd-io-supply = <&pm8941_l13>;
+
+		qcom,sdcc-vdd-voltage_level = <2950000 2950000>;
+		qcom,sdcc-vdd-current_level = <9000 800000>;
+
+		qcom,sdcc-vdd-io-always_on;
+		qcom,sdcc-vdd-io-lpm_sup;
+		qcom,sdcc-vdd-io-voltage_level = <1800000 2950000>;
+		qcom,sdcc-vdd-io-current_level = <6 22000>;
 
 		qcom,sdcc-clk-rates = <400000 25000000 50000000 100000000 200000000>;
 		qcom,sdcc-sup-voltages = <2950 2950>;
 		qcom,sdcc-bus-width = <4>;
+		qcom,sdcc-xpc;
+		qcom,sdcc-bus-speed-mode = "SDR12", "SDR25", "SDR50", "DDR50", "SDR104";
+		qcom,sdcc-current-limit = <800>;
 	};
 
 	qcom,sdcc@f9864000 {
@@ -111,6 +133,7 @@
 		qcom,sdcc-clk-rates = <400000 25000000 50000000 100000000>;
 		qcom,sdcc-sup-voltages = <1800 1800>;
 		qcom,sdcc-bus-width = <4>;
+		qcom,sdcc-bus-speed-mode = "SDR12", "SDR25", "SDR50", "DDR50";
 		status = "disable";
 	};
 
@@ -123,6 +146,7 @@
 		qcom,sdcc-clk-rates = <400000 25000000 50000000 100000000>;
 		qcom,sdcc-sup-voltages = <1800 1800>;
 		qcom,sdcc-bus-width = <4>;
+		qcom,sdcc-bus-speed-mode = "SDR12", "SDR25", "SDR50", "DDR50";
 		status = "disable";
 	};
 
@@ -269,13 +293,36 @@
 	};
 
 	qcom,acpuclk@f9000000 {
-		compatible = "qcom,acpuclk-copper";
+		compatible = "qcom,acpuclk-8974";
+		krait0-supply = <&krait0_vreg>;
+		krait1-supply = <&krait1_vreg>;
+		krait2-supply = <&krait2_vreg>;
+		krait3-supply = <&krait3_vreg>;
+		krait0_mem-supply = <&pm8841_s1_ao>;
+		krait1_mem-supply = <&pm8841_s1_ao>;
+		krait2_mem-supply = <&pm8841_s1_ao>;
+		krait3_mem-supply = <&pm8841_s1_ao>;
+		krait0_dig-supply = <&pm8841_s2_corner_ao>;
+		krait1_dig-supply = <&pm8841_s2_corner_ao>;
+		krait2_dig-supply = <&pm8841_s2_corner_ao>;
+		krait3_dig-supply = <&pm8841_s2_corner_ao>;
+		krait0_hfpll_a-supply = <&pm8941_s2_ao>;
+		krait1_hfpll_a-supply = <&pm8941_s2_ao>;
+		krait2_hfpll_a-supply = <&pm8941_s2_ao>;
+		krait3_hfpll_a-supply = <&pm8941_s2_ao>;
+		l2_hfpll_a-supply = <&pm8941_s2_ao>;
+		krait0_hfpll_b-supply = <&pm8941_l12_ao>;
+		krait1_hfpll_b-supply = <&pm8941_l12_ao>;
+		krait2_hfpll_b-supply = <&pm8941_l12_ao>;
+		krait3_hfpll_b-supply = <&pm8941_l12_ao>;
+		l2_hfpll_b-supply = <&pm8941_l12_ao>;
 	};
 
 	qcom,ssusb@F9200000 {
 		compatible = "qcom,dwc-usb3-msm";
 		reg = <0xF9200000 0xFA000>;
-		interrupts = <0 131 0>;
+		interrupts = <0 131 0 0 179 0>;
+		interrupt-names = "irq", "otg_irq";
 		SSUSB_VDDCX-supply = <&pm8841_s2>;
 		SSUSB_1p8-supply = <&pm8941_l6>;
 		HSUSB_VDDCX-supply = <&pm8841_s2>;
@@ -296,52 +343,55 @@
 		qcom,firmware-name = "adsp";
 	};
 
-        qcom,msm-pcm {
-                compatible = "qcom,msm-pcm-dsp";
-        };
+	qcom,msm-pcm {
+		compatible = "qcom,msm-pcm-dsp";
+	};
 
-        qcom,msm-pcm-routing {
-                compatible = "qcom,msm-pcm-routing";
-        };
+	qcom,msm-pcm-routing {
+		compatible = "qcom,msm-pcm-routing";
+	};
 
-        qcom,msm-pcm-lpa {
-                compatible = "qcom,msm-pcm-lpa";
-        };
+	qcom,msm-pcm-lpa {
+		compatible = "qcom,msm-pcm-lpa";
+	};
 
-        qcom,msm-voip-dsp {
-                compatible = "qcom,msm-voip-dsp";
-        };
+	qcom,msm-voip-dsp {
+		compatible = "qcom,msm-voip-dsp";
+	};
 
-        qcom,msm-stub-codec {
-                compatible = "qcom,msm-stub-codec";
-        };
+	qcom,msm-stub-codec {
+		compatible = "qcom,msm-stub-codec";
+	};
 
-        qcom,msm-dai-fe {
-                compatible = "qcom,msm-dai-fe";
-        };
+	qcom,msm-dai-fe {
+		compatible = "qcom,msm-dai-fe";
+	};
 
-        qcom,msm-dai-q6 {
-                compatible = "qcom,msm-dai-q6";
-                qcom,msm-cpudai-auxpcm-clk = "pcm_clk";
-                qcom,msm-cpudai-auxpcm-mode = <0>;
-                qcom,msm-cpudai-auxpcm-sync = <1>;
-                qcom,msm-cpudai-auxpcm-frame = <5>;
-                qcom,msm-cpudai-auxpcm-quant = <2>;
-                qcom,msm-cpudai-auxpcm-slot = <1>;
-                qcom,msm-cpudai-auxpcm-data = <0>;
-                qcom,msm-cpudai-auxpcm-pcm-clk-rate = <2048000>;
+	qcom,msm-auxpcm {
+		compatible = "qcom,msm-auxpcm-resource";
+		qcom,msm-cpudai-auxpcm-clk = "pcm_clk";
+		qcom,msm-cpudai-auxpcm-mode = <0>;
+		qcom,msm-cpudai-auxpcm-sync = <1>;
+		qcom,msm-cpudai-auxpcm-frame = <5>;
+		qcom,msm-cpudai-auxpcm-quant = <2>;
+		qcom,msm-cpudai-auxpcm-slot = <1>;
+		qcom,msm-cpudai-auxpcm-data = <0>;
+		qcom,msm-cpudai-auxpcm-pcm-clk-rate = <2048000>;
 
-                qcom,msm-dai-q6-rx {
-                        qcom,msm-dai-q6-id = <4106>;
-                };
-                qcom,msm-dai-q6-tx {
-                        qcom,msm-dai-q6-id = <4107>;
-                };
-        };
+		qcom,msm-auxpcm-rx {
+			qcom,msm-auxpcm-dev-id = <4106>;
+			compatible = "qcom,msm-auxpcm-dev";
+		};
 
-        qcom,msm-pcm-hostless {
-                compatible = "qcom,msm-pcm-hostless";
-        };
+		qcom,msm-auxpcm-tx {
+			qcom,msm-auxpcm-dev-id = <4107>;
+			compatible = "qcom,msm-auxpcm-dev";
+		};
+	};
+
+	qcom,msm-pcm-hostless {
+		compatible = "qcom,msm-pcm-hostless";
+	};
 
 	qcom,mss@fc880000 {
 		compatible = "qcom,pil-q6v5-mss";
@@ -398,6 +448,7 @@
 		compatible = "qcom,mdss_mdp";
 		reg = <0xfd900000 0x22100>;
 		interrupts = <0 72 0>;
+		vdd-supply = <&gdsc_mdss>;
 	};
 
 	qcom,mdss_wb_panel {
@@ -406,4 +457,29 @@
 		qcom,mdss_pan_res = <640 480>;
 		qcom,mdss_pan_bpp = <24>;
 	};
+
+	qcom,wdt@f9017000 {
+		compatible = "qcom,msm-watchdog";
+		reg = <0xf9017000 0x1000>;
+		interrupts = <0 3 0 0 4 0>;
+		qcom,bark-time = <11000>;
+		qcom,pet-time = <10000>;
+		qcom,ipi-ping = <1>;
+	};
+
+	qcom,tz-log@fe805720 {
+		compatible = "qcom,tz-log";
+		reg = <0xfe805720 0x1000>;
+	};
+
+	qcom,venus@fdce0000 {
+		compatible = "qcom,pil-venus";
+		reg = <0xfdce0000 0x4000>,
+		      <0xfdc80208 0x8>;
+		vdd-supply = <&gdsc_venus>;
+
+		qcom,firmware-name = "venus";
+		qcom,firmware-min-paddr = <0xF500000>;
+		qcom,firmware-max-paddr = <0xFA00000>;
+	};
 };
diff --git a/arch/arm/boot/dts/msmcopper_pm.dtsi b/arch/arm/boot/dts/msm8974_pm.dtsi
similarity index 100%
rename from arch/arm/boot/dts/msmcopper_pm.dtsi
rename to arch/arm/boot/dts/msm8974_pm.dtsi
diff --git a/arch/arm/boot/dts/msmcopper-regulator.dtsi b/arch/arm/boot/dts/msmcopper-regulator.dtsi
deleted file mode 100644
index a926aa3..0000000
--- a/arch/arm/boot/dts/msmcopper-regulator.dtsi
+++ /dev/null
@@ -1,394 +0,0 @@
-/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- */
-
-/ {
-	qcom,spmi@fc4c0000 {
-
-		qcom,pm8941@1 {
-
-			pm8941_s1: regulator@1400 {
-				regulator-min-microvolt = <1300000>;
-				regulator-max-microvolt = <1300000>;
-				qcom,enable-time = <500>;
-				qcom,pull-down-enable = <1>;
-				regulator-always-on;
-				status = "okay";
-			};
-
-			pm8941_s2: regulator@1700 {
-				regulator-min-microvolt = <2150000>;
-				regulator-max-microvolt = <2150000>;
-				qcom,enable-time = <500>;
-				qcom,pull-down-enable = <1>;
-				status = "okay";
-			};
-
-			pm8941_s3: regulator@1a00 {
-				regulator-min-microvolt = <1800000>;
-				regulator-max-microvolt = <1800000>;
-				qcom,enable-time = <500>;
-				qcom,pull-down-enable = <1>;
-				regulator-always-on;
-				status = "okay";
-			};
-
-			pm8941_boost: regulator@a000 {
-				regulator-min-microvolt = <5000000>;
-				regulator-max-microvolt = <5000000>;
-				qcom,enable-time = <500>;
-				status = "okay";
-			};
-
-			pm8941_l1: regulator@4000 {
-				parent-supply = <&pm8941_s1>;
-				regulator-min-microvolt = <1225000>;
-				regulator-max-microvolt = <1225000>;
-				qcom,enable-time = <200>;
-				qcom,pull-down-enable = <1>;
-				regulator-always-on;
-				status = "okay";
-			};
-
-			pm8941_l2: regulator@4100 {
-				parent-supply = <&pm8941_s3>;
-				regulator-min-microvolt = <1200000>;
-				regulator-max-microvolt = <1200000>;
-				qcom,enable-time = <200>;
-				qcom,pull-down-enable = <1>;
-				status = "okay";
-			};
-
-			pm8941_l3: regulator@4200 {
-				parent-supply = <&pm8941_s1>;
-				regulator-min-microvolt = <1200000>;
-				regulator-max-microvolt = <1200000>;
-				qcom,enable-time = <200>;
-				qcom,pull-down-enable = <1>;
-				status = "okay";
-			};
-
-			pm8941_l4: regulator@4300 {
-				parent-supply = <&pm8941_s1>;
-				regulator-min-microvolt = <1150000>;
-				regulator-max-microvolt = <1150000>;
-				qcom,enable-time = <200>;
-				qcom,pull-down-enable = <1>;
-				status = "okay";
-			};
-
-			pm8941_l5: regulator@4400 {
-				parent-supply = <&pm8941_s2>;
-				regulator-min-microvolt = <1800000>;
-				regulator-max-microvolt = <1800000>;
-				qcom,enable-time = <200>;
-				qcom,pull-down-enable = <1>;
-				status = "okay";
-			};
-
-			pm8941_l6: regulator@4500 {
-				parent-supply = <&pm8941_s2>;
-				regulator-min-microvolt = <1800000>;
-				regulator-max-microvolt = <1800000>;
-				qcom,enable-time = <200>;
-				qcom,pull-down-enable = <1>;
-				status = "okay";
-			};
-
-			pm8941_l7: regulator@4600 {
-				parent-supply = <&pm8941_s2>;
-				regulator-min-microvolt = <1800000>;
-				regulator-max-microvolt = <1800000>;
-				qcom,enable-time = <200>;
-				qcom,pull-down-enable = <1>;
-				status = "okay";
-			};
-
-			pm8941_l8: regulator@4700 {
-				regulator-min-microvolt = <1800000>;
-				regulator-max-microvolt = <1800000>;
-				qcom,enable-time = <200>;
-				qcom,pull-down-enable = <1>;
-				status = "okay";
-			};
-
-			pm8941_l9: regulator@4800 {
-				regulator-min-microvolt = <1800000>;
-				regulator-max-microvolt = <2950000>;
-				qcom,enable-time = <200>;
-				qcom,pull-down-enable = <1>;
-				status = "okay";
-			};
-
-			pm8941_l10: regulator@4900 {
-				regulator-min-microvolt = <1800000>;
-				regulator-max-microvolt = <2950000>;
-				qcom,enable-time = <200>;
-				qcom,pull-down-enable = <1>;
-				status = "okay";
-			};
-
-			pm8941_l11: regulator@4a00 {
-				parent-supply = <&pm8941_s1>;
-				regulator-min-microvolt = <1250000>;
-				regulator-max-microvolt = <1250000>;
-				qcom,enable-time = <200>;
-				qcom,pull-down-enable = <1>;
-				status = "okay";
-			};
-
-			pm8941_l12: regulator@4b00 {
-				parent-supply = <&pm8941_s2>;
-				regulator-min-microvolt = <1800000>;
-				regulator-max-microvolt = <1800000>;
-				qcom,enable-time = <200>;
-				qcom,pull-down-enable = <1>;
-				status = "okay";
-			};
-
-			pm8941_l13: regulator@4c00 {
-				regulator-min-microvolt = <1800000>;
-				regulator-max-microvolt = <2950000>;
-				qcom,enable-time = <200>;
-				qcom,pull-down-enable = <1>;
-				status = "okay";
-			};
-
-			pm8941_l14: regulator@4d00 {
-				parent-supply = <&pm8941_s2>;
-				regulator-min-microvolt = <1800000>;
-				regulator-max-microvolt = <1800000>;
-				qcom,enable-time = <200>;
-				qcom,pull-down-enable = <1>;
-				status = "okay";
-			};
-
-			pm8941_l15: regulator@4e00 {
-				parent-supply = <&pm8941_s2>;
-				regulator-min-microvolt = <2050000>;
-				regulator-max-microvolt = <2050000>;
-				qcom,enable-time = <200>;
-				qcom,pull-down-enable = <1>;
-				status = "okay";
-			};
-
-			pm8941_l16: regulator@4f00 {
-				regulator-min-microvolt = <2700000>;
-				regulator-max-microvolt = <2700000>;
-				qcom,enable-time = <200>;
-				qcom,pull-down-enable = <1>;
-				status = "okay";
-			};
-
-			pm8941_l17: regulator@5000 {
-				regulator-min-microvolt = <2850000>;
-				regulator-max-microvolt = <2850000>;
-				qcom,enable-time = <200>;
-				qcom,pull-down-enable = <1>;
-				status = "okay";
-			};
-
-			pm8941_l18: regulator@5100 {
-				regulator-min-microvolt = <2850000>;
-				regulator-max-microvolt = <2850000>;
-				qcom,enable-time = <200>;
-				qcom,pull-down-enable = <1>;
-				status = "okay";
-			};
-
-			pm8941_l19: regulator@5200 {
-				regulator-min-microvolt = <2900000>;
-				regulator-max-microvolt = <2900000>;
-				qcom,enable-time = <200>;
-				qcom,pull-down-enable = <1>;
-				status = "okay";
-			};
-
-			pm8941_l20: regulator@5300 {
-				regulator-min-microvolt = <2950000>;
-				regulator-max-microvolt = <2950000>;
-				qcom,enable-time = <200>;
-				qcom,pull-down-enable = <1>;
-				status = "okay";
-			};
-
-			pm8941_l21: regulator@5400 {
-				regulator-min-microvolt = <2950000>;
-				regulator-max-microvolt = <2950000>;
-				qcom,enable-time = <200>;
-				qcom,pull-down-enable = <1>;
-				status = "okay";
-			};
-
-			pm8941_l22: regulator@5500 {
-				regulator-min-microvolt = <3000000>;
-				regulator-max-microvolt = <3000000>;
-				qcom,enable-time = <200>;
-				qcom,pull-down-enable = <1>;
-				status = "okay";
-			};
-
-			pm8941_l23: regulator@5600 {
-				regulator-min-microvolt = <3000000>;
-				regulator-max-microvolt = <3000000>;
-				qcom,enable-time = <200>;
-				qcom,pull-down-enable = <1>;
-				status = "okay";
-			};
-
-			pm8941_l24: regulator@5700 {
-				regulator-min-microvolt = <3075000>;
-				regulator-max-microvolt = <3075000>;
-				qcom,enable-time = <200>;
-				qcom,pull-down-enable = <1>;
-				status = "okay";
-			};
-
-			pm8941_lvs1: regulator@8000 {
-				parent-supply = <&pm8941_s3>;
-				qcom,enable-time = <200>;
-				qcom,pull-down-enable = <1>;
-				status = "okay";
-			};
-
-			pm8941_lvs2: regulator@8100 {
-				parent-supply = <&pm8941_s3>;
-				qcom,enable-time = <200>;
-				qcom,pull-down-enable = <1>;
-				status = "okay";
-			};
-
-			pm8941_lvs3: regulator@8200 {
-				parent-supply = <&pm8941_s3>;
-				qcom,enable-time = <200>;
-				qcom,pull-down-enable = <1>;
-				status = "okay";
-			};
-
-			pm8941_mvs1: regulator@8300 {
-				parent-supply = <&pm8941_boost>;
-				qcom,enable-time = <200>;
-				qcom,pull-down-enable = <1>;
-				status = "okay";
-			};
-
-			pm8941_mvs2: regulator@8400 {
-				parent-supply = <&pm8941_boost>;
-				qcom,enable-time = <200>;
-				qcom,pull-down-enable = <1>;
-				status = "okay";
-			};
-		};
-
-		qcom,pm8841@5 {
-
-			pm8841_s1: regulator@1400 {
-				regulator-min-microvolt = <900000>;
-				regulator-max-microvolt = <1150000>;
-				qcom,enable-time = <500>;
-				qcom,pull-down-enable = <1>;
-				regulator-always-on;
-				status = "okay";
-			};
-
-			pm8841_s2: regulator@1700 {
-				regulator-min-microvolt = <900000>;
-				regulator-max-microvolt = <1150000>;
-				qcom,enable-time = <500>;
-				qcom,pull-down-enable = <1>;
-				regulator-always-on;
-				status = "okay";
-			};
-
-			pm8841_s3: regulator@1a00 {
-				regulator-min-microvolt = <1150000>;
-				regulator-max-microvolt = <1150000>;
-				qcom,enable-time = <500>;
-				qcom,pull-down-enable = <1>;
-				regulator-always-on;
-				status = "okay";
-			};
-
-			pm8841_s4: regulator@1d00 {
-				regulator-min-microvolt = <900000>;
-				regulator-max-microvolt = <900000>;
-				qcom,enable-time = <500>;
-				qcom,pull-down-enable = <1>;
-				status = "okay";
-			};
-
-			pm8841_s5: regulator@2000 {
-				regulator-min-microvolt = <850000>;
-				regulator-max-microvolt = <1100000>;
-				qcom,enable-time = <500>;
-				qcom,pull-down-enable = <1>;
-				regulator-always-on;
-				status = "okay";
-			};
-
-			pm8841_s6: regulator@2300 {
-				regulator-min-microvolt = <850000>;
-				regulator-max-microvolt = <1100000>;
-				qcom,enable-time = <500>;
-				qcom,pull-down-enable = <1>;
-				status = "okay";
-			};
-
-			pm8841_s7: regulator@2600 {
-				regulator-min-microvolt = <850000>;
-				regulator-max-microvolt = <1100000>;
-				qcom,enable-time = <500>;
-				qcom,pull-down-enable = <1>;
-				status = "okay";
-			};
-
-			pm8841_s8: regulator@2900 {
-				regulator-min-microvolt = <850000>;
-				regulator-max-microvolt = <1100000>;
-				qcom,enable-time = <500>;
-				qcom,pull-down-enable = <1>;
-				status = "okay";
-			};
-		};
-	};
-
-	krait0_vreg: regulator@f9088000 {
-		compatible = "qcom,krait-regulator";
-		regulator-name = "krait0";
-		reg = <0xf9088000 0x1000>;
-		regulator-min-microvolt = <500000>;
-		regulator-max-microvolt = <1100000>;
-	};
-
-	krait1_vreg: regulator@f9098000 {
-		compatible = "qcom,krait-regulator";
-		regulator-name = "krait1";
-		reg = <0xf9098000 0x1000>;
-		regulator-min-microvolt = <500000>;
-		regulator-max-microvolt = <1100000>;
-	};
-
-	krait2_vreg: regulator@f90a8000 {
-		compatible = "qcom,krait-regulator";
-		regulator-name = "krait2";
-		reg = <0xf90a8000 0x1000>;
-		regulator-min-microvolt = <500000>;
-		regulator-max-microvolt = <1100000>;
-	};
-
-	krait3_vreg: regulator@f90b8000 {
-		compatible = "qcom,krait-regulator";
-		regulator-name = "krait3";
-		reg = <0xf90b8000 0x1000>;
-		regulator-min-microvolt = <500000>;
-		regulator-max-microvolt = <1100000>;
-	};
-};
diff --git a/arch/arm/common/cpaccess.c b/arch/arm/common/cpaccess.c
index e71e318..12e2c38 100644
--- a/arch/arm/common/cpaccess.c
+++ b/arch/arm/common/cpaccess.c
@@ -63,6 +63,8 @@
 	.name = "cpaccess",
 };
 
+void cpaccess_dummy_inst(void);
+
 #ifdef CONFIG_ARCH_MSM_KRAIT
 /*
  * do_read_il2 - Read indirect L2 registers
@@ -143,9 +145,12 @@
  */
 static noinline unsigned long cpaccess_dummy(unsigned long write_val)
 {
-	asm("mrc p15, 0, r0, c0, c0, 0\n\t");
-	asm("bx	lr\n\t");
-	return 0xBEEF;
+	unsigned long ret = 0xBEEF;
+
+	asm volatile (".globl cpaccess_dummy_inst\n"
+			"cpaccess_dummy_inst:\n\t"
+			"mrc p15, 0, %0, c0, c0, 0\n\t" : "=r" (ret));
+	return ret;
 } __attribute__((aligned(32)))
 
 /*
@@ -195,7 +200,7 @@
 	 * Grab address of the Dummy function, write the MRC/MCR
 	 * instruction, ensuring cache coherency.
 	 */
-	p_opcode = (unsigned long *)&cpaccess_dummy;
+	p_opcode = (unsigned long *)&cpaccess_dummy_inst;
 	mem_text_write_kernel_word(p_opcode, opcode);
 
 #ifdef CONFIG_SMP
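For readers unfamiliar with the pattern the cpaccess.c hunks above adopt, here is a minimal sketch (not part of the patch; the demo_* names are hypothetical) of emitting a global label at the exact MRC instruction so a caller can later locate and rewrite that single word, instead of assuming the instruction sits at the start of the function:

#include <linux/kernel.h>

/* Label emitted inside the asm below; resolves to the MRC instruction. */
void demo_patch_target(void);

static unsigned long demo_read_cp15(void)
{
	unsigned long val;

	/* The .globl/label pair pins a symbol to this exact instruction. */
	asm volatile (".globl demo_patch_target\n"
		      "demo_patch_target:\n\t"
		      "mrc p15, 0, %0, c0, c0, 0\n\t" : "=r" (val));
	return val;
}

/*
 * A caller could then patch the word at &demo_patch_target, which is
 * what the driver above does via mem_text_write_kernel_word().
 */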
diff --git a/arch/arm/common/gic.c b/arch/arm/common/gic.c
index b14ecf8..88c4862 100644
--- a/arch/arm/common/gic.c
+++ b/arch/arm/common/gic.c
@@ -64,9 +64,7 @@
 	u32 __percpu *saved_ppi_enable;
 	u32 __percpu *saved_ppi_conf;
 #endif
-#ifdef CONFIG_IRQ_DOMAIN
-	struct irq_domain domain;
-#endif
+	struct irq_domain *domain;
 	unsigned int gic_irqs;
 #ifdef CONFIG_GIC_NON_BANKED
 	void __iomem *(*get_base)(union gic_base *);
@@ -447,7 +445,7 @@
 		irqnr = irqstat & ~0x1c00;
 
 		if (likely(irqnr > 15 && irqnr < 1021)) {
-			irqnr = irq_domain_to_irq(&gic->domain, irqnr);
+			irqnr = irq_find_mapping(gic->domain, irqnr);
 			handle_IRQ(irqnr, regs);
 			continue;
 		}
@@ -485,8 +483,8 @@
 	if (gic_irq == 1023)
 		goto out;
 
-	cascade_irq = irq_domain_to_irq(&chip_data->domain, gic_irq);
-	if (unlikely(gic_irq < 32 || gic_irq > 1020 || cascade_irq >= NR_IRQS))
+	cascade_irq = irq_find_mapping(chip_data->domain, gic_irq);
+	if (unlikely(gic_irq < 32 || gic_irq > 1020))
 		do_bad_IRQ(cascade_irq, desc);
 	else
 		generic_handle_irq(cascade_irq);
@@ -520,10 +518,9 @@
 
 static void __init gic_dist_init(struct gic_chip_data *gic)
 {
-	unsigned int i, irq;
+	unsigned int i;
 	u32 cpumask;
 	unsigned int gic_irqs = gic->gic_irqs;
-	struct irq_domain *domain = &gic->domain;
 	void __iomem *base = gic_data_dist_base(gic);
 	u32 cpu = cpu_logical_map(smp_processor_id());
 
@@ -566,23 +563,6 @@
 	for (i = 32; i < gic_irqs; i += 32)
 		writel_relaxed(0xffffffff, base + GIC_DIST_ENABLE_CLEAR + i * 4 / 32);
 
-	/*
-	 * Setup the Linux IRQ subsystem.
-	 */
-	irq_domain_for_each_irq(domain, i, irq) {
-		if (i < 32) {
-			irq_set_percpu_devid(irq);
-			irq_set_chip_and_handler(irq, &gic_chip,
-						 handle_percpu_devid_irq);
-			set_irq_flags(irq, IRQF_VALID | IRQF_NOAUTOEN);
-		} else {
-			irq_set_chip_and_handler(irq, &gic_chip,
-						 handle_fasteoi_irq);
-			set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
-		}
-		irq_set_chip_data(irq, gic);
-	}
-
 	gic->max_irq = gic_irqs;
 
 	if (is_cpu_secure())
@@ -831,13 +811,45 @@
 static void __init gic_pm_init(struct gic_chip_data *gic)
 {
 }
+
+static void gic_cpu_restore(unsigned int gic_nr)
+{
+}
+
+static void gic_cpu_save(unsigned int gic_nr)
+{
+}
+
+static void gic_dist_restore(unsigned int gic_nr)
+{
+}
+
+static void gic_dist_save(unsigned int gic_nr)
+{
+}
 #endif
 
-#ifdef CONFIG_OF
-static int gic_irq_domain_dt_translate(struct irq_domain *d,
-				       struct device_node *controller,
-				       const u32 *intspec, unsigned int intsize,
-				       unsigned long *out_hwirq, unsigned int *out_type)
+static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq,
+				irq_hw_number_t hw)
+{
+	if (hw < 32) {
+		irq_set_percpu_devid(irq);
+		irq_set_chip_and_handler(irq, &gic_chip,
+					 handle_percpu_devid_irq);
+		set_irq_flags(irq, IRQF_VALID | IRQF_NOAUTOEN);
+	} else {
+		irq_set_chip_and_handler(irq, &gic_chip,
+					 handle_fasteoi_irq);
+		set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
+	}
+	irq_set_chip_data(irq, d->host_data);
+	return 0;
+}
+
+static int gic_irq_domain_xlate(struct irq_domain *d,
+				struct device_node *controller,
+				const u32 *intspec, unsigned int intsize,
+				unsigned long *out_hwirq, unsigned int *out_type)
 {
 	if (d->of_node != controller)
 		return -EINVAL;
@@ -854,26 +866,23 @@
 	*out_type = intspec[2] & IRQ_TYPE_SENSE_MASK;
 	return 0;
 }
-#endif
 
 const struct irq_domain_ops gic_irq_domain_ops = {
-#ifdef CONFIG_OF
-	.dt_translate = gic_irq_domain_dt_translate,
-#endif
+	.map = gic_irq_domain_map,
+	.xlate = gic_irq_domain_xlate,
 };
 
 void __init gic_init_bases(unsigned int gic_nr, int irq_start,
 			   void __iomem *dist_base, void __iomem *cpu_base,
-			   u32 percpu_offset)
+			   u32 percpu_offset, struct device_node *node)
 {
+	irq_hw_number_t hwirq_base;
 	struct gic_chip_data *gic;
-	struct irq_domain *domain;
-	int gic_irqs, rc;
+	int gic_irqs, irq_base;
 
 	BUG_ON(gic_nr >= MAX_GIC_NR);
 
 	gic = &gic_data[gic_nr];
-	domain = &gic->domain;
 #ifdef CONFIG_GIC_NON_BANKED
 	if (percpu_offset) { /* Frankein-GIC without banked registers... */
 		unsigned int cpu;
@@ -881,8 +890,11 @@
 		gic->dist_base.percpu_base = alloc_percpu(void __iomem *);
 		gic->cpu_base.percpu_base = alloc_percpu(void __iomem *);
 		if (WARN_ON(!gic->dist_base.percpu_base ||
-			    !gic->cpu_base.percpu_base))
-			goto init_bases_err;
+			     !gic->cpu_base.percpu_base)) {
+			free_percpu(gic->dist_base.percpu_base);
+			free_percpu(gic->cpu_base.percpu_base);
+			return;
+		}
 
 		for_each_possible_cpu(cpu) {
 			unsigned long offset = percpu_offset * cpu_logical_map(cpu);
@@ -906,13 +918,12 @@
 	 * For primary GICs, skip over SGIs.
 	 * For secondary GICs, skip over PPIs, too.
 	 */
-	domain->hwirq_base = 32;
-	if (gic_nr == 0) {
-		if ((irq_start & 31) > 0) {
-			domain->hwirq_base = 16;
-			if (irq_start != -1)
-				irq_start = (irq_start & ~31) + 16;
-		}
+	if (gic_nr == 0 && (irq_start & 31) > 0) {
+		hwirq_base = 16;
+		if (irq_start != -1)
+			irq_start = (irq_start & ~31) + 16;
+	} else {
+		hwirq_base = 32;
 	}
 
 	/*
@@ -925,33 +936,22 @@
 		gic_irqs = 1020;
 	gic->gic_irqs = gic_irqs;
 
-	domain->nr_irq = gic_irqs - domain->hwirq_base;
-	domain->irq_base = irq_alloc_descs(irq_start, 16, domain->nr_irq,
-					   numa_node_id());
-	if (IS_ERR_VALUE(domain->irq_base)) {
+	gic_irqs -= hwirq_base; /* calculate # of irqs to allocate */
+	irq_base = irq_alloc_descs(irq_start, 16, gic_irqs, numa_node_id());
+	if (IS_ERR_VALUE(irq_base)) {
 		WARN(1, "Cannot allocate irq_descs @ IRQ%d, assuming pre-allocated\n",
 		     irq_start);
-		domain->irq_base = irq_start;
+		irq_base = irq_start;
 	}
-	domain->priv = gic;
-	domain->ops = &gic_irq_domain_ops;
-	rc = irq_domain_add(domain);
-	if (rc) {
-		WARN(1, "Unable to create irq_domain\n");
-		goto init_bases_err;
-	}
-	irq_domain_register(domain);
+	gic->domain = irq_domain_add_legacy(node, gic_irqs, irq_base,
+				    hwirq_base, &gic_irq_domain_ops, gic);
+	if (WARN_ON(!gic->domain))
+		return;
 
 	gic_chip.flags |= gic_arch_extn.flags;
 	gic_dist_init(gic);
 	gic_cpu_init(gic);
 	gic_pm_init(gic);
-
-	return;
-
-init_bases_err:
-	free_percpu(gic->dist_base.percpu_base);
-	free_percpu(gic->cpu_base.percpu_base);
 }
 
 void __cpuinit gic_secondary_init(unsigned int gic_nr)
@@ -1036,7 +1036,6 @@
 	void __iomem *dist_base;
 	u32 percpu_offset;
 	int irq;
-	struct irq_domain *domain = &gic_data[gic_cnt].domain;
 
 	if (WARN_ON(!node))
 		return -ENODEV;
@@ -1050,9 +1049,7 @@
 	if (of_property_read_u32(node, "cpu-offset", &percpu_offset))
 		percpu_offset = 0;
 
-	domain->of_node = of_node_get(node);
-
-	gic_init_bases(gic_cnt, -1, dist_base, cpu_base, percpu_offset);
+	gic_init_bases(gic_cnt, -1, dist_base, cpu_base, percpu_offset, node);
 
 	if (parent) {
 		irq = irq_of_parse_and_map(node, 0);
@@ -1152,6 +1149,7 @@
 
 	return 0;
 }
+#endif
 
 void msm_gic_save(bool modem_wake, int from_idle)
 {
@@ -1218,4 +1216,3 @@
 	mb();
 	raw_spin_unlock_irqrestore(&irq_controller_lock, flags);
 }
-#endif
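The gic.c hunks above drop the embedded struct irq_domain (and the per-IRQ setup loop in gic_dist_init()) in favour of a domain pointer returned by irq_domain_add_legacy(), with per-IRQ setup moved into a .map callback and hwirq lookup done through irq_find_mapping(). As a quick orientation to that API shape, here is a minimal sketch in the same style; it is not part of the patch and the demo_* names are hypothetical:

#include <linux/init.h>
#include <linux/irq.h>
#include <linux/irqdesc.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/of.h>
#include <asm/mach/irq.h>

static struct irq_chip demo_chip = {
	.name = "demo",
};

/* Called once for every hwirq covered by the legacy domain. */
static int demo_irq_map(struct irq_domain *d, unsigned int virq,
			irq_hw_number_t hw)
{
	irq_set_chip_and_handler(virq, &demo_chip, handle_fasteoi_irq);
	irq_set_chip_data(virq, d->host_data);
	set_irq_flags(virq, IRQF_VALID);
	return 0;
}

static const struct irq_domain_ops demo_domain_ops = {
	.map	= demo_irq_map,
	.xlate	= irq_domain_xlate_onecell,
};

static struct irq_domain *demo_domain;

static void __init demo_irq_init(struct device_node *node, void *chip_data,
				 unsigned int num_irqs, unsigned int irq_base,
				 irq_hw_number_t hwirq_base)
{
	/* Fixed virq <-> hwirq mapping, as gic_init_bases() now sets up. */
	demo_domain = irq_domain_add_legacy(node, num_irqs, irq_base,
					    hwirq_base, &demo_domain_ops,
					    chip_data);
	WARN_ON(!demo_domain);
}

/* At interrupt time a hardware number is resolved through the domain. */
static void demo_dispatch(irq_hw_number_t hwirq)
{
	generic_handle_irq(irq_find_mapping(demo_domain, hwirq));
}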
diff --git a/arch/arm/configs/msm7627a-perf_defconfig b/arch/arm/configs/msm7627a-perf_defconfig
index a163829..3cd06a2 100644
--- a/arch/arm/configs/msm7627a-perf_defconfig
+++ b/arch/arm/configs/msm7627a-perf_defconfig
@@ -53,6 +53,7 @@
 CONFIG_MSM_RPC_VIBRATOR=y
 CONFIG_PM8XXX_RPC_VIBRATOR=y
 CONFIG_MSM_SPM_V2=y
+CONFIG_MSM_MULTIMEDIA_USE_ION=y
 CONFIG_ARM_THUMBEE=y
 CONFIG_NO_HZ=y
 CONFIG_HIGH_RES_TIMERS=y
@@ -250,6 +251,8 @@
 CONFIG_MSM_ACTUATOR=y
 CONFIG_OV7692=y
 CONFIG_RADIO_TAVARUA=y
+CONFIG_ION=y
+CONFIG_ION_MSM=y
 CONFIG_MSM_KGSL=y
 CONFIG_FB=y
 CONFIG_FB_MSM=y
@@ -304,7 +307,6 @@
 # CONFIG_MMC_BLOCK_BOUNCE is not set
 CONFIG_MMC_TEST=m
 CONFIG_MMC_MSM=y
-CONFIG_MMC_MSM_CARD_HW_DETECTION=y
 CONFIG_MMC_MSM_SDC3_SUPPORT=y
 CONFIG_MMC_MSM_SDC3_8_BIT_SUPPORT=y
 CONFIG_LEDS_GPIO=y
diff --git a/arch/arm/configs/msm7627a_defconfig b/arch/arm/configs/msm7627a_defconfig
index d8e2e3c..058349a 100644
--- a/arch/arm/configs/msm7627a_defconfig
+++ b/arch/arm/configs/msm7627a_defconfig
@@ -53,6 +53,7 @@
 CONFIG_MSM_RPC_VIBRATOR=y
 CONFIG_PM8XXX_RPC_VIBRATOR=y
 CONFIG_MSM_SPM_V2=y
+CONFIG_MSM_MULTIMEDIA_USE_ION=y
 CONFIG_ARM_THUMBEE=y
 CONFIG_NO_HZ=y
 CONFIG_HIGH_RES_TIMERS=y
@@ -251,6 +252,8 @@
 CONFIG_MSM_ACTUATOR=y
 CONFIG_OV7692=y
 CONFIG_RADIO_TAVARUA=y
+CONFIG_ION=y
+CONFIG_ION_MSM=y
 CONFIG_MSM_KGSL=y
 CONFIG_FB=y
 CONFIG_FB_MSM=y
@@ -305,7 +308,6 @@
 # CONFIG_MMC_BLOCK_BOUNCE is not set
 CONFIG_MMC_TEST=m
 CONFIG_MMC_MSM=y
-CONFIG_MMC_MSM_CARD_HW_DETECTION=y
 CONFIG_MMC_MSM_SDC3_SUPPORT=y
 CONFIG_MMC_MSM_SDC3_8_BIT_SUPPORT=y
 CONFIG_LEDS_MSM_PDM=y
diff --git a/arch/arm/configs/msm7630-perf_defconfig b/arch/arm/configs/msm7630-perf_defconfig
index 7de7fee..262c983 100644
--- a/arch/arm/configs/msm7630-perf_defconfig
+++ b/arch/arm/configs/msm7630-perf_defconfig
@@ -330,7 +330,6 @@
 # CONFIG_MMC_BLOCK_BOUNCE is not set
 CONFIG_MMC_TEST=m
 CONFIG_MMC_MSM=y
-CONFIG_MMC_MSM_CARD_HW_DETECTION=y
 # CONFIG_MMC_MSM_SDC1_SUPPORT is not set
 CONFIG_MMC_MSM_SDC2_8_BIT_SUPPORT=y
 CONFIG_MMC_MSM_SDC3_SUPPORT=y
diff --git a/arch/arm/configs/msm7630_defconfig b/arch/arm/configs/msm7630_defconfig
index bead86e..7eaaf35 100644
--- a/arch/arm/configs/msm7630_defconfig
+++ b/arch/arm/configs/msm7630_defconfig
@@ -328,7 +328,6 @@
 # CONFIG_MMC_BLOCK_BOUNCE is not set
 CONFIG_MMC_TEST=m
 CONFIG_MMC_MSM=y
-CONFIG_MMC_MSM_CARD_HW_DETECTION=y
 # CONFIG_MMC_MSM_SDC1_SUPPORT is not set
 CONFIG_MMC_MSM_SDC2_8_BIT_SUPPORT=y
 CONFIG_MMC_MSM_SDC3_SUPPORT=y
diff --git a/arch/arm/configs/msm8660-perf_defconfig b/arch/arm/configs/msm8660-perf_defconfig
index fe30dc8..ea4c0f6 100644
--- a/arch/arm/configs/msm8660-perf_defconfig
+++ b/arch/arm/configs/msm8660-perf_defconfig
@@ -68,6 +68,7 @@
 CONFIG_MSM_PIL_QDSP6V3=y
 CONFIG_MSM_PIL_TZAPPS=y
 CONFIG_MSM_PIL_DSPS=y
+CONFIG_MSM_PIL_VIDC=y
 CONFIG_MSM_SUBSYSTEM_RESTART=y
 CONFIG_MSM_TZ_LOG=y
 CONFIG_MSM_RPM_LOG=y
@@ -390,7 +391,6 @@
 # CONFIG_MMC_BLOCK_BOUNCE is not set
 CONFIG_MMC_TEST=m
 CONFIG_MMC_MSM=y
-CONFIG_MMC_MSM_CARD_HW_DETECTION=y
 CONFIG_MMC_MSM_SDC1_8_BIT_SUPPORT=y
 CONFIG_MMC_MSM_SDC2_8_BIT_SUPPORT=y
 CONFIG_MMC_MSM_SDC3_SUPPORT=y
diff --git a/arch/arm/configs/msm8660_defconfig b/arch/arm/configs/msm8660_defconfig
index 45339ee..20557e5 100644
--- a/arch/arm/configs/msm8660_defconfig
+++ b/arch/arm/configs/msm8660_defconfig
@@ -67,6 +67,7 @@
 CONFIG_MSM_PIL_QDSP6V3=y
 CONFIG_MSM_PIL_TZAPPS=y
 CONFIG_MSM_PIL_DSPS=y
+CONFIG_MSM_PIL_VIDC=y
 CONFIG_MSM_SUBSYSTEM_RESTART=y
 CONFIG_MSM_TZ_LOG=y
 CONFIG_MSM_RPM_LOG=y
@@ -392,7 +393,6 @@
 # CONFIG_MMC_BLOCK_BOUNCE is not set
 CONFIG_MMC_TEST=m
 CONFIG_MMC_MSM=y
-CONFIG_MMC_MSM_CARD_HW_DETECTION=y
 CONFIG_MMC_MSM_SDC1_8_BIT_SUPPORT=y
 CONFIG_MMC_MSM_SDC2_8_BIT_SUPPORT=y
 CONFIG_MMC_MSM_SDC3_SUPPORT=y
diff --git a/arch/arm/configs/msm8960-perf_defconfig b/arch/arm/configs/msm8960-perf_defconfig
index 57e644d..6c213c3 100644
--- a/arch/arm/configs/msm8960-perf_defconfig
+++ b/arch/arm/configs/msm8960-perf_defconfig
@@ -92,6 +92,8 @@
 CONFIG_MSM_SLEEP_STATS=y
 CONFIG_MSM_EBI_ERP=y
 CONFIG_MSM_CACHE_ERP=y
+CONFIG_MSM_L1_ERR_PANIC=y
+CONFIG_MSM_L1_ERR_LOG=y
 CONFIG_MSM_L2_ERP_2BIT_PANIC=y
 CONFIG_MSM_DCVS=y
 CONFIG_MSM_HSIC_SYSMON=y
@@ -424,7 +426,6 @@
 # CONFIG_MMC_BLOCK_BOUNCE is not set
 CONFIG_MMC_TEST=m
 CONFIG_MMC_MSM=y
-CONFIG_MMC_MSM_CARD_HW_DETECTION=y
 CONFIG_MMC_MSM_SDC1_8_BIT_SUPPORT=y
 # CONFIG_MMC_MSM_SDC2_SUPPORT is not set
 CONFIG_MMC_MSM_SDC3_SUPPORT=y
diff --git a/arch/arm/configs/msm8960_defconfig b/arch/arm/configs/msm8960_defconfig
index ca8a909..9eea6f6 100644
--- a/arch/arm/configs/msm8960_defconfig
+++ b/arch/arm/configs/msm8960_defconfig
@@ -94,6 +94,7 @@
 CONFIG_MSM_EBI_ERP=y
 CONFIG_MSM_CACHE_ERP=y
 CONFIG_MSM_L1_ERR_PANIC=y
+CONFIG_MSM_L1_ERR_LOG=y
 CONFIG_MSM_L2_ERP_PRINT_ACCESS_ERRORS=y
 CONFIG_MSM_L2_ERP_1BIT_PANIC=y
 CONFIG_MSM_L2_ERP_2BIT_PANIC=y
@@ -427,7 +428,6 @@
 # CONFIG_MMC_BLOCK_BOUNCE is not set
 CONFIG_MMC_TEST=m
 CONFIG_MMC_MSM=y
-CONFIG_MMC_MSM_CARD_HW_DETECTION=y
 CONFIG_MMC_MSM_SDC1_8_BIT_SUPPORT=y
 # CONFIG_MMC_MSM_SDC2_SUPPORT is not set
 CONFIG_MMC_MSM_SDC3_SUPPORT=y
@@ -471,7 +471,6 @@
 CONFIG_NLS_ISO8859_1=y
 CONFIG_PRINTK_TIME=y
 CONFIG_MAGIC_SYSRQ=y
-CONFIG_DEBUG_SECTION_MISMATCH=y
 CONFIG_LOCKUP_DETECTOR=y
 # CONFIG_SCHED_DEBUG is not set
 CONFIG_TIMER_STATS=y
diff --git a/arch/arm/configs/msm-copper_defconfig b/arch/arm/configs/msm8974_defconfig
similarity index 90%
rename from arch/arm/configs/msm-copper_defconfig
rename to arch/arm/configs/msm8974_defconfig
index 78ab155..07bd8e6 100644
--- a/arch/arm/configs/msm-copper_defconfig
+++ b/arch/arm/configs/msm8974_defconfig
@@ -31,7 +31,7 @@
 CONFIG_PARTITION_ADVANCED=y
 CONFIG_EFI_PARTITION=y
 CONFIG_ARCH_MSM=y
-CONFIG_ARCH_MSMCOPPER=y
+CONFIG_ARCH_MSM8974=y
 CONFIG_MSM_KRAIT_TBB_ABORT_HANDLER=y
 # CONFIG_MSM_STACKED_MEMORY is not set
 CONFIG_KERNEL_PMEM_EBI_REGION=y
@@ -47,6 +47,7 @@
 CONFIG_MSM_PIL_MSS_QDSP6V5=y
 CONFIG_MSM_PIL_MBA=y
 CONFIG_MSM_PIL_PRONTO=y
+CONFIG_MSM_PIL_VENUS=y
 CONFIG_MSM_TZ_LOG=y
 CONFIG_MSM_DIRECT_SCLK_ACCESS=y
 CONFIG_MSM_OCMEM=y
@@ -136,10 +137,13 @@
 CONFIG_SPMI=y
 CONFIG_SPMI_MSM_PMIC_ARB=y
 CONFIG_MSM_QPNP=y
+CONFIG_MSM_QPNP_INT=y
 CONFIG_SLIMBUS=y
 CONFIG_SLIMBUS_MSM_CTRL=y
 CONFIG_DEBUG_GPIO=y
 CONFIG_GPIO_SYSFS=y
+CONFIG_GPIO_QPNP_PIN=y
+CONFIG_GPIO_QPNP_PIN_DEBUG=y
 CONFIG_POWER_SUPPLY=y
 # CONFIG_BATTERY_MSM is not set
 # CONFIG_HWMON is not set
@@ -164,12 +168,28 @@
 CONFIG_BACKLIGHT_CLASS_DEVICE=y
 # CONFIG_BACKLIGHT_GENERIC is not set
 # CONFIG_HID_SUPPORT is not set
+CONFIG_USB=y
+CONFIG_USB_XHCI_HCD=y
+CONFIG_USB_STORAGE=y
+CONFIG_USB_STORAGE_DATAFAB=y
+CONFIG_USB_STORAGE_FREECOM=y
+CONFIG_USB_STORAGE_ISD200=y
+CONFIG_USB_STORAGE_USBAT=y
+CONFIG_USB_STORAGE_SDDR09=y
+CONFIG_USB_STORAGE_SDDR55=y
+CONFIG_USB_STORAGE_JUMPSHOT=y
+CONFIG_USB_STORAGE_ALAUDA=y
+CONFIG_USB_STORAGE_ONETOUCH=y
+CONFIG_USB_STORAGE_KARMA=y
+CONFIG_USB_STORAGE_CYPRESS_ATACB=y
+CONFIG_USB_STORAGE_ENE_UB6250=y
 CONFIG_USB_GADGET=y
 CONFIG_USB_CI13XXX_MSM=y
 CONFIG_USB_G_ANDROID=y
 CONFIG_MMC=y
 CONFIG_MMC_PERF_PROFILING=y
 CONFIG_MMC_UNSAFE_RESUME=y
+CONFIG_MMC_CLKGATE=y
 CONFIG_MMC_PARANOID_SD_INIT=y
 CONFIG_MMC_BLOCK_MINORS=32
 # CONFIG_MMC_BLOCK_BOUNCE is not set
diff --git a/arch/arm/configs/msm9615_defconfig b/arch/arm/configs/msm9615_defconfig
index 37bc416..c9bc610 100644
--- a/arch/arm/configs/msm9615_defconfig
+++ b/arch/arm/configs/msm9615_defconfig
@@ -229,8 +229,8 @@
 CONFIG_USB_STORAGE_CYPRESS_ATACB=y
 CONFIG_USB_EHSET_TEST_FIXTURE=y
 CONFIG_USB_GADGET=y
-CONFIG_USB_CI13XXX_MSM=m
-CONFIG_USB_CI13XXX_MSM_HSIC=m
+CONFIG_USB_CI13XXX_MSM=y
+CONFIG_USB_CI13XXX_MSM_HSIC=y
 CONFIG_USB_G_ANDROID=y
 CONFIG_RMNET_SMD_CTL_CHANNEL="DATA36_CNTL"
 CONFIG_RMNET_SMD_DATA_CHANNEL="DATA36"
@@ -244,7 +244,6 @@
 # CONFIG_MMC_BLOCK_BOUNCE is not set
 CONFIG_MMC_TEST=m
 CONFIG_MMC_MSM=y
-CONFIG_MMC_MSM_CARD_HW_DETECTION=y
 CONFIG_MMC_MSM_SPS_SUPPORT=y
 CONFIG_NEW_LEDS=y
 CONFIG_LEDS_CLASS=y
diff --git a/arch/arm/include/asm/dma-contiguous.h b/arch/arm/include/asm/dma-contiguous.h
new file mode 100644
index 0000000..3ed37b4
--- /dev/null
+++ b/arch/arm/include/asm/dma-contiguous.h
@@ -0,0 +1,15 @@
+#ifndef ASMARM_DMA_CONTIGUOUS_H
+#define ASMARM_DMA_CONTIGUOUS_H
+
+#ifdef __KERNEL__
+#ifdef CONFIG_CMA
+
+#include <linux/types.h>
+#include <asm-generic/dma-contiguous.h>
+
+void dma_contiguous_early_fixup(phys_addr_t base, unsigned long size);
+
+#endif
+#endif
+
+#endif
diff --git a/arch/arm/include/asm/hardware/cache-l2x0.h b/arch/arm/include/asm/hardware/cache-l2x0.h
index a244039..926ac0e 100644
--- a/arch/arm/include/asm/hardware/cache-l2x0.h
+++ b/arch/arm/include/asm/hardware/cache-l2x0.h
@@ -90,6 +90,7 @@
 #define L2X0_AUX_CTRL_ASSOCIATIVITY_SHIFT	16
 #define L2X0_AUX_CTRL_WAY_SIZE_SHIFT		17
 #define L2X0_AUX_CTRL_WAY_SIZE_MASK		(0x7 << 17)
+#define L2X0_AUX_CTRL_EVNT_MON_BUS_EN_SHIFT	20
 #define L2X0_AUX_CTRL_SHARE_OVERRIDE_SHIFT	22
 #define L2X0_AUX_CTRL_L2_FORCE_NWA_SHIFT	23
 #define L2X0_AUX_CTRL_NS_LOCKDOWN_SHIFT		26
diff --git a/arch/arm/include/asm/hardware/gic.h b/arch/arm/include/asm/hardware/gic.h
index 3783ff3..5078148 100644
--- a/arch/arm/include/asm/hardware/gic.h
+++ b/arch/arm/include/asm/hardware/gic.h
@@ -40,7 +40,7 @@
 extern struct irq_chip gic_arch_extn;
 
 void gic_init_bases(unsigned int, int, void __iomem *, void __iomem *,
-		    u32 offset);
+		    u32 offset, struct device_node *);
 int gic_of_init(struct device_node *node, struct device_node *parent);
 void gic_secondary_init(unsigned int);
 void gic_handle_irq(struct pt_regs *regs);
@@ -56,12 +56,10 @@
 static inline void gic_init(unsigned int nr, int start,
 			    void __iomem *dist , void __iomem *cpu)
 {
-	gic_init_bases(nr, start, dist, cpu, 0);
+	gic_init_bases(nr, start, dist, cpu, 0, NULL);
 }
 void gic_set_irq_secure(unsigned int irq);
-#endif
 
-#ifdef CONFIG_ARCH_MSM8625
 void msm_gic_save(bool modem_wake, int from_idle);
 void msm_gic_restore(void);
 void core1_gic_configure_and_raise(void);
diff --git a/arch/arm/include/asm/mach/arch.h b/arch/arm/include/asm/mach/arch.h
index 0a45dee..669a626 100644
--- a/arch/arm/include/asm/mach/arch.h
+++ b/arch/arm/include/asm/mach/arch.h
@@ -22,7 +22,7 @@
 	const char *const 	*dt_compat;	/* array of device tree
 						 * 'compatible' strings	*/
 
-	int			nr_irqs;	/* number of IRQs */
+	unsigned int		nr_irqs;	/* number of IRQs */
 
 #ifdef CONFIG_ZONE_DMA
 	unsigned long		dma_zone_size;	/* size of DMA-able area */
diff --git a/arch/arm/include/asm/mach/map.h b/arch/arm/include/asm/mach/map.h
index 5f731df..cd5be28 100644
--- a/arch/arm/include/asm/mach/map.h
+++ b/arch/arm/include/asm/mach/map.h
@@ -36,6 +36,7 @@
 #define MT_MEMORY_R		15
 #define MT_MEMORY_RW		16
 #define MT_MEMORY_RX		17
+#define MT_MEMORY_DMA_READY	18
 
 #ifdef CONFIG_MMU
 extern void iotable_init(struct map_desc *, int);
diff --git a/arch/arm/include/asm/mach/mmc.h b/arch/arm/include/asm/mach/mmc.h
index 4bcbfc2..562f13c 100644
--- a/arch/arm/include/asm/mach/mmc.h
+++ b/arch/arm/include/asm/mach/mmc.h
@@ -51,6 +51,15 @@
 	bool always_on;
 	/* is low power mode setting required for this regulator? */
 	bool lpm_sup;
+	/*
+	 * Used to indicate whether the regulator should be reset at boot time.
+	 * It is needed only when the SD card's vdd regulator is always on,
+	 * since always-on regulators do not get reset at boot time.
+	 *
+	 * It is needed for an SD 3.0 card to be detected as an SD 3.0 card
+	 * on device reboot.
+	 */
+	bool reset_at_init;
 };
 
 /*
@@ -133,6 +142,8 @@
 	unsigned int xpc_cap;
 	/* Supported UHS-I Modes */
 	unsigned int uhs_caps;
+	/* More capabilities */
+	unsigned int uhs_caps2;
 	void (*sdio_lpm_gpio_setup)(struct device *, unsigned int);
         unsigned int status_irq;
 	unsigned int status_gpio;
diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c
index bc48bff..65c8c0f 100644
--- a/arch/arm/kernel/irq.c
+++ b/arch/arm/kernel/irq.c
@@ -132,18 +132,8 @@
 #ifdef CONFIG_SPARSE_IRQ
 int __init arch_probe_nr_irqs(void)
 {
-	/*
-	 * machine_desc->nr_irqs < 0 is a special case that
-	 * specifies not to preallocate any irq_descs.
-	 */
-	if (machine_desc->nr_irqs < 0) {
-		nr_irqs = 0;
-		return nr_irqs;
-	} else {
-		nr_irqs = machine_desc->nr_irqs ?
-			  machine_desc->nr_irqs : NR_IRQS;
-		return nr_irqs;
-	}
+	nr_irqs = machine_desc->nr_irqs ? machine_desc->nr_irqs : NR_IRQS;
+	return nr_irqs;
 }
 #endif
 
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index e37b28b..778128b 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -616,16 +616,14 @@
 	atomic_set(&armpmu->active_events, 0);
 	mutex_init(&armpmu->reserve_mutex);
 
-	armpmu->pmu = (struct pmu) {
-		.pmu_enable	= armpmu_enable,
-		.pmu_disable	= armpmu_disable,
-		.event_init	= armpmu_event_init,
-		.add		= armpmu_add,
-		.del		= armpmu_del,
-		.start		= armpmu_start,
-		.stop		= armpmu_stop,
-		.read		= armpmu_read,
-	};
+	armpmu->pmu.pmu_enable = armpmu_enable;
+	armpmu->pmu.pmu_disable = armpmu_disable;
+	armpmu->pmu.event_init = armpmu_event_init;
+	armpmu->pmu.add = armpmu_add;
+	armpmu->pmu.del = armpmu_del;
+	armpmu->pmu.start = armpmu_start;
+	armpmu->pmu.stop = armpmu_stop;
+	armpmu->pmu.read = armpmu_read;
 }
 
 int armpmu_register(struct arm_pmu *armpmu, char *name, int type)
@@ -857,14 +855,12 @@
 		case 0x02D0:    /* 8x60 */
 //			fabricmon_pmu_init();
 			cpu_pmu = armv7_scorpionmp_pmu_init();
-//			scorpionmp_l2_pmu_init();
 			break;
 		case 0x0490:    /* 8960 sim */
 		case 0x04D0:    /* 8960 */
 		case 0x06F0:    /* 8064 */
 //			fabricmon_pmu_init();
 			cpu_pmu = armv7_krait_pmu_init();
-//			krait_l2_pmu_init();
 			break;
 		}
 	}
diff --git a/arch/arm/kernel/perf_event_msm.c b/arch/arm/kernel/perf_event_msm.c
index 46fa8fe..90c9c9e 100644
--- a/arch/arm/kernel/perf_event_msm.c
+++ b/arch/arm/kernel/perf_event_msm.c
@@ -720,6 +720,8 @@
 	.start			= armv7pmu_start,
 	.stop			= armv7pmu_stop,
 	.reset			= scorpion_pmu_reset,
+	.test_set_event_constraints	= msm_test_set_ev_constraint,
+	.clear_event_constraints	= msm_clear_ev_constraint,
 	.max_period		= (1LLU << 32) - 1,
 };
 
@@ -728,6 +730,7 @@
 	scorpion_pmu.id		= ARM_PERF_PMU_ID_SCORPION;
 	scorpion_pmu.name	= "ARMv7 Scorpion";
 	scorpion_pmu.num_events	= armv7_read_num_pmnc_events();
+	scorpion_pmu.pmu.attr_groups	= msm_l1_pmu_attr_grps;
 	scorpion_clear_pmuregs();
 	return &scorpion_pmu;
 }
@@ -737,6 +740,7 @@
 	scorpion_pmu.id		= ARM_PERF_PMU_ID_SCORPIONMP;
 	scorpion_pmu.name	= "ARMv7 Scorpion-MP";
 	scorpion_pmu.num_events	= armv7_read_num_pmnc_events();
+	scorpion_pmu.pmu.attr_groups	= msm_l1_pmu_attr_grps;
 	scorpion_clear_pmuregs();
 	return &scorpion_pmu;
 }
diff --git a/arch/arm/kernel/perf_event_msm_krait.c b/arch/arm/kernel/perf_event_msm_krait.c
index 1b115b4..8d8f47a 100644
--- a/arch/arm/kernel/perf_event_msm_krait.c
+++ b/arch/arm/kernel/perf_event_msm_krait.c
@@ -573,10 +573,10 @@
  */
 static int msm_test_set_ev_constraint(struct perf_event *event)
 {
-	u32 krait_evt_type = event->attr.config & KRAIT_EVENT_MASK;
-	u8 prefix = (krait_evt_type & 0xF0000) >> 16;
-	u8 reg = (krait_evt_type & 0x0F000) >> 12;
-	u8 group = krait_evt_type & 0x0000F;
+	u32 evt_type = event->attr.config & KRAIT_EVENT_MASK;
+	u8 prefix = (evt_type & 0xF0000) >> 16;
+	u8 reg = (evt_type & 0x0F000) >> 12;
+	u8 group = evt_type & 0x0000F;
 	u64 cpu_pmu_bitmap = __get_cpu_var(pmu_bitmap);
 	u64 bitmap_t;
 
@@ -598,10 +598,10 @@
 
 static int msm_clear_ev_constraint(struct perf_event *event)
 {
-	u32 krait_evt_type = event->attr.config & KRAIT_EVENT_MASK;
-	u8 prefix = (krait_evt_type & 0xF0000) >> 16;
-	u8 reg = (krait_evt_type & 0x0F000) >> 12;
-	u8 group = krait_evt_type & 0x0000F;
+	u32 evt_type = event->attr.config & KRAIT_EVENT_MASK;
+	u8 prefix = (evt_type & 0xF0000) >> 16;
+	u8 reg = (evt_type & 0x0F000) >> 12;
+	u8 group = evt_type & 0x0000F;
 	u64 cpu_pmu_bitmap = __get_cpu_var(pmu_bitmap);
 	u64 bitmap_t;
 
@@ -636,6 +636,34 @@
 	.max_period		= (1LLU << 32) - 1,
 };
 
+/* NRCCG format for perf RAW codes. */
+PMU_FORMAT_ATTR(prefix,	"config:16-19");
+PMU_FORMAT_ATTR(reg,	"config:12-15");
+PMU_FORMAT_ATTR(code,	"config:4-11");
+PMU_FORMAT_ATTR(grp,	"config:0-3");
+
+static struct attribute *msm_l1_ev_formats[] = {
+	&format_attr_prefix.attr,
+	&format_attr_reg.attr,
+	&format_attr_code.attr,
+	&format_attr_grp.attr,
+	NULL,
+};
+
+/*
+ * The format group is needed so that PMUs can be accessed from
+ * userspace via their .name field.
+ */
+static struct attribute_group msm_pmu_format_group = {
+	.name = "format",
+	.attrs = msm_l1_ev_formats,
+};
+
+static const struct attribute_group *msm_l1_pmu_attr_grps[] = {
+	&msm_pmu_format_group,
+	NULL,
+};
+
 int get_krait_ver(void)
 {
 	int ver = 0;
@@ -655,6 +683,7 @@
 	krait_pmu.name	        = "ARMv7 Krait";
 	krait_pmu.map_event	= krait_8960_map_event;
 	krait_pmu.num_events	= armv7_read_num_pmnc_events();
+	krait_pmu.pmu.attr_groups	= msm_l1_pmu_attr_grps;
 	krait_clear_pmuregs();
 
 	krait_ver = get_krait_ver();
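The PMU_FORMAT_ATTR entries added above publish the Krait NRCCG raw-event layout (prefix/reg/code/group bit fields of perf_event_attr.config) under the PMU's sysfs "format" directory. As a sketch only (the helper name is hypothetical, not part of the patch), the same layout can be packed in code, consistent with both the advertised bit ranges and the masks used in msm_test_set_ev_constraint():

#include <linux/types.h>

/*
 * prefix -> config:16-19, reg -> config:12-15,
 * code   -> config:4-11,  grp -> config:0-3
 */
static inline u64 krait_nrccg_config(u8 prefix, u8 reg, u8 code, u8 grp)
{
	return ((u64)(prefix & 0xf)  << 16) |
	       ((u64)(reg    & 0xf)  << 12) |
	       ((u64)(code   & 0xff) <<  4) |
	       (u64)(grp & 0xf);
}

With a perf tool new enough to read the format directory, the same fields can also be supplied symbolically as <pmu>/prefix=...,reg=...,code=...,grp=.../, where <pmu> is whatever name this PMU registers under in sysfs.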
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index e30f1d8..b012f0f 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -81,6 +81,7 @@
 extern void paging_init(struct machine_desc *desc);
 extern void sanity_check_meminfo(void);
 extern void reboot_setup(char *str);
+extern void setup_dma_zone(struct machine_desc *desc);
 
 unsigned int processor_id;
 EXPORT_SYMBOL(processor_id);
@@ -941,12 +942,8 @@
 	machine_desc = mdesc;
 	machine_name = mdesc->name;
 
-#ifdef CONFIG_ZONE_DMA
-	if (mdesc->dma_zone_size) {
-		extern unsigned long arm_dma_zone_size;
-		arm_dma_zone_size = mdesc->dma_zone_size;
-	}
-#endif
+	setup_dma_zone(mdesc);
+
 	if (mdesc->restart_mode)
 		reboot_setup(&mdesc->restart_mode);
 
diff --git a/arch/arm/mach-msm/Kconfig b/arch/arm/mach-msm/Kconfig
index 24c57ae..2acf3ce 100644
--- a/arch/arm/mach-msm/Kconfig
+++ b/arch/arm/mach-msm/Kconfig
@@ -38,6 +38,7 @@
 	select MSM_PM2 if PM
 	select MSM_RUN_QUEUE_STATS if MSM_SOC_REV_A
 	select DONT_MAP_HOLE_AFTER_MEMBANK0
+	select MIGHT_HAVE_CACHE_L2X0
 
 config ARCH_MSM7X30
 	bool "MSM7x30"
@@ -135,6 +136,7 @@
 	select MSM_MULTIMEDIA_USE_ION
 	select MSM_PM8X60 if PM
 	select MSM_RUN_QUEUE_STATS
+	select ARM_HAS_SG_CHAIN
 
 config ARCH_MSM8960
 	bool "MSM8960"
@@ -168,6 +170,7 @@
 	select MSM_PM8X60 if PM
 	select HOLES_IN_ZONE if SPARSEMEM
 	select MSM_RUN_QUEUE_STATS
+	select ARM_HAS_SG_CHAIN
 
 config ARCH_MSM8930
 	bool "MSM8930"
@@ -198,6 +201,7 @@
 	select MULTI_IRQ_HANDLER
 	select MSM_PM8X60 if PM
 	select HOLES_IN_ZONE if SPARSEMEM
+	select ARM_HAS_SG_CHAIN
 
 config ARCH_APQ8064
 	bool "APQ8064"
@@ -223,9 +227,10 @@
 	select QCACHE
 	select MIGHT_HAVE_PCI
 	select ARCH_SUPPORTS_MSI
+	select ARM_HAS_SG_CHAIN
 
-config ARCH_MSMCOPPER
-	bool "MSM Copper"
+config ARCH_MSM8974
+	bool "MSM8974"
 	select ARCH_MSM_KRAITMP
 	select GPIO_MSM_V3
 	select ARM_GIC
@@ -245,6 +250,8 @@
 	select MSM_QDSP6_APR
 	select MSM_QDSP6V2_CODECS
 	select MSM_AUDIO_QDSP6V2 if SND_SOC
+	select MSM_RPM_REGULATOR_SMD
+	select ARM_HAS_SG_CHAIN
 
 config ARCH_FSM9XXX
 	bool "FSM9XXX"
@@ -276,6 +283,7 @@
 	select MSM_QDSP6_APR
 	select MSM_AUDIO_QDSP6 if SND_SOC
 	select FIQ
+	select ARM_HAS_SG_CHAIN
 
 config ARCH_MSM8625
 	bool "MSM8625"
@@ -288,6 +296,7 @@
 	select MULTI_IRQ_HANDLER
 	select ARM_TICKET_LOCKS
 	select MSM_RUN_QUEUE_STATS
+	select MIGHT_HAVE_CACHE_L2X0
 
 config ARCH_MSM9625
 	bool "MSM9625"
@@ -345,12 +354,14 @@
 	select ARCH_MSM_SCORPION
 	select MSM_SMP
 	select HAVE_ARCH_HAS_CURRENT_TIMER
+	select MSM_JTAG if MSM_QDSS
 	bool
 
 config  ARCH_MSM_KRAITMP
 	select ARCH_MSM_KRAIT
 	select MSM_SMP
 	select HAVE_ARCH_HAS_CURRENT_TIMER
+	select MSM_JTAG if MSM_QDSS
 	bool
 	select HAVE_HW_BRKPT_RESERVED_RW_ACCESS
 
@@ -369,6 +380,7 @@
 	select MULTI_IRQ_HANDLER
 	select ARM_GIC
 	select ARCH_MSM_CORTEXMP
+	select MIGHT_HAVE_CACHE_L2X0
 
 config  MSM_VIC
 	bool
@@ -872,7 +884,7 @@
 	default "0x80200000" if ARCH_APQ8064
 	default "0x80200000" if ARCH_MSM8960
 	default "0x80200000" if ARCH_MSM8930
-	default "0x00000000" if ARCH_MSMCOPPER
+	default "0x00000000" if ARCH_MSM8974
 	default "0x10000000" if ARCH_FSM9XXX
 	default "0x20200000" if ARCH_MSM9625
 	default "0x00200000" if !MSM_STACKED_MEMORY
@@ -884,7 +896,7 @@
 config KERNEL_PMEM_EBI_REGION
 	bool "Enable in-kernel PMEM region for EBI"
 	default y if ARCH_MSM8X60
-	depends on ANDROID_PMEM && (ARCH_MSM8X60 || ARCH_MSM8960 || ARCH_MSMCOPPER)
+	depends on ANDROID_PMEM && (ARCH_MSM8X60 || ARCH_MSM8960 || ARCH_MSM8974)
 	help
 	   Enable the in-kernel PMEM allocator to use EBI memory.
 
@@ -1002,13 +1014,13 @@
 		Say Y here if you want the debug print routines to direct
 		their output to the serial port on APQ 8064 devices.
 
-	config DEBUG_MSMCOPPER_UART
-		bool "Kernel low-level debugging messages via MSM Copper UART"
-		depends on ARCH_MSMCOPPER
+	config DEBUG_MSM8974_UART
+		bool "Kernel low-level debugging messages via MSM 8974 UART"
+		depends on ARCH_MSM8974
 		select MSM_HAS_DEBUG_UART_HS_V14
 		help
 		  Say Y here if you want the debug print routines to direct
-		  their output to the serial port on MSM Copper devices.
+		  their output to the serial port on MSM 8974 devices.
 endchoice
 
 choice
@@ -1894,6 +1906,13 @@
 	help
 	  Support for authenticating the video core image.
 
+config MSM_PIL_VENUS
+	tristate "VENUS (Video) Boot Support"
+	depends on MSM_PIL
+	help
+	  Support for booting and shutting down the VENUS processor (Video).
+	  Venus is the Video subsystem processor used for video codecs.
+
 config MSM_PIL_GSS
 	tristate "GSS (Coretx A5) Boot Support"
 	depends on MSM_PIL
@@ -2060,6 +2079,14 @@
 		not run during the bootup process, so it will not catch any early
 		lockups.
 
+config MSM_WATCHDOG_V2
+	bool "MSM Watchdog Support"
+	help
+		This enables the watchdog module. It causes a kernel panic if
+		the watchdog times out. It allows for detection of CPU hangs
+		and deadlocks. It does not run during the bootup process, so it
+		will not catch any early lockups.
+
 config MSM_DLOAD_MODE
 	bool "Enable download mode on crashes"
 	depends on ARCH_MSM8X60 || ARCH_MSM8960 || ARCH_MSM9615
@@ -2071,9 +2098,15 @@
 		enabled via another mechanism.
 
 config MSM_JTAG
-        bool "JTAG debug and trace support"
+	bool "JTAG and kernel debug and trace support across power collapse"
 	help
-          Add additional support for JTAG kernel debugging and tracing.
+	  Enables support for kernel debugging (specifically breakpoints) and
+	  processor tracing across power collapse, for both JTAG and OS-hosted
+	  software running on the target. Enabling this ensures that debug
+	  and ETM registers are saved and restored across power collapse.
+
+	  For production builds, you should probably say 'N' here to avoid
+	  potential power, performance and memory penalty.
 
 config MSM_ETM
 	tristate "Enable MSM ETM and ETB"
@@ -2082,30 +2115,6 @@
 	help
 	  Enables embedded trace collection on MSM8660
 
-config MSM_QDSS
-	bool "Qualcomm Debug Subsystem"
-	select MSM_JTAG
-	help
-	  Enables support for Qualcomm Debug Subsystem.
-
-config MSM_QDSS_STM_DEFAULT_ENABLE
-	bool "Turn on QDSS STM Tracing by Default"
-	depends on MSM_QDSS
-	help
-	  Turns on QDSS STM tracing (hardware assisted software
-	  instrumentation based tracing) by default. Otherwise, tracing is
-	  disabled by default but can be enabled via sysfs.
-
-	  For production builds, you should probably say 'N' here to avoid
-	  potential power, performance and memory penalty.
-
-config MSM_QDSS_ETM_DEFAULT_ENABLE
-	bool "Turn on QDSS ETM Tracing by Default"
-	depends on MSM_QDSS
-	help
-	  Turns on QDSS ETM tracing by default. Otherwise, tracing is
-	  disabled by default but can be enabled by other means.
-
 config MSM_SLEEP_STATS
 	bool "Enable exporting of MSM sleep stats to userspace"
 	depends on CPU_IDLE
diff --git a/arch/arm/mach-msm/Makefile b/arch/arm/mach-msm/Makefile
index 8315d70..702667b 100644
--- a/arch/arm/mach-msm/Makefile
+++ b/arch/arm/mach-msm/Makefile
@@ -55,7 +55,6 @@
 
 msm-etm-objs := etm.o
 obj-$(CONFIG_MSM_ETM) += msm-etm.o
-obj-$(CONFIG_MSM_QDSS) += qdss.o qdss-etb.o qdss-tpiu.o qdss-funnel.o qdss-stm.o qdss-etm.o
 
 quiet_cmd_mkrpcsym = MKCAP   $@
       cmd_mkrpcsym = $(PERL) $(srctree)/$(src)/mkrpcsym.pl $< $@
@@ -80,6 +79,7 @@
 obj-$(CONFIG_MSM_PIL_DSPS) += pil-dsps.o
 obj-$(CONFIG_MSM_PIL_GSS) += pil-gss.o
 obj-$(CONFIG_MSM_PIL_PRONTO) += pil-pronto.o
+obj-$(CONFIG_MSM_PIL_VENUS) += pil-venus.o
 obj-$(CONFIG_ARCH_QSD8X50) += sirc.o
 obj-$(CONFIG_ARCH_FSM9XXX) += sirc-fsm9xxx.o
 obj-$(CONFIG_MSM_FIQ_SUPPORT) += fiq_glue.o
@@ -100,7 +100,7 @@
 ifndef CONFIG_ARCH_APQ8064
 ifndef CONFIG_ARCH_MSM8960
 ifndef CONFIG_ARCH_MSM8X60
-ifndef CONFIG_ARCH_MSMCOPPER
+ifndef CONFIG_ARCH_MSM8974
 	obj-$(CONFIG_MSM_SMD) += pmic.o
 	obj-$(CONFIG_MSM_ONCRPCROUTER) += rpc_hsusb.o rpc_pmapp.o rpc_fsusb.o
 endif
@@ -111,7 +111,7 @@
 ifndef CONFIG_ARCH_MSM8960
 ifndef CONFIG_ARCH_MSM8X60
 ifndef CONFIG_ARCH_APQ8064
-ifndef CONFIG_ARCH_MSMCOPPER
+ifndef CONFIG_ARCH_MSM8974
 ifndef CONFIG_ARCH_MSM9625
 	obj-y += nand_partitions.o
 endif
@@ -213,7 +213,7 @@
 	obj-$(CONFIG_ARCH_MSM8960) += cpuidle.o
 	obj-$(CONFIG_ARCH_MSM8X60) += cpuidle.o
 	obj-$(CONFIG_ARCH_MSM9615) += cpuidle.o
-	obj-$(CONFIG_ARCH_MSMCOPPER) += cpuidle.o
+	obj-$(CONFIG_ARCH_MSM8974) += cpuidle.o
 endif
 
 ifdef CONFIG_MSM_CAMERA_V4L2
@@ -226,6 +226,7 @@
 
 obj-$(CONFIG_MSM_WATCHDOG) += msm_watchdog.o
 obj-$(CONFIG_MSM_WATCHDOG) += msm_watchdog_asm.o
+obj-$(CONFIG_MSM_WATCHDOG_V2) += msm_watchdog_v2.o
 obj-$(CONFIG_MACH_MSM8X60_RUMI3) += board-msm8x60.o
 obj-$(CONFIG_MACH_MSM8X60_SIM) += board-msm8x60.o
 obj-$(CONFIG_MACH_MSM8X60_SURF) += board-msm8x60.o
@@ -288,10 +289,10 @@
 obj-$(CONFIG_MACH_MPQ8064_DTV) += board-8064-all.o board-8064-regulator.o
 obj-$(CONFIG_ARCH_MSM9615) += board-9615.o devices-9615.o board-9615-regulator.o board-9615-gpiomux.o board-9615-storage.o board-9615-display.o
 obj-$(CONFIG_ARCH_MSM9615) += clock-local.o clock-9615.o acpuclock-9615.o clock-rpm.o clock-pll.o
-obj-$(CONFIG_ARCH_MSMCOPPER) += board-copper.o board-dt.o board-copper-regulator.o board-copper-gpiomux.o
-obj-$(CONFIG_ARCH_MSMCOPPER) += acpuclock-krait.o acpuclock-copper.o
-obj-$(CONFIG_ARCH_MSMCOPPER) += clock-local2.o clock-pll.o clock-copper.o clock-rpm.o clock-voter.o
-obj-$(CONFIG_ARCH_MSMCOPPER) += gdsc.o
+obj-$(CONFIG_ARCH_MSM8974) += board-8974.o board-dt.o board-8974-regulator.o board-8974-gpiomux.o
+obj-$(CONFIG_ARCH_MSM8974) += acpuclock-krait.o acpuclock-8974.o
+obj-$(CONFIG_ARCH_MSM8974) += clock-local2.o clock-pll.o clock-8974.o clock-rpm.o clock-voter.o
+obj-$(CONFIG_ARCH_MSM8974) += gdsc.o
 obj-$(CONFIG_ARCH_MSM9625) += board-9625.o board-9625-gpiomux.o
 
 obj-$(CONFIG_MACH_SAPPHIRE) += board-sapphire.o board-sapphire-gpio.o
@@ -300,6 +301,7 @@
 obj-$(CONFIG_MACH_SAPPHIRE) += board-sapphire-rfkill.o msm_vibrator.o
 
 CFLAGS_msm_vibrator.o += -Idrivers/staging/android
+CFLAGS_board-9615.o += -Idrivers/usb/gadget
 
 obj-$(CONFIG_ARCH_FSM9XXX) += board-fsm9xxx.o
 
@@ -317,7 +319,7 @@
 	obj-$(CONFIG_ARCH_MSM9615) += rpm_resources.o
 endif
 ifdef CONFIG_MSM_RPM_SMD
-	obj-$(CONFIG_ARCH_MSMCOPPER) += lpm_levels.o lpm_resources.o
+	obj-$(CONFIG_ARCH_MSM8974) += lpm_levels.o lpm_resources.o
 endif
 obj-$(CONFIG_MSM_MPM) += mpm.o
 obj-$(CONFIG_MSM_RPM_STATS_LOG) += rpm_stats.o
@@ -342,7 +344,7 @@
 obj-$(CONFIG_ARCH_MSM8960) += gpiomux-v2.o gpiomux.o
 obj-$(CONFIG_ARCH_APQ8064) += gpiomux-v2.o gpiomux.o
 obj-$(CONFIG_ARCH_MSM9615) += gpiomux-v2.o gpiomux.o
-obj-$(CONFIG_ARCH_MSMCOPPER) += gpiomux-v2.o gpiomux.o
+obj-$(CONFIG_ARCH_MSM8974) += gpiomux-v2.o gpiomux.o
 obj-$(CONFIG_ARCH_MSM9625) += gpiomux-v2.o gpiomux.o
 
 
diff --git a/arch/arm/mach-msm/Makefile.boot b/arch/arm/mach-msm/Makefile.boot
index bd8d153..b57d4e1 100644
--- a/arch/arm/mach-msm/Makefile.boot
+++ b/arch/arm/mach-msm/Makefile.boot
@@ -45,8 +45,8 @@
 # APQ8064
    zreladdr-$(CONFIG_ARCH_APQ8064)	:= 0x80208000
 
-# MSMCOPPER
-   zreladdr-$(CONFIG_ARCH_MSMCOPPER)	:= 0x00008000
+# MSM8974
+   zreladdr-$(CONFIG_ARCH_MSM8974)	:= 0x00008000
 
 # MSM9615
    zreladdr-$(CONFIG_ARCH_MSM9615)	:= 0x40808000
diff --git a/arch/arm/mach-msm/acpuclock-8960.c b/arch/arm/mach-msm/acpuclock-8960.c
index d29fee6..9809bdf 100644
--- a/arch/arm/mach-msm/acpuclock-8960.c
+++ b/arch/arm/mach-msm/acpuclock-8960.c
@@ -154,6 +154,8 @@
 	struct l2_level *l2_vote;
 	struct vreg vreg[NUM_VREG];
 	unsigned int *hfpll_vdd_tbl;
+	bool regulators_initialized;
+	bool clocks_initialized;
 };
 
 static unsigned int hfpll_vdd_tbl_8960[] = {
@@ -735,10 +737,6 @@
 	{ 1, {  1080000, HFPLL, 1, 0, 0x28 }, L2(16), 1175000 },
 	{ 1, {  1134000, HFPLL, 1, 0, 0x2A }, L2(16), 1175000 },
 	{ 1, {  1188000, HFPLL, 1, 0, 0x2C }, L2(16), 1200000 },
-	{ 1, {  1242000, HFPLL, 1, 0, 0x2E }, L2(16), 1200000 },
-	{ 1, {  1296000, HFPLL, 1, 0, 0x30 }, L2(16), 1225000 },
-	{ 1, {  1350000, HFPLL, 1, 0, 0x32 }, L2(16), 1225000 },
-	{ 1, {  1404000, HFPLL, 1, 0, 0x34 }, L2(16), 1237500 },
 	{ 0, { 0 } }
 };
 
@@ -760,10 +758,6 @@
 	{ 1, {  1080000, HFPLL, 1, 0, 0x28 }, L2(16), 1150000 },
 	{ 1, {  1134000, HFPLL, 1, 0, 0x2A }, L2(16), 1150000 },
 	{ 1, {  1188000, HFPLL, 1, 0, 0x2C }, L2(16), 1175000 },
-	{ 1, {  1242000, HFPLL, 1, 0, 0x2E }, L2(16), 1175000 },
-	{ 1, {  1296000, HFPLL, 1, 0, 0x30 }, L2(16), 1200000 },
-	{ 1, {  1350000, HFPLL, 1, 0, 0x32 }, L2(16), 1200000 },
-	{ 1, {  1404000, HFPLL, 1, 0, 0x34 }, L2(16), 1212500 },
 	{ 0, { 0 } }
 };
 
@@ -785,10 +779,6 @@
 	{ 1, {  1080000, HFPLL, 1, 0, 0x28 }, L2(16), 1100000 },
 	{ 1, {  1134000, HFPLL, 1, 0, 0x2A }, L2(16), 1100000 },
 	{ 1, {  1188000, HFPLL, 1, 0, 0x2C }, L2(16), 1125000 },
-	{ 1, {  1242000, HFPLL, 1, 0, 0x2E }, L2(16), 1125000 },
-	{ 1, {  1296000, HFPLL, 1, 0, 0x30 }, L2(16), 1150000 },
-	{ 1, {  1350000, HFPLL, 1, 0, 0x32 }, L2(16), 1150000 },
-	{ 1, {  1404000, HFPLL, 1, 0, 0x34 }, L2(16), 1162500 },
 	{ 0, { 0 } }
 };
 /* TODO: Update vdd_dig, vdd_mem and bw when data is available. */
@@ -854,6 +844,8 @@
 	[PVS_FAST] = acpu_freq_tbl_8930_fast,
 };
 
+static struct acpu_level *max_acpu_level;
+
 static unsigned long acpuclk_8960_get_rate(int cpu)
 {
 	return scalable[cpu].current_speed->khz;
@@ -1317,7 +1309,7 @@
 }
 
 /* Initialize a HFPLL at a given rate and enable it. */
-static void __init hfpll_init(struct scalable *sc, struct core_speed *tgt_s)
+static void __cpuinit hfpll_init(struct scalable *sc, struct core_speed *tgt_s)
 {
 	pr_debug("Initializing HFPLL%d\n", sc - scalable);
 
@@ -1338,69 +1330,66 @@
 }
 
 /* Voltage regulator initialization. */
-static void __init regulator_init(struct acpu_level *lvl)
+static void __cpuinit regulator_init(int cpu, struct acpu_level *lvl)
 {
-	int cpu, ret;
-	struct scalable *sc;
+	int ret;
+	struct scalable *sc = &scalable[cpu];
 	unsigned int vdd_mem, vdd_dig, vdd_core;
 
 	vdd_mem = calculate_vdd_mem(lvl);
 	vdd_dig = calculate_vdd_dig(lvl);
 
-	for_each_possible_cpu(cpu) {
-		sc = &scalable[cpu];
-
-		/* Set initial vdd_mem vote. */
-		ret = rpm_vreg_set_voltage(sc->vreg[VREG_MEM].rpm_vreg_id,
-				sc->vreg[VREG_MEM].rpm_vreg_voter, vdd_mem,
-				sc->vreg[VREG_MEM].max_vdd, 0);
-		if (ret) {
-			pr_err("%s initialization failed (%d)\n",
-				sc->vreg[VREG_MEM].name, ret);
-			BUG();
-		}
-		sc->vreg[VREG_MEM].cur_vdd  = vdd_mem;
-
-		/* Set initial vdd_dig vote. */
-		ret = rpm_vreg_set_voltage(sc->vreg[VREG_DIG].rpm_vreg_id,
-				sc->vreg[VREG_DIG].rpm_vreg_voter, vdd_dig,
-				sc->vreg[VREG_DIG].max_vdd, 0);
-		if (ret) {
-			pr_err("%s initialization failed (%d)\n",
-				sc->vreg[VREG_DIG].name, ret);
-			BUG();
-		}
-		sc->vreg[VREG_DIG].cur_vdd  = vdd_dig;
-
-		/* Setup Krait CPU regulators and initial core voltage. */
-		sc->vreg[VREG_CORE].reg = regulator_get(NULL,
-					  sc->vreg[VREG_CORE].name);
-		if (IS_ERR(sc->vreg[VREG_CORE].reg)) {
-			pr_err("regulator_get(%s) failed (%ld)\n",
-			       sc->vreg[VREG_CORE].name,
-			       PTR_ERR(sc->vreg[VREG_CORE].reg));
-			BUG();
-		}
-		vdd_core = calculate_vdd_core(lvl);
-		ret = regulator_set_voltage(sc->vreg[VREG_CORE].reg, vdd_core,
-					    sc->vreg[VREG_CORE].max_vdd);
-		if (ret) {
-			pr_err("%s initialization failed (%d)\n",
-				sc->vreg[VREG_CORE].name, ret);
-			BUG();
-		}
-		sc->vreg[VREG_CORE].cur_vdd = vdd_core;
-		ret = regulator_enable(sc->vreg[VREG_CORE].reg);
-		if (ret) {
-			pr_err("regulator_enable(%s) failed (%d)\n",
-			       sc->vreg[VREG_CORE].name, ret);
-			BUG();
-		}
+	/* Set initial vdd_mem vote. */
+	ret = rpm_vreg_set_voltage(sc->vreg[VREG_MEM].rpm_vreg_id,
+			sc->vreg[VREG_MEM].rpm_vreg_voter, vdd_mem,
+			sc->vreg[VREG_MEM].max_vdd, 0);
+	if (ret) {
+		pr_err("%s initialization failed (%d)\n",
+			sc->vreg[VREG_MEM].name, ret);
+		BUG();
 	}
+	sc->vreg[VREG_MEM].cur_vdd  = vdd_mem;
+
+	/* Set initial vdd_dig vote. */
+	ret = rpm_vreg_set_voltage(sc->vreg[VREG_DIG].rpm_vreg_id,
+			sc->vreg[VREG_DIG].rpm_vreg_voter, vdd_dig,
+			sc->vreg[VREG_DIG].max_vdd, 0);
+	if (ret) {
+		pr_err("%s initialization failed (%d)\n",
+			sc->vreg[VREG_DIG].name, ret);
+		BUG();
+	}
+	sc->vreg[VREG_DIG].cur_vdd  = vdd_dig;
+
+	/* Setup Krait CPU regulators and initial core voltage. */
+	sc->vreg[VREG_CORE].reg = regulator_get(NULL,
+				  sc->vreg[VREG_CORE].name);
+	if (IS_ERR(sc->vreg[VREG_CORE].reg)) {
+		pr_err("regulator_get(%s) failed (%ld)\n",
+		       sc->vreg[VREG_CORE].name,
+		       PTR_ERR(sc->vreg[VREG_CORE].reg));
+		BUG();
+	}
+	vdd_core = calculate_vdd_core(lvl);
+	ret = regulator_set_voltage(sc->vreg[VREG_CORE].reg, vdd_core,
+				    sc->vreg[VREG_CORE].max_vdd);
+	if (ret) {
+		pr_err("%s initialization failed (%d)\n",
+			sc->vreg[VREG_CORE].name, ret);
+		BUG();
+	}
+	sc->vreg[VREG_CORE].cur_vdd = vdd_core;
+	ret = regulator_enable(sc->vreg[VREG_CORE].reg);
+	if (ret) {
+		pr_err("regulator_enable(%s) failed (%d)\n",
+		       sc->vreg[VREG_CORE].name, ret);
+		BUG();
+	}
+	sc->regulators_initialized = true;
 }
 
 /* Set initial rate for a given core. */
-static void __init init_clock_sources(struct scalable *sc,
+static void __cpuinit init_clock_sources(struct scalable *sc,
 				      struct core_speed *tgt_s)
 {
 	uint32_t regval;
@@ -1424,13 +1413,13 @@
 	sc->current_speed = tgt_s;
 }
 
-static void __init per_cpu_init(void *data)
+static void __cpuinit per_cpu_init(void *data)
 {
-	struct acpu_level *max_acpu_level = data;
 	int cpu = smp_processor_id();
 
 	init_clock_sources(&scalable[cpu], &max_acpu_level->speed);
 	scalable[cpu].l2_vote = max_acpu_level->l2_level;
+	scalable[cpu].clocks_initialized = true;
 }
 
 /* Register with bus driver. */
@@ -1514,17 +1503,23 @@
 		/* Fall through. */
 	case CPU_UP_CANCELED:
 	case CPU_UP_CANCELED_FROZEN:
-		acpuclk_8960_set_rate(cpu, HOT_UNPLUG_KHZ, SETRATE_HOTPLUG);
+		if (scalable[cpu].clocks_initialized)
+			acpuclk_8960_set_rate(cpu, HOT_UNPLUG_KHZ,
+					      SETRATE_HOTPLUG);
 		break;
 	case CPU_UP_PREPARE:
 	case CPU_UP_PREPARE_FROZEN:
-		if (WARN_ON(!prev_khz[cpu]))
-			return NOTIFY_BAD;
-		acpuclk_8960_set_rate(cpu, prev_khz[cpu], SETRATE_HOTPLUG);
+		if (scalable[cpu].clocks_initialized)
+			acpuclk_8960_set_rate(cpu, prev_khz[cpu],
+					      SETRATE_HOTPLUG);
+		if (!scalable[cpu].regulators_initialized)
+			regulator_init(cpu, max_acpu_level);
 		break;
 	case CPU_STARTING:
 	case CPU_STARTING_FROZEN:
-		if (cpu_is_krait_v1() || cpu_is_apq8064()) {
+		if (!scalable[cpu].clocks_initialized) {
+			per_cpu_init(NULL);
+		} else if (cpu_is_krait_v1() || cpu_is_apq8064()) {
 			set_sec_clk_src(&scalable[cpu], prev_sec_src[cpu]);
 			set_pri_clk_src(&scalable[cpu], prev_pri_src[cpu]);
 		}
@@ -1590,9 +1585,9 @@
 	}
 }
 
-static struct acpu_level * __init select_freq_plan(void)
+static void __init select_freq_plan(void)
 {
-	struct acpu_level *l, *max_acpu_level = NULL;
+	struct acpu_level *l;
 
 	/* Select frequency tables. */
 	if (cpu_is_msm8960()) {
@@ -1640,8 +1635,6 @@
 			max_acpu_level = l;
 	BUG_ON(!max_acpu_level);
 	pr_info("Max ACPU freq: %u KHz\n", max_acpu_level->speed.khz);
-
-	return max_acpu_level;
 }
 
 static struct acpuclk_data acpuclk_8960_data = {
@@ -1653,13 +1646,16 @@
 
 static int __init acpuclk_8960_probe(struct platform_device *pdev)
 {
-	struct acpu_level *max_acpu_level = select_freq_plan();
+	int cpu;
 
-	regulator_init(max_acpu_level);
+	select_freq_plan();
+
+	for_each_online_cpu(cpu)
+		regulator_init(cpu, max_acpu_level);
 	bus_init(max_acpu_level->l2_level->bw_level);
 
 	init_clock_sources(&scalable[L2], &max_acpu_level->l2_level->speed);
-	on_each_cpu(per_cpu_init, max_acpu_level, true);
+	on_each_cpu(per_cpu_init, NULL, true);
 
 	cpufreq_table_init();
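
The probe path above now initializes only the CPUs that are online at boot; cores brought up later are finished lazily from the CPU hotplug notifier (regulator_init() from CPU_UP_PREPARE, per_cpu_init() from CPU_STARTING), guarded by the new regulators_initialized/clocks_initialized flags. The following is a minimal sketch of that deferred-init notifier pattern for a 3.4-era kernel; the example_* names are placeholders standing in for the driver's own helpers, not code from this patch.

	#include <linux/cpu.h>
	#include <linux/init.h>
	#include <linux/notifier.h>

	static bool example_cpu_ready[NR_CPUS];

	/* Placeholder hooks standing in for the driver's own init routines. */
	static void example_prepare_cpu(int cpu) { /* vote regulators for cpu */ }
	static void example_start_cpu(int cpu) { /* program cpu clock sources */ }

	static int __cpuinit example_cpu_callback(struct notifier_block *nb,
						  unsigned long action, void *hcpu)
	{
		int cpu = (unsigned long)hcpu;

		switch (action & ~CPU_TASKS_FROZEN) {
		case CPU_UP_PREPARE:
			/* Process context on an online CPU; sleeping calls are OK. */
			if (!example_cpu_ready[cpu])
				example_prepare_cpu(cpu);
			break;
		case CPU_STARTING:
			/* Runs on the incoming CPU itself, interrupts disabled. */
			if (!example_cpu_ready[cpu]) {
				example_start_cpu(cpu);
				example_cpu_ready[cpu] = true;
			}
			break;
		}
		return NOTIFY_OK;
	}

	static struct notifier_block __cpuinitdata example_cpu_notifier = {
		.notifier_call = example_cpu_callback,
	};

	static int __init example_register_notifier(void)
	{
		register_hotcpu_notifier(&example_cpu_notifier);
		return 0;
	}
	device_initcall(example_register_notifier);
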
 
diff --git a/arch/arm/mach-msm/acpuclock-copper.c b/arch/arm/mach-msm/acpuclock-8974.c
similarity index 66%
rename from arch/arm/mach-msm/acpuclock-copper.c
rename to arch/arm/mach-msm/acpuclock-8974.c
index f0da74c..c67109e 100644
--- a/arch/arm/mach-msm/acpuclock-copper.c
+++ b/arch/arm/mach-msm/acpuclock-8974.c
@@ -14,7 +14,7 @@
 #include <linux/module.h>
 #include <linux/platform_device.h>
 #include <linux/of.h>
-#include <mach/rpm-regulator.h>
+#include <mach/rpm-regulator-smd.h>
 #include <mach/msm_bus_board.h>
 #include <mach/msm_bus.h>
 #include <mach/socinfo.h>
@@ -23,10 +23,10 @@
 #include "acpuclock-krait.h"
 
 /* Corner type vreg VDD values */
-#define LVL_NONE	RPM_VREG_CORNER_NONE
-#define LVL_LOW		RPM_VREG_CORNER_LOW
-#define LVL_NOM		RPM_VREG_CORNER_NOMINAL
-#define LVL_HIGH	RPM_VREG_CORNER_HIGH
+#define LVL_NONE	RPM_REGULATOR_CORNER_RETENTION
+#define LVL_LOW		RPM_REGULATOR_CORNER_SVS_SOC
+#define LVL_NOM		RPM_REGULATOR_CORNER_NORMAL
+#define LVL_HIGH	RPM_REGULATOR_CORNER_SUPER_TURBO
 
 static struct hfpll_data hfpll_data_cpu = {
 	.mode_offset = 0x00,
@@ -34,7 +34,7 @@
 	.m_offset = 0x08,
 	.n_offset = 0x0C,
 	.config_offset = 0x14,
-	/* TODO: Verify magic number for copper when available. */
+	/* TODO: Verify magic number for 8974 when available. */
 	.config_val = 0x7845C665,
 	.low_vdd_l_max = 52,
 	.vdd[HFPLL_VDD_NONE] = 0,
@@ -48,7 +48,7 @@
 	.m_offset = 0x08,
 	.n_offset = 0x0C,
 	.config_offset = 0x14,
-	/* TODO: Verify magic number for copper when available. */
+	/* TODO: Verify magic number for 8974 when available. */
 	.config_val = 0x7845C665,
 	.low_vdd_l_max = 52,
 	.vdd[HFPLL_VDD_NONE] = LVL_NONE,
@@ -62,68 +62,47 @@
 		.hfpll_data = &hfpll_data_cpu,
 		.l2cpmr_iaddr = 0x4501,
 		.vreg[VREG_CORE] = { "krait0",     1050000, 3200000 },
-		.vreg[VREG_MEM]  = { "krait0_mem", 1050000, 0,
-				     RPM_VREG_VOTER1,
-				     RPM_VREG_ID_PM8941_S1 },
-		.vreg[VREG_DIG]  = { "krait0_dig", 1050000, 0,
-				     RPM_VREG_VOTER1,
-				     RPM_VREG_ID_PM8941_S2 },
-		.vreg[VREG_HFPLL_A] = { "hfpll", 1800000, 0,
-				     RPM_VREG_VOTER1,
-				     RPM_VREG_ID_PM8941_L12 },
+		.vreg[VREG_MEM]  = { "krait0_mem", 1050000 },
+		.vreg[VREG_DIG]  = { "krait0_dig", LVL_HIGH },
+		.vreg[VREG_HFPLL_A] = { "krait0_hfpll_a", 2150000 },
+		.vreg[VREG_HFPLL_B] = { "krait0_hfpll_b", 1800000 },
 	},
 	[CPU1] = {
 		.hfpll_phys_base = 0xF909A000,
 		.hfpll_data = &hfpll_data_cpu,
 		.l2cpmr_iaddr = 0x5501,
 		.vreg[VREG_CORE] = { "krait1",     1050000, 3200000 },
-		.vreg[VREG_MEM]  = { "krait1_mem", 1050000, 0,
-				     RPM_VREG_VOTER2,
-				     RPM_VREG_ID_PM8941_S1 },
-		.vreg[VREG_DIG]  = { "krait1_dig", 1050000, 0,
-				     RPM_VREG_VOTER2,
-				     RPM_VREG_ID_PM8941_S2 },
-		.vreg[VREG_HFPLL_A] = { "hfpll", 1800000, 0,
-				     RPM_VREG_VOTER2,
-				     RPM_VREG_ID_PM8941_L12 },
+		.vreg[VREG_MEM]  = { "krait1_mem", 1050000 },
+		.vreg[VREG_DIG]  = { "krait1_dig", LVL_HIGH },
+		.vreg[VREG_HFPLL_A] = { "krait1_hfpll_a", 2150000 },
+		.vreg[VREG_HFPLL_B] = { "krait1_hfpll_b", 1800000 },
 	},
 	[CPU2] = {
 		.hfpll_phys_base = 0xF90AA000,
 		.hfpll_data = &hfpll_data_cpu,
 		.l2cpmr_iaddr = 0x6501,
 		.vreg[VREG_CORE] = { "krait2",     1050000, 3200000 },
-		.vreg[VREG_MEM]  = { "krait2_mem", 1050000, 0,
-				     RPM_VREG_VOTER4,
-				     RPM_VREG_ID_PM8921_S1 },
-		.vreg[VREG_DIG]  = { "krait2_dig", 1050000, 0,
-				     RPM_VREG_VOTER4,
-				     RPM_VREG_ID_PM8921_S2 },
-		.vreg[VREG_HFPLL_A] = { "hfpll", 1800000, 0,
-				     RPM_VREG_VOTER4,
-				     RPM_VREG_ID_PM8941_L12 },
+		.vreg[VREG_MEM]  = { "krait2_mem", 1050000 },
+		.vreg[VREG_DIG]  = { "krait2_dig", LVL_HIGH },
+		.vreg[VREG_HFPLL_A] = { "krait2_hfpll_a", 2150000 },
+		.vreg[VREG_HFPLL_B] = { "krait2_hfpll_b", 1800000 },
 	},
 	[CPU3] = {
 		.hfpll_phys_base = 0xF90BA000,
 		.hfpll_data = &hfpll_data_cpu,
 		.l2cpmr_iaddr = 0x7501,
 		.vreg[VREG_CORE] = { "krait3",     1050000, 3200000 },
-		.vreg[VREG_MEM]  = { "krait3_mem", 1050000, 0,
-				     RPM_VREG_VOTER5,
-				     RPM_VREG_ID_PM8941_S1 },
-		.vreg[VREG_DIG]  = { "krait3_dig", 1050000, 0,
-				     RPM_VREG_VOTER5,
-				     RPM_VREG_ID_PM8941_S2 },
-		.vreg[VREG_HFPLL_A] = { "hfpll", 1800000, 0,
-				     RPM_VREG_VOTER5,
-				     RPM_VREG_ID_PM8941_L12 },
+		.vreg[VREG_MEM]  = { "krait3_mem", 1050000 },
+		.vreg[VREG_DIG]  = { "krait3_dig", LVL_HIGH },
+		.vreg[VREG_HFPLL_A] = { "krait3_hfpll_a", 2150000 },
+		.vreg[VREG_HFPLL_B] = { "krait3_hfpll_b", 1800000 },
 	},
 	[L2] = {
 		.hfpll_phys_base = 0xF9016000,
 		.hfpll_data = &hfpll_data_l2,
 		.l2cpmr_iaddr = 0x0500,
-		.vreg[VREG_HFPLL_A] = { "hfpll", 1800000, 0,
-				     RPM_VREG_VOTER6,
-				     RPM_VREG_ID_PM8941_L12 },
+		.vreg[VREG_HFPLL_A] = { "l2_hfpll_a", 2150000 },
+		.vreg[VREG_HFPLL_B] = { "l2_hfpll_b", 1800000 },
 	},
 };
 
@@ -139,7 +118,7 @@
 	.usecase = bw_level_tbl,
 	.num_usecases = ARRAY_SIZE(bw_level_tbl),
 	.active_only = 1,
-	.name = "acpuclk-copper",
+	.name = "acpuclk-8974",
 };
 
 #define L2(x) (&l2_freq_tbl[(x)])
@@ -174,7 +153,7 @@
 	{ 0, { 0 } }
 };
 
-static struct acpuclk_krait_params acpuclk_copper_params = {
+static struct acpuclk_krait_params acpuclk_8974_params = {
 	.scalable = scalable,
 	.pvs_acpu_freq_tbl[PVS_SLOW] = acpu_freq_tbl,
 	.pvs_acpu_freq_tbl[PVS_NOMINAL] = acpu_freq_tbl,
@@ -185,27 +164,27 @@
 	.qfprom_phys_base = 0xFC4A8000,
 };
 
-static int __init acpuclk_copper_probe(struct platform_device *pdev)
+static int __init acpuclk_8974_probe(struct platform_device *pdev)
 {
-	return acpuclk_krait_init(&pdev->dev, &acpuclk_copper_params);
+	return acpuclk_krait_init(&pdev->dev, &acpuclk_8974_params);
 }
 
-static struct of_device_id acpuclk_copper_match_table[] = {
-	{ .compatible = "qcom,acpuclk-copper" },
+static struct of_device_id acpuclk_8974_match_table[] = {
+	{ .compatible = "qcom,acpuclk-8974" },
 	{}
 };
 
-static struct platform_driver acpuclk_copper_driver = {
+static struct platform_driver acpuclk_8974_driver = {
 	.driver = {
-		.name = "acpuclk-copper",
-		.of_match_table = acpuclk_copper_match_table,
+		.name = "acpuclk-8974",
+		.of_match_table = acpuclk_8974_match_table,
 		.owner = THIS_MODULE,
 	},
 };
 
-static int __init acpuclk_8960_init(void)
+static int __init acpuclk_8974_init(void)
 {
-	return platform_driver_probe(&acpuclk_copper_driver,
-				     acpuclk_copper_probe);
+	return platform_driver_probe(&acpuclk_8974_driver,
+				     acpuclk_8974_probe);
 }
-device_initcall(acpuclk_8960_init);
+device_initcall(acpuclk_8974_init);
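
Beyond the rename, the file keeps the standard device-tree binding pattern: an of_device_id table matched against the "qcom,acpuclk-8974" node and a one-shot platform_driver_probe() registration. The sketch below shows that pattern in isolation, assuming a made-up "qcom,example-clk" compatible string and a trivial probe body; it is not the driver's actual probe logic.

	#include <linux/init.h>
	#include <linux/module.h>
	#include <linux/of.h>
	#include <linux/platform_device.h>

	static int __init example_probe(struct platform_device *pdev)
	{
		dev_info(&pdev->dev, "matched via device tree\n");
		return 0;
	}

	static struct of_device_id example_match_table[] = {
		{ .compatible = "qcom,example-clk" },	/* hypothetical node */
		{}
	};

	static struct platform_driver example_driver = {
		.driver = {
			.name = "example-clk",
			.of_match_table = example_match_table,
			.owner = THIS_MODULE,
		},
	};

	static int __init example_driver_init(void)
	{
		/* One-shot probe: fails if no matching device exists at init. */
		return platform_driver_probe(&example_driver, example_probe);
	}
	device_initcall(example_driver_init);

Because platform_driver_probe() does not keep a reference to the probe routine, the probe function can stay in __init memory, which is why acpuclk_8974_probe() above is marked __init.
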
diff --git a/arch/arm/mach-msm/acpuclock-krait.c b/arch/arm/mach-msm/acpuclock-krait.c
index 5682ac3..84253b8 100644
--- a/arch/arm/mach-msm/acpuclock-krait.c
+++ b/arch/arm/mach-msm/acpuclock-krait.c
@@ -32,6 +32,7 @@
 #include <mach/socinfo.h>
 #include <mach/msm-krait-l2-accessors.h>
 #include <mach/rpm-regulator.h>
+#include <mach/rpm-regulator-smd.h>
 #include <mach/msm_bus.h>
 
 #include "acpuclock.h"
@@ -52,7 +53,8 @@
 static DEFINE_SPINLOCK(l2_lock);
 
 static struct drv_data {
-	const struct acpu_level *acpu_freq_tbl;
+	struct acpu_level *acpu_freq_tbl;
+	const struct acpu_level *max_acpu_lvl;
 	const struct l2_level *l2_freq_tbl;
 	struct scalable *scalable;
 	u32 bus_perf_client;
@@ -92,35 +94,39 @@
 	udelay(1);
 }
 
-/* Enable an already-configured HFPLL. */
-static void hfpll_enable(struct scalable *sc, bool skip_regulators)
+static int enable_rpm_vreg(struct vreg *vreg)
+{
+	int ret = 0;
+
+	if (vreg->rpm_reg) {
+		ret = rpm_regulator_enable(vreg->rpm_reg);
+		if (ret)
+			dev_err(drv.dev, "%s regulator enable failed (%d)\n",
+				vreg->name, ret);
+	}
+
+	return ret;
+}
+
+static void disable_rpm_vreg(struct vreg *vreg)
 {
 	int rc;
 
+	if (vreg->rpm_reg) {
+		rc = rpm_regulator_disable(vreg->rpm_reg);
+		if (rc)
+			dev_err(drv.dev, "%s regulator disable failed (%d)\n",
+				vreg->name, rc);
+	}
+}
+
+/* Enable an already-configured HFPLL. */
+static void hfpll_enable(struct scalable *sc, bool skip_regulators)
+{
 	if (!skip_regulators) {
 		/* Enable regulators required by the HFPLL. */
-		if (sc->vreg[VREG_HFPLL_A].rpm_vreg_id) {
-			rc = rpm_vreg_set_voltage(
-				sc->vreg[VREG_HFPLL_A].rpm_vreg_id,
-				sc->vreg[VREG_HFPLL_A].rpm_vreg_voter,
-				sc->vreg[VREG_HFPLL_A].cur_vdd,
-				sc->vreg[VREG_HFPLL_A].max_vdd, 0);
-			if (rc)
-				dev_err(drv.dev,
-					"%s regulator enable failed (%d)\n",
-					sc->vreg[VREG_HFPLL_A].name, rc);
-		}
-		if (sc->vreg[VREG_HFPLL_B].rpm_vreg_id) {
-			rc = rpm_vreg_set_voltage(
-				sc->vreg[VREG_HFPLL_B].rpm_vreg_id,
-				sc->vreg[VREG_HFPLL_B].rpm_vreg_voter,
-				sc->vreg[VREG_HFPLL_B].cur_vdd,
-				sc->vreg[VREG_HFPLL_B].max_vdd, 0);
-			if (rc)
-				dev_err(drv.dev,
-					"%s regulator enable failed (%d)\n",
-					sc->vreg[VREG_HFPLL_B].name, rc);
-		}
+		enable_rpm_vreg(&sc->vreg[VREG_HFPLL_A]);
+		enable_rpm_vreg(&sc->vreg[VREG_HFPLL_B]);
 	}
 
 	/* Disable PLL bypass mode. */
@@ -147,8 +153,6 @@
 /* Disable a HFPLL for power-savings or while it's being reprogrammed. */
 static void hfpll_disable(struct scalable *sc, bool skip_regulators)
 {
-	int rc;
-
 	/*
 	 * Disable the PLL output, disable test mode, enable the bypass mode,
 	 * and assert the reset.
@@ -157,26 +161,8 @@
 
 	if (!skip_regulators) {
 		/* Remove voltage votes required by the HFPLL. */
-		if (sc->vreg[VREG_HFPLL_B].rpm_vreg_id) {
-			rc = rpm_vreg_set_voltage(
-				sc->vreg[VREG_HFPLL_B].rpm_vreg_id,
-				sc->vreg[VREG_HFPLL_B].rpm_vreg_voter,
-				0, 0, 0);
-			if (rc)
-				dev_err(drv.dev,
-					"%s regulator enable failed (%d)\n",
-					sc->vreg[VREG_HFPLL_B].name, rc);
-		}
-		if (sc->vreg[VREG_HFPLL_A].rpm_vreg_id) {
-			rc = rpm_vreg_set_voltage(
-				sc->vreg[VREG_HFPLL_A].rpm_vreg_id,
-				sc->vreg[VREG_HFPLL_A].rpm_vreg_voter,
-				0, 0, 0);
-			if (rc)
-				dev_err(drv.dev,
-					"%s regulator enable failed (%d)\n",
-					sc->vreg[VREG_HFPLL_A].name, rc);
-		}
+		disable_rpm_vreg(&sc->vreg[VREG_HFPLL_B]);
+		disable_rpm_vreg(&sc->vreg[VREG_HFPLL_A]);
 	}
 }
 
@@ -228,19 +214,19 @@
 		set_pri_clk_src(sc, PRI_SRC_SEL_SEC_SRC);
 
 		/* Re-program HFPLL. */
-		hfpll_disable(sc, 1);
+		hfpll_disable(sc, true);
 		hfpll_set_rate(sc, tgt_s);
-		hfpll_enable(sc, 1);
+		hfpll_enable(sc, true);
 
 		/* Move to HFPLL. */
 		set_pri_clk_src(sc, tgt_s->pri_src_sel);
 	} else if (strt_s->src == HFPLL && tgt_s->src != HFPLL) {
 		set_sec_clk_src(sc, tgt_s->sec_src_sel);
 		set_pri_clk_src(sc, tgt_s->pri_src_sel);
-		hfpll_disable(sc, 0);
+		hfpll_disable(sc, false);
 	} else if (strt_s->src != HFPLL && tgt_s->src == HFPLL) {
 		hfpll_set_rate(sc, tgt_s);
-		hfpll_enable(sc, 0);
+		hfpll_enable(sc, false);
 		set_pri_clk_src(sc, tgt_s->pri_src_sel);
 	} else {
 		set_sec_clk_src(sc, tgt_s->sec_src_sel);
@@ -261,9 +247,8 @@
 	 * vdd_mem should be >= vdd_dig.
 	 */
 	if (vdd_mem > sc->vreg[VREG_MEM].cur_vdd) {
-		rc = rpm_vreg_set_voltage(sc->vreg[VREG_MEM].rpm_vreg_id,
-				sc->vreg[VREG_MEM].rpm_vreg_voter, vdd_mem,
-				sc->vreg[VREG_MEM].max_vdd, 0);
+		rc = rpm_regulator_set_voltage(sc->vreg[VREG_MEM].rpm_reg,
+				vdd_mem, sc->vreg[VREG_MEM].max_vdd);
 		if (rc) {
 			dev_err(drv.dev,
 				"vdd_mem (cpu%d) increase failed (%d)\n",
@@ -275,9 +260,8 @@
 
 	/* Increase vdd_dig active-set vote. */
 	if (vdd_dig > sc->vreg[VREG_DIG].cur_vdd) {
-		rc = rpm_vreg_set_voltage(sc->vreg[VREG_DIG].rpm_vreg_id,
-				sc->vreg[VREG_DIG].rpm_vreg_voter, vdd_dig,
-				sc->vreg[VREG_DIG].max_vdd, 0);
+		rc = rpm_regulator_set_voltage(sc->vreg[VREG_DIG].rpm_reg,
+				vdd_dig, sc->vreg[VREG_DIG].max_vdd);
 		if (rc) {
 			dev_err(drv.dev,
 				"vdd_dig (cpu%d) increase failed (%d)\n",
@@ -336,9 +320,8 @@
 
 	/* Decrease vdd_dig active-set vote. */
 	if (vdd_dig < sc->vreg[VREG_DIG].cur_vdd) {
-		ret = rpm_vreg_set_voltage(sc->vreg[VREG_DIG].rpm_vreg_id,
-				sc->vreg[VREG_DIG].rpm_vreg_voter, vdd_dig,
-				sc->vreg[VREG_DIG].max_vdd, 0);
+		ret = rpm_regulator_set_voltage(sc->vreg[VREG_DIG].rpm_reg,
+				vdd_dig, sc->vreg[VREG_DIG].max_vdd);
 		if (ret) {
 			dev_err(drv.dev,
 				"vdd_dig (cpu%d) decrease failed (%d)\n",
@@ -353,9 +336,8 @@
 	 * vdd_mem should be >= vdd_dig.
 	 */
 	if (vdd_mem < sc->vreg[VREG_MEM].cur_vdd) {
-		ret = rpm_vreg_set_voltage(sc->vreg[VREG_MEM].rpm_vreg_id,
-				sc->vreg[VREG_MEM].rpm_vreg_voter, vdd_mem,
-				sc->vreg[VREG_MEM].max_vdd, 0);
+		ret = rpm_regulator_set_voltage(sc->vreg[VREG_MEM].rpm_reg,
+				vdd_mem, sc->vreg[VREG_MEM].max_vdd);
 		if (ret) {
 			dev_err(drv.dev,
 				"vdd_mem (cpu%d) decrease failed (%d)\n",
@@ -484,7 +466,7 @@
 	pr_debug("Initializing HFPLL%d\n", sc - drv.scalable);
 
 	/* Disable the PLL for re-programming. */
-	hfpll_disable(sc, 1);
+	hfpll_disable(sc, true);
 
 	/* Configure PLL parameters for integer mode. */
 	writel_relaxed(sc->hfpll_data->config_val,
@@ -492,89 +474,162 @@
 	writel_relaxed(0, sc->hfpll_base + sc->hfpll_data->m_offset);
 	writel_relaxed(1, sc->hfpll_base + sc->hfpll_data->n_offset);
 
+	/* Program droop controller, if supported */
+	if (sc->hfpll_data->has_droop_ctl)
+		writel_relaxed(sc->hfpll_data->droop_val,
+			       sc->hfpll_base + sc->hfpll_data->droop_offset);
+
 	/* Set an initial rate and enable the PLL. */
 	hfpll_set_rate(sc, tgt_s);
-	hfpll_enable(sc, 0);
+	hfpll_enable(sc, false);
+}
+
+static int __cpuinit rpm_regulator_init(struct scalable *sc, enum vregs vreg,
+					 int vdd, bool enable)
+{
+	int ret;
+
+	if (!sc->vreg[vreg].name)
+		return 0;
+
+	sc->vreg[vreg].rpm_reg = rpm_regulator_get(drv.dev,
+						   sc->vreg[vreg].name);
+	if (IS_ERR(sc->vreg[vreg].rpm_reg)) {
+		ret = PTR_ERR(sc->vreg[vreg].rpm_reg);
+		dev_err(drv.dev, "rpm_regulator_get(%s) failed (%d)\n",
+			sc->vreg[vreg].name, ret);
+		goto err_get;
+	}
+
+	ret = rpm_regulator_set_voltage(sc->vreg[vreg].rpm_reg, vdd,
+					sc->vreg[vreg].max_vdd);
+	if (ret) {
+		dev_err(drv.dev, "%s initialization failed (%d)\n",
+			sc->vreg[vreg].name, ret);
+		goto err_conf;
+	}
+	sc->vreg[vreg].cur_vdd = vdd;
+
+	if (enable) {
+		ret = enable_rpm_vreg(&sc->vreg[vreg]);
+		if (ret)
+			goto err_conf;
+	}
+
+	return 0;
+
+err_conf:
+	rpm_regulator_put(sc->vreg[vreg].rpm_reg);
+err_get:
+	return ret;
+}
+
+static void __cpuinit rpm_regulator_cleanup(struct scalable *sc,
+						enum vregs vreg)
+{
+	if (!sc->vreg[vreg].rpm_reg)
+		return;
+
+	disable_rpm_vreg(&sc->vreg[vreg]);
+	rpm_regulator_put(sc->vreg[vreg].rpm_reg);
 }
 
 /* Voltage regulator initialization. */
-static void __init regulator_init(const struct acpu_level *lvl)
+static int __cpuinit regulator_init(struct scalable *sc)
 {
-	int cpu, ret;
-	struct scalable *sc;
-	int vdd_mem, vdd_dig, vdd_core;
+	int ret, vdd_mem, vdd_dig, vdd_core;
 
-	vdd_mem = calculate_vdd_mem(lvl);
-	vdd_dig = calculate_vdd_dig(lvl);
+	vdd_mem = calculate_vdd_mem(drv.max_acpu_lvl);
+	vdd_dig = calculate_vdd_dig(drv.max_acpu_lvl);
 
-	for_each_possible_cpu(cpu) {
-		sc = &drv.scalable[cpu];
+	ret = rpm_regulator_init(sc, VREG_MEM, vdd_mem, true);
+	if (ret)
+		goto err_mem;
+	ret = rpm_regulator_init(sc, VREG_DIG, vdd_dig, true);
+	if (ret)
+		goto err_dig;
+	ret = rpm_regulator_init(sc, VREG_HFPLL_A,
+			   sc->vreg[VREG_HFPLL_A].max_vdd, false);
+	if (ret)
+		goto err_hfpll_a;
+	ret = rpm_regulator_init(sc, VREG_HFPLL_B,
+			   sc->vreg[VREG_HFPLL_B].max_vdd, false);
+	if (ret)
+		goto err_hfpll_b;
 
-		/* Set initial vdd_mem vote. */
-		ret = rpm_vreg_set_voltage(sc->vreg[VREG_MEM].rpm_vreg_id,
-				sc->vreg[VREG_MEM].rpm_vreg_voter, vdd_mem,
-				sc->vreg[VREG_MEM].max_vdd, 0);
-		if (ret) {
-			dev_err(drv.dev, "%s initialization failed (%d)\n",
-				sc->vreg[VREG_MEM].name, ret);
-			BUG();
-		}
-		sc->vreg[VREG_MEM].cur_vdd  = vdd_mem;
-
-		/* Set initial vdd_dig vote. */
-		ret = rpm_vreg_set_voltage(sc->vreg[VREG_DIG].rpm_vreg_id,
-				sc->vreg[VREG_DIG].rpm_vreg_voter, vdd_dig,
-				sc->vreg[VREG_DIG].max_vdd, 0);
-		if (ret) {
-			dev_err(drv.dev, "%s initialization failed (%d)\n",
-				sc->vreg[VREG_DIG].name, ret);
-			BUG();
-		}
-		sc->vreg[VREG_DIG].cur_vdd  = vdd_dig;
-
-		/* Setup Krait CPU regulators and initial core voltage. */
-		sc->vreg[VREG_CORE].reg = regulator_get(NULL,
-					  sc->vreg[VREG_CORE].name);
-		if (IS_ERR(sc->vreg[VREG_CORE].reg)) {
-			dev_err(drv.dev, "regulator_get(%s) failed (%ld)\n",
-				sc->vreg[VREG_CORE].name,
-				PTR_ERR(sc->vreg[VREG_CORE].reg));
-			BUG();
-		}
-		vdd_core = calculate_vdd_core(lvl);
-		ret = regulator_set_voltage(sc->vreg[VREG_CORE].reg, vdd_core,
-					    sc->vreg[VREG_CORE].max_vdd);
-		if (ret) {
-			dev_err(drv.dev, "regulator_set_voltage(%s) (%d)\n",
-				sc->vreg[VREG_CORE].name, ret);
-			BUG();
-		}
-		sc->vreg[VREG_CORE].cur_vdd = vdd_core;
-		ret = regulator_set_optimum_mode(sc->vreg[VREG_CORE].reg,
-						 sc->vreg[VREG_CORE].peak_ua);
-		if (ret < 0) {
-			dev_err(drv.dev, "regulator_set_optimum_mode(%s) failed"
-				" (%d)\n", sc->vreg[VREG_CORE].name, ret);
-			BUG();
-		}
-		ret = regulator_enable(sc->vreg[VREG_CORE].reg);
-		if (ret) {
-			dev_err(drv.dev, "regulator_enable(%s) failed (%d)\n",
-				sc->vreg[VREG_CORE].name, ret);
-			BUG();
-		}
+	/* Setup Krait CPU regulators and initial core voltage. */
+	sc->vreg[VREG_CORE].reg = regulator_get(drv.dev,
+				  sc->vreg[VREG_CORE].name);
+	if (IS_ERR(sc->vreg[VREG_CORE].reg)) {
+		ret = PTR_ERR(sc->vreg[VREG_CORE].reg);
+		dev_err(drv.dev, "regulator_get(%s) failed (%d)\n",
+			sc->vreg[VREG_CORE].name, ret);
+		goto err_core_get;
 	}
+	vdd_core = calculate_vdd_core(drv.max_acpu_lvl);
+	ret = regulator_set_voltage(sc->vreg[VREG_CORE].reg, vdd_core,
+				    sc->vreg[VREG_CORE].max_vdd);
+	if (ret) {
+		dev_err(drv.dev, "regulator_set_voltage(%s) (%d)\n",
+			sc->vreg[VREG_CORE].name, ret);
+		goto err_core_conf;
+	}
+	sc->vreg[VREG_CORE].cur_vdd = vdd_core;
+	ret = regulator_set_optimum_mode(sc->vreg[VREG_CORE].reg,
+					 sc->vreg[VREG_CORE].peak_ua);
+	if (ret < 0) {
+		dev_err(drv.dev, "regulator_set_optimum_mode(%s) failed (%d)\n",
+			sc->vreg[VREG_CORE].name, ret);
+		goto err_core_conf;
+	}
+	ret = regulator_enable(sc->vreg[VREG_CORE].reg);
+	if (ret) {
+		dev_err(drv.dev, "regulator_enable(%s) failed (%d)\n",
+			sc->vreg[VREG_CORE].name, ret);
+		goto err_core_conf;
+	}
+
+	return 0;
+
+err_core_conf:
+	regulator_put(sc->vreg[VREG_CORE].reg);
+err_core_get:
+	rpm_regulator_cleanup(sc, VREG_HFPLL_B);
+err_hfpll_b:
+	rpm_regulator_cleanup(sc, VREG_HFPLL_A);
+err_hfpll_a:
+	rpm_regulator_cleanup(sc, VREG_DIG);
+err_dig:
+	rpm_regulator_cleanup(sc, VREG_MEM);
+err_mem:
+	return ret;
+}
+
+static void __cpuinit regulator_cleanup(struct scalable *sc)
+{
+	regulator_disable(sc->vreg[VREG_CORE].reg);
+	regulator_put(sc->vreg[VREG_CORE].reg);
+	rpm_regulator_cleanup(sc, VREG_HFPLL_B);
+	rpm_regulator_cleanup(sc, VREG_HFPLL_A);
+	rpm_regulator_cleanup(sc, VREG_DIG);
+	rpm_regulator_cleanup(sc, VREG_MEM);
 }
 
 /* Set initial rate for a given core. */
-static void __init init_clock_sources(struct scalable *sc,
-				      const struct core_speed *tgt_s)
+static int __cpuinit init_clock_sources(struct scalable *sc,
+					 const struct core_speed *tgt_s)
 {
 	u32 regval;
+	void __iomem *aux_reg;
 
 	/* Program AUX source input to the secondary MUX. */
-	if (sc->aux_clk_sel_addr)
-		writel_relaxed(sc->aux_clk_sel, sc->aux_clk_sel_addr);
+	if (sc->aux_clk_sel_phys) {
+		aux_reg = ioremap(sc->aux_clk_sel_phys, 4);
+		if (!aux_reg)
+			return -ENOMEM;
+		writel_relaxed(sc->aux_clk_sel, aux_reg);
+		iounmap(aux_reg);
+	}
 
 	/* Switch away from the HFPLL while it's re-initialized. */
 	set_sec_clk_src(sc, SEC_SRC_SEL_AUX);
@@ -590,21 +645,43 @@
 	set_sec_clk_src(sc, tgt_s->sec_src_sel);
 	set_pri_clk_src(sc, tgt_s->pri_src_sel);
 	sc->cur_speed = tgt_s;
+
+	return 0;
 }
 
-static void __init per_cpu_init(int cpu, const struct acpu_level *max_level)
+static int __cpuinit per_cpu_init(int cpu)
 {
-	drv.scalable[cpu].hfpll_base =
-		ioremap(drv.scalable[cpu].hfpll_phys_base, SZ_32);
-	BUG_ON(!drv.scalable[cpu].hfpll_base);
+	struct scalable *sc = &drv.scalable[cpu];
+	int ret;
 
-	init_clock_sources(&drv.scalable[cpu], &max_level->speed);
-	drv.scalable[cpu].l2_vote = max_level->l2_level;
+	sc->hfpll_base = ioremap(sc->hfpll_phys_base, SZ_32);
+	if (!sc->hfpll_base) {
+		ret = -ENOMEM;
+		goto err_ioremap;
+	}
+
+	ret = regulator_init(sc);
+	if (ret)
+		goto err_regulators;
+
+	ret = init_clock_sources(sc, &drv.max_acpu_lvl->speed);
+	if (ret)
+		goto err_clocks;
+	sc->l2_vote = drv.max_acpu_lvl->l2_level;
+	sc->initialized = true;
+
+	return 0;
+
+err_clocks:
+	regulator_cleanup(sc);
+err_regulators:
+	iounmap(sc->hfpll_base);
+err_ioremap:
+	return ret;
 }
 
 /* Register with bus driver. */
-static void __init bus_init(struct msm_bus_scale_pdata *bus_scale_data,
-			    unsigned int init_bw)
+static void __init bus_init(struct msm_bus_scale_pdata *bus_scale_data)
 {
 	int ret;
 
@@ -614,7 +691,8 @@
 		BUG();
 	}
 
-	ret = msm_bus_scale_client_update_request(drv.bus_perf_client, init_bw);
+	ret = msm_bus_scale_client_update_request(drv.bus_perf_client,
+					drv.max_acpu_lvl->l2_level->bw_level);
 	if (ret)
 		dev_err(drv.dev, "initial bandwidth req failed (%d)\n", ret);
 }
@@ -672,6 +750,12 @@
 		regulator_set_optimum_mode(sc->vreg[VREG_CORE].reg, 0);
 		break;
 	case CPU_UP_PREPARE:
+		if (!sc->initialized) {
+			rc = per_cpu_init(cpu);
+			if (rc)
+				return NOTIFY_BAD;
+			break;
+		}
 		if (WARN_ON(!prev_khz[cpu]))
 			return NOTIFY_BAD;
 		rc = regulator_set_optimum_mode(sc->vreg[VREG_CORE].reg,
@@ -691,10 +775,29 @@
 	.notifier_call = acpuclk_cpu_callback,
 };
 
-static const struct acpu_level __init *select_freq_plan(
-		const struct acpu_level *const *pvs_tbl, u32 qfprom_phys)
+static int krait_needs_vmin(void)
 {
-	const struct acpu_level *l, *max_acpu_level = NULL;
+	switch (read_cpuid_id()) {
+	case 0x511F04D0: /* KR28M2A20 */
+	case 0x511F04D1: /* KR28M2A21 */
+	case 0x510F06F0: /* KR28M4A10 */
+		return 1;
+	default:
+		return 0;
+	};
+}
+
+static void krait_apply_vmin(struct acpu_level *tbl)
+{
+	for (; tbl->speed.khz != 0; tbl++)
+		if (tbl->vdd_core < 1150000)
+			tbl->vdd_core = 1150000;
+}
+
+static void __init select_freq_plan(struct acpu_level *const *pvs_tbl,
+				    u32 qfprom_phys)
+{
+	const struct acpu_level *l;
 	void __iomem *qfprom_base;
 	u32 pte_efuse, pvs, tbl_idx;
 	char *pvs_names[] = { "Slow", "Nominal", "Fast", "Unknown" };
@@ -735,15 +838,16 @@
 	}
 	drv.acpu_freq_tbl = pvs_tbl[tbl_idx];
 
+	if (krait_needs_vmin())
+		krait_apply_vmin(drv.acpu_freq_tbl);
+
 	/* Find the max supported scaling frequency. */
 	for (l = drv.acpu_freq_tbl; l->speed.khz != 0; l++)
 		if (l->use_for_scaling)
-			max_acpu_level = l;
-	BUG_ON(!max_acpu_level);
+			drv.max_acpu_lvl = l;
+	BUG_ON(!drv.max_acpu_lvl);
 	dev_info(drv.dev, "Max ACPU freq: %lu KHz\n",
-		 max_acpu_level->speed.khz);
-
-	return max_acpu_level;
+		 drv.max_acpu_lvl->speed.khz);
 }
 
 static struct acpuclk_data acpuclk_krait_data = {
@@ -756,24 +860,34 @@
 int __init acpuclk_krait_init(struct device *dev,
 			      const struct acpuclk_krait_params *params)
 {
-	const struct acpu_level *max_acpu_level;
-	int cpu;
+	struct scalable *l2;
+	int cpu, rc;
 
 	drv.scalable = params->scalable;
 	drv.l2_freq_tbl = params->l2_freq_tbl;
 	drv.dev = dev;
 
-	drv.scalable[L2].hfpll_base =
-		ioremap(drv.scalable[L2].hfpll_phys_base, SZ_32);
-	BUG_ON(!drv.scalable[L2].hfpll_base);
+	select_freq_plan(params->pvs_acpu_freq_tbl, params->qfprom_phys_base);
+	bus_init(params->bus_scale_data);
 
-	max_acpu_level = select_freq_plan(params->pvs_acpu_freq_tbl,
-					  params->qfprom_phys_base);
-	regulator_init(max_acpu_level);
-	bus_init(params->bus_scale_data, max_acpu_level->l2_level->bw_level);
-	init_clock_sources(&drv.scalable[L2], &max_acpu_level->l2_level->speed);
-	for_each_online_cpu(cpu)
-		per_cpu_init(cpu, max_acpu_level);
+	l2 = &drv.scalable[L2];
+	l2->hfpll_base = ioremap(l2->hfpll_phys_base, SZ_32);
+	BUG_ON(!l2->hfpll_base);
+
+	rc = rpm_regulator_init(l2, VREG_HFPLL_A,
+				l2->vreg[VREG_HFPLL_A].max_vdd, false);
+	BUG_ON(rc);
+	rc = rpm_regulator_init(l2, VREG_HFPLL_B,
+				l2->vreg[VREG_HFPLL_B].max_vdd, false);
+	BUG_ON(rc);
+
+	rc = init_clock_sources(l2, &drv.max_acpu_lvl->l2_level->speed);
+	BUG_ON(rc);
+
+	for_each_online_cpu(cpu) {
+		rc = per_cpu_init(cpu);
+		BUG_ON(rc);
+	}
 
 	cpufreq_table_init();
 
diff --git a/arch/arm/mach-msm/acpuclock-krait.h b/arch/arm/mach-msm/acpuclock-krait.h
index fbf1f5f..f121548 100644
--- a/arch/arm/mach-msm/acpuclock-krait.h
+++ b/arch/arm/mach-msm/acpuclock-krait.h
@@ -40,6 +40,7 @@
 	PLL_0 = 0,
 	HFPLL,
 	QSB,
+	PLL_8,
 };
 
 /**
@@ -91,18 +92,17 @@
  * struct vreg - Voltage regulator data.
  * @name: Name of regulator.
  * @max_vdd: Limit the maximum-settable voltage.
- * @rpm_vreg_id: ID to use with rpm_vreg_*() APIs.
  * @reg: Regulator handle.
+ * @rpm_reg: RPM Regulator handle.
  * @cur_vdd: Last-set voltage in uV.
  * @peak_ua: Maximum current draw expected in uA.
  */
 struct vreg {
-	const char name[15];
+	const char *name;
 	const int max_vdd;
 	const int peak_ua;
-	const int rpm_vreg_voter;
-	const int rpm_vreg_id;
 	struct regulator *reg;
+	struct rpm_regulator *rpm_reg;
 	int cur_vdd;
 };
 
@@ -147,7 +147,7 @@
 	const int use_for_scaling;
 	const struct core_speed speed;
 	const struct l2_level *l2_level;
-	const int vdd_core;
+	int vdd_core;
 };
 
 /**
@@ -158,6 +158,10 @@
  * @n_offset: "N" value register offset from base address.
  * @config_offset: Configuration register offset from base address.
  * @config_val: Value to initialize the @config_offset register to.
+ * @has_droop_ctl: Indicates the presence of a voltage droop controller.
+ * @droop_offset: Droop controller register offset from base address.
+ * @droop_val: Value to initialize the @droop_offset register to.
+ * @low_vdd_l_max: Maximum "L" value supported at HFPLL_VDD_LOW.
  * @vdd: voltage requirements for each VDD level.
  */
 struct hfpll_data {
@@ -167,6 +171,9 @@
 	const u32 n_offset;
 	const u32 config_offset;
 	const u32 config_val;
+	const bool has_droop_ctl;
+	const u32 droop_offset;
+	const u32 droop_val;
 	const u32 low_vdd_l_max;
 	const int vdd[NUM_HFPLL_VDD];
 };
@@ -175,24 +182,26 @@
  * struct scalable - Register locations and state associated with a scalable HW.
  * @hfpll_phys_base: Physical base address of HFPLL register.
  * @hfpll_base: Virtual base address of HFPLL registers.
- * @aux_clk_sel_addr: Virtual address of auxiliary MUX.
+ * @aux_clk_sel_phys: Physical address of auxiliary MUX.
  * @aux_clk_sel: Auxiliary mux input to select at boot.
  * @l2cpmr_iaddr: Indirect address of the CPMR MUX/divider CP15 register.
  * @hfpll_data: Descriptive data of HFPLL hardware.
  * @cur_speed: Pointer to currently-set speed.
  * @l2_vote: L2 performance level vote associated with the current CPU speed.
  * @vreg: Array of voltage regulators needed by the scalable.
+ * @initialized: Flag set to true when per_cpu_init() has been called.
  */
 struct scalable {
-	const u32 hfpll_phys_base;
+	const phys_addr_t hfpll_phys_base;
 	void __iomem *hfpll_base;
-	void __iomem *aux_clk_sel_addr;
+	const phys_addr_t aux_clk_sel_phys;
 	const u32 aux_clk_sel;
 	const u32 l2cpmr_iaddr;
 	const struct hfpll_data *hfpll_data;
 	const struct core_speed *cur_speed;
 	const struct l2_level *l2_vote;
 	struct vreg vreg[NUM_VREG];
+	bool initialized;
 };
 
 /**
@@ -206,10 +215,10 @@
  */
 struct acpuclk_krait_params {
 	struct scalable *scalable;
-	const struct acpu_level *pvs_acpu_freq_tbl[NUM_PVS];
+	struct acpu_level *pvs_acpu_freq_tbl[NUM_PVS];
 	const struct l2_level *l2_freq_tbl;
 	const size_t l2_freq_tbl_size;
-	const u32 qfprom_phys_base;
+	const phys_addr_t qfprom_phys_base;
 	struct msm_bus_scale_pdata *bus_scale_data;
 };
 
diff --git a/arch/arm/mach-msm/bam_dmux.c b/arch/arm/mach-msm/bam_dmux.c
index d53e471..3df566c 100644
--- a/arch/arm/mach-msm/bam_dmux.c
+++ b/arch/arm/mach-msm/bam_dmux.c
@@ -238,11 +238,12 @@
 static DEFINE_SPINLOCK(wakelock_reference_lock);
 static int wakelock_reference_count;
 static int a2_pc_disabled_wakelock_skipped;
-static int disconnect_ack;
+static int disconnect_ack = 1;
 static LIST_HEAD(bam_other_notify_funcs);
 static DEFINE_MUTEX(smsm_cb_lock);
 static DEFINE_MUTEX(delayed_ul_vote_lock);
 static int need_delayed_ul_vote;
+static int power_management_only_mode;
 
 struct outside_notify_func {
 	void (*notify)(void *, int, unsigned long);
@@ -561,9 +562,9 @@
 		bam_dmux_log("%s: opening cid %d PC enabled\n", __func__,
 				rx_hdr->ch_id);
 		handle_bam_mux_cmd_open(rx_hdr);
-		if (rx_hdr->reserved & ENABLE_DISCONNECT_ACK) {
-			bam_dmux_log("%s: activating disconnect ack\n");
-			disconnect_ack = 1;
+		if (!(rx_hdr->reserved & ENABLE_DISCONNECT_ACK)) {
+			bam_dmux_log("%s: deactivating disconnect ack\n", __func__);
+			disconnect_ack = 0;
 		}
 		dev_kfree_skb_any(rx_skb);
 		break;
@@ -1688,21 +1689,28 @@
 
 	in_global_reset = 0;
 	vote_dfab();
-	i = sps_device_reset(a2_device_handle);
-	if (i)
-		pr_err("%s: device reset failed rc = %d\n", __func__, i);
-	i = sps_connect(bam_tx_pipe, &tx_connection);
-	if (i)
-		pr_err("%s: tx connection failed rc = %d\n", __func__, i);
-	i = sps_connect(bam_rx_pipe, &rx_connection);
-	if (i)
-		pr_err("%s: rx connection failed rc = %d\n", __func__, i);
-	i = sps_register_event(bam_tx_pipe, &tx_register_event);
-	if (i)
-		pr_err("%s: tx event reg failed rc = %d\n", __func__, i);
-	i = sps_register_event(bam_rx_pipe, &rx_register_event);
-	if (i)
-		pr_err("%s: rx event reg failed rc = %d\n", __func__, i);
+	if (!power_management_only_mode) {
+		i = sps_device_reset(a2_device_handle);
+		if (i)
+			pr_err("%s: device reset failed rc = %d\n", __func__,
+									i);
+		i = sps_connect(bam_tx_pipe, &tx_connection);
+		if (i)
+			pr_err("%s: tx connection failed rc = %d\n", __func__,
+									i);
+		i = sps_connect(bam_rx_pipe, &rx_connection);
+		if (i)
+			pr_err("%s: rx connection failed rc = %d\n", __func__,
+									i);
+		i = sps_register_event(bam_tx_pipe, &tx_register_event);
+		if (i)
+			pr_err("%s: tx event reg failed rc = %d\n", __func__,
+									i);
+		i = sps_register_event(bam_rx_pipe, &rx_register_event);
+		if (i)
+			pr_err("%s: rx event reg failed rc = %d\n", __func__,
+									i);
+	}
 
 	bam_connection_is_active = 1;
 
@@ -1711,7 +1719,8 @@
 
 	toggle_apps_ack();
 	complete_all(&bam_connection_completion);
-	queue_rx();
+	if (!power_management_only_mode)
+		queue_rx();
 }
 
 static void disconnect_to_bam(void)
@@ -1733,11 +1742,13 @@
 
 	/* tear down BAM connection */
 	INIT_COMPLETION(bam_connection_completion);
-	sps_disconnect(bam_tx_pipe);
-	sps_disconnect(bam_rx_pipe);
+	if (!power_management_only_mode) {
+		sps_disconnect(bam_tx_pipe);
+		sps_disconnect(bam_rx_pipe);
+		__memzero(rx_desc_mem_buf.base, rx_desc_mem_buf.size);
+		__memzero(tx_desc_mem_buf.base, tx_desc_mem_buf.size);
+	}
 	unvote_dfab();
-	__memzero(rx_desc_mem_buf.base, rx_desc_mem_buf.size);
-	__memzero(tx_desc_mem_buf.base, tx_desc_mem_buf.size);
 
 	mutex_lock(&bam_rx_pool_mutexlock);
 	while (!list_empty(&bam_rx_pool)) {
@@ -2081,7 +2092,6 @@
 	int ret;
 	void *a2_virt_addr;
 
-	unvote_dfab();
 	/* init BAM */
 	a2_virt_addr = ioremap_nocache(A2_PHYS_BASE, A2_PHYS_SIZE);
 	if (!a2_virt_addr) {
@@ -2114,6 +2124,10 @@
 	mutex_unlock(&delayed_ul_vote_lock);
 	toggle_apps_ack();
 
+	power_management_only_mode = 1;
+	bam_connection_is_active = 1;
+	complete_all(&bam_connection_completion);
+
 	return 0;
 
 register_bam_failed:
diff --git a/arch/arm/mach-msm/board-8064-gpiomux.c b/arch/arm/mach-msm/board-8064-gpiomux.c
index b941bd4..1c19442 100644
--- a/arch/arm/mach-msm/board-8064-gpiomux.c
+++ b/arch/arm/mach-msm/board-8064-gpiomux.c
@@ -797,6 +797,13 @@
 	.pull = GPIOMUX_PULL_DOWN,
 };
 
+static struct gpiomux_setting mdm2ap_pblrdy = {
+	.func = GPIOMUX_FUNC_GPIO,
+	.drv = GPIOMUX_DRV_16MA,
+	.pull = GPIOMUX_PULL_DOWN,
+};
+
+
 static struct gpiomux_setting ap2mdm_soft_reset_cfg = {
 	.func = GPIOMUX_FUNC_GPIO,
 	.drv = GPIOMUX_DRV_8MA,
@@ -852,6 +859,13 @@
 			[GPIOMUX_SUSPENDED] = &ap2mdm_wakeup,
 		}
 	},
+	/* MDM2AP_PBL_READY */
+	{
+		.gpio = 46,
+		.settings = {
+			[GPIOMUX_SUSPENDED] = &mdm2ap_pblrdy,
+		}
+	},
 };
 
 static struct gpiomux_setting mi2s_act_cfg = {
@@ -1168,6 +1182,22 @@
 };
 #endif
 
+static struct gpiomux_setting apq8064_sdc3_card_det_cfg = {
+	.func = GPIOMUX_FUNC_GPIO,
+	.drv = GPIOMUX_DRV_2MA,
+	.pull = GPIOMUX_PULL_UP,
+};
+
+static struct msm_gpiomux_config apq8064_sdc3_configs[] __initdata = {
+	{
+		.gpio      = 26,
+		.settings = {
+			[GPIOMUX_SUSPENDED] = &apq8064_sdc3_card_det_cfg,
+			[GPIOMUX_ACTIVE] = &apq8064_sdc3_card_det_cfg,
+		},
+	},
+};
+
 void __init apq8064_init_gpiomux(void)
 {
 	int rc;
@@ -1259,4 +1289,7 @@
 	 msm_gpiomux_install(apq8064_sdc4_configs,
 			     ARRAY_SIZE(apq8064_sdc4_configs));
 #endif
+
+	msm_gpiomux_install(apq8064_sdc3_configs,
+			ARRAY_SIZE(apq8064_sdc3_configs));
 }
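
The SDC3 card-detect entry added above follows the usual MSM gpiomux recipe: a gpiomux_setting describes function, drive strength and pull for a pin, a msm_gpiomux_config ties settings to a GPIO for the ACTIVE and/or SUSPENDED power states, and msm_gpiomux_install() applies the table from the board's gpiomux init hook. A small sketch with a hypothetical pin is shown below; GPIO 42 and the chosen settings are illustrative, not values from this patch.

	#include <linux/init.h>
	#include <mach/gpiomux.h>

	static struct gpiomux_setting example_active = {
		.func = GPIOMUX_FUNC_1,		/* peripheral function while in use */
		.drv = GPIOMUX_DRV_8MA,
		.pull = GPIOMUX_PULL_NONE,
	};

	static struct gpiomux_setting example_suspended = {
		.func = GPIOMUX_FUNC_GPIO,	/* park as a pulled-down GPIO */
		.drv = GPIOMUX_DRV_2MA,
		.pull = GPIOMUX_PULL_DOWN,
	};

	static struct msm_gpiomux_config example_configs[] __initdata = {
		{
			.gpio = 42,		/* hypothetical pin */
			.settings = {
				[GPIOMUX_ACTIVE] = &example_active,
				[GPIOMUX_SUSPENDED] = &example_suspended,
			},
		},
	};

	static void __init example_init_gpiomux(void)
	{
		/* Board code calls msm_gpiomux_init(NR_GPIO_IRQS) before this. */
		msm_gpiomux_install(example_configs, ARRAY_SIZE(example_configs));
	}
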
diff --git a/arch/arm/mach-msm/board-8064-gpu.c b/arch/arm/mach-msm/board-8064-gpu.c
index e24cac6..30a2683 100644
--- a/arch/arm/mach-msm/board-8064-gpu.c
+++ b/arch/arm/mach-msm/board-8064-gpu.c
@@ -97,13 +97,13 @@
 		.src = MSM_BUS_MASTER_GRAPHICS_3D,
 		.dst = MSM_BUS_SLAVE_EBI_CH0,
 		.ab = 0,
-		.ib = KGSL_CONVERT_TO_MBPS(3200),
+		.ib = KGSL_CONVERT_TO_MBPS(2656),
 	},
 	{
 		.src = MSM_BUS_MASTER_GRAPHICS_3D_PORT1,
 		.dst = MSM_BUS_SLAVE_EBI_CH0,
 		.ab = 0,
-		.ib = KGSL_CONVERT_TO_MBPS(3200),
+		.ib = KGSL_CONVERT_TO_MBPS(2656),
 	},
 };
 
@@ -224,6 +224,7 @@
 	.set_grp_async = NULL,
 	.idle_timeout = HZ/10,
 	.nap_allowed = true,
+	.strtstp_sleepwake = true,
 	.clk_map = KGSL_CLK_CORE | KGSL_CLK_IFACE | KGSL_CLK_MEM_IFACE,
 #ifdef CONFIG_MSM_BUS_SCALING
 	.bus_scale_table = &grp3d_bus_scale_pdata,
diff --git a/arch/arm/mach-msm/board-8064-regulator.c b/arch/arm/mach-msm/board-8064-regulator.c
index 622b213..f727852 100644
--- a/arch/arm/mach-msm/board-8064-regulator.c
+++ b/arch/arm/mach-msm/board-8064-regulator.c
@@ -221,6 +221,7 @@
 	REGULATOR_SUPPLY("8921_lvs7",		NULL),
 	REGULATOR_SUPPLY("pll_vdd",		"pil_riva"),
 	REGULATOR_SUPPLY("lvds_vdda",		"lvds.0"),
+	REGULATOR_SUPPLY("hdmi_pll_fs",		"mdp.0"),
 	REGULATOR_SUPPLY("dsi1_vddio",		"mipi_dsi.1"),
 	REGULATOR_SUPPLY("hdmi_vdda",		"hdmi_msm.0"),
 };
@@ -255,7 +256,7 @@
 VREG_CONSUMERS(EXT_3P3V) = {
 	REGULATOR_SUPPLY("ext_3p3v",		NULL),
 	REGULATOR_SUPPLY("vdd_io",		"spi0.2"),
-	REGULATOR_SUPPLY("mhl_ext_3p3v",	"msm_otg"),
+	REGULATOR_SUPPLY("mhl_usb_hs_switch",	"msm_otg"),
 	REGULATOR_SUPPLY("lvds_vccs_3p3v",      "lvds.0"),
 	REGULATOR_SUPPLY("dsi1_vccs_3p3v",      "mipi_dsi.1"),
 	REGULATOR_SUPPLY("hdmi_mux_vdd",        "hdmi_msm.0"),
diff --git a/arch/arm/mach-msm/board-8064-storage.c b/arch/arm/mach-msm/board-8064-storage.c
index a53f771..a33b62b 100644
--- a/arch/arm/mach-msm/board-8064-storage.c
+++ b/arch/arm/mach-msm/board-8064-storage.c
@@ -251,6 +251,7 @@
 	.pin_data	= &mmc_slot_pin_data[SDCC1],
 	.vreg_data	= &mmc_slot_vreg_data[SDCC1],
 	.uhs_caps	= MMC_CAP_1_8V_DDR | MMC_CAP_UHS_DDR50,
+	.uhs_caps2	= MMC_CAP2_HS200_1_8V_SDR,
 	.mpm_sdiowakeup_int = MSM_MPM_PIN_SDC1_DAT1,
 	.msm_bus_voting_data = &sps_to_ddr_bus_voting_data,
 };
diff --git a/arch/arm/mach-msm/board-8064.c b/arch/arm/mach-msm/board-8064.c
index a73ef72..a574139 100644
--- a/arch/arm/mach-msm/board-8064.c
+++ b/arch/arm/mach-msm/board-8064.c
@@ -11,6 +11,7 @@
  *
  */
 #include <linux/kernel.h>
+#include <linux/bitops.h>
 #include <linux/platform_device.h>
 #include <linux/gpio.h>
 #include <linux/io.h>
@@ -72,6 +73,7 @@
 #include <linux/fmem.h>
 #include <mach/msm_pcie.h>
 #include <mach/restart.h>
+#include <mach/msm_iomap.h>
 
 #include "msm_watchdog.h"
 #include "board-8064.h"
@@ -119,6 +121,9 @@
 #define MSM_MM_FW_SIZE		(0x200000 - HOLE_SIZE)
 #define APQ8064_FW_START	APQ8064_FIXED_AREA_START
 
+#define QFPROM_RAW_FEAT_CONFIG_ROW0_MSB     (MSM_QFPROM_BASE + 0x23c)
+#define QFPROM_RAW_OEM_CONFIG_ROW0_LSB      (MSM_QFPROM_BASE + 0x220)
+
 /* PCIe power enable pmic gpio */
 #define PCIE_PWR_EN_PMIC_GPIO 13
 #define PCIE_RST_N_PMIC_MPP 1
@@ -1697,12 +1702,22 @@
 };
 #endif
 
+static struct mdm_vddmin_resource mdm_vddmin_rscs = {
+	.rpm_id = MSM_RPM_ID_VDDMIN_GPIO,
+	.ap2mdm_vddmin_gpio = 30,
+	.modes  = 0x03,
+	.drive_strength = 8,
+	.mdm2ap_vddmin_gpio = 80,
+};
+
 static struct mdm_platform_data mdm_platform_data = {
 	.mdm_version = "3.0",
 	.ramdump_delay_ms = 2000,
 	.early_power_on = 1,
 	.sfr_query = 1,
+	.vddmin_resource = &mdm_vddmin_rscs,
 	.peripheral_platform_device = &apq8064_device_hsic_host,
+	.ramdump_timeout_ms = 120000,
 };
 
 static struct tsens_platform_data apq_tsens_pdata  = {
@@ -2050,10 +2065,18 @@
 	.gpio = msm_pcie_gpio_info,
 };
 
+static int __init mpq8064_pcie_enabled(void)
+{
+	return !((readl_relaxed(QFPROM_RAW_FEAT_CONFIG_ROW0_MSB) & BIT(21)) ||
+		(readl_relaxed(QFPROM_RAW_OEM_CONFIG_ROW0_LSB) & BIT(4)));
+}
+
 static void __init mpq8064_pcie_init(void)
 {
-	msm_device_pcie.dev.platform_data = &msm_pcie_platform_data;
-	platform_device_register(&msm_device_pcie);
+	if (mpq8064_pcie_enabled()) {
+		msm_device_pcie.dev.platform_data = &msm_pcie_platform_data;
+		platform_device_register(&msm_device_pcie);
+	}
 }
 
 static struct platform_device apq8064_device_ext_5v_vreg __devinitdata = {
diff --git a/arch/arm/mach-msm/board-8930-gpu.c b/arch/arm/mach-msm/board-8930-gpu.c
index 3c3843a..0c5ae5f 100644
--- a/arch/arm/mach-msm/board-8930-gpu.c
+++ b/arch/arm/mach-msm/board-8930-gpu.c
@@ -16,6 +16,7 @@
 #include <linux/msm_kgsl.h>
 #include <mach/msm_bus_board.h>
 #include <mach/board.h>
+#include <mach/socinfo.h>
 
 #include "devices.h"
 #include "board-8930.h"
@@ -35,7 +36,7 @@
 		.src = MSM_BUS_MASTER_GRAPHICS_3D,
 		.dst = MSM_BUS_SLAVE_EBI_CH0,
 		.ab = 0,
-		.ib = KGSL_CONVERT_TO_MBPS(2000),
+		.ib = KGSL_CONVERT_TO_MBPS(1000),
 	},
 };
 
@@ -44,7 +45,7 @@
 		.src = MSM_BUS_MASTER_GRAPHICS_3D,
 		.dst = MSM_BUS_SLAVE_EBI_CH0,
 		.ab = 0,
-		.ib = KGSL_CONVERT_TO_MBPS(3200),
+		.ib = KGSL_CONVERT_TO_MBPS(2656),
 	},
 };
 
@@ -139,6 +140,7 @@
 	.set_grp_async = NULL,
 	.idle_timeout = HZ/12,
 	.nap_allowed = true,
+	.strtstp_sleepwake = true,
 	.clk_map = KGSL_CLK_CORE | KGSL_CLK_IFACE | KGSL_CLK_MEM_IFACE,
 #ifdef CONFIG_MSM_BUS_SCALING
 	.bus_scale_table = &grp3d_bus_scale_pdata,
@@ -159,5 +161,8 @@
 
 void __init msm8930_init_gpu(void)
 {
+	if (cpu_is_msm8627())
+		kgsl_3d0_pdata.pwrlevel[0].gpu_freq = 400000000;
+
 	platform_device_register(&device_kgsl_3d0);
 }
diff --git a/arch/arm/mach-msm/board-8930-regulator.c b/arch/arm/mach-msm/board-8930-regulator.c
index 5bee8a2..bc370ba 100644
--- a/arch/arm/mach-msm/board-8930-regulator.c
+++ b/arch/arm/mach-msm/board-8930-regulator.c
@@ -70,6 +70,8 @@
 	REGULATOR_SUPPLY("cam_vaf",		"4-0048"),
 	REGULATOR_SUPPLY("cam_vana",            "4-0020"),
 	REGULATOR_SUPPLY("cam_vaf",             "4-0020"),
+	REGULATOR_SUPPLY("vdd",			"12-0018"),
+	REGULATOR_SUPPLY("vdd",			"12-0068"),
 };
 VREG_CONSUMERS(L10) = {
 	REGULATOR_SUPPLY("8038_l10",		NULL),
@@ -186,6 +188,8 @@
 	REGULATOR_SUPPLY("vcc_i2c",		"3-004a"),
 	REGULATOR_SUPPLY("vcc_i2c",		"3-0024"),
 	REGULATOR_SUPPLY("vcc_i2c",		"0-0048"),
+	REGULATOR_SUPPLY("vddio",		"12-0018"),
+	REGULATOR_SUPPLY("vlogic",		"12-0068"),
 };
 VREG_CONSUMERS(EXT_5V) = {
 	REGULATOR_SUPPLY("ext_5v",		NULL),
diff --git a/arch/arm/mach-msm/board-8930-storage.c b/arch/arm/mach-msm/board-8930-storage.c
index 739b1c7..7280b22 100644
--- a/arch/arm/mach-msm/board-8930-storage.c
+++ b/arch/arm/mach-msm/board-8930-storage.c
@@ -21,6 +21,7 @@
 #include <mach/msm_bus_board.h>
 #include <mach/board.h>
 #include <mach/gpiomux.h>
+#include <mach/socinfo.h>
 #include "devices.h"
 
 #include "board-8930.h"
@@ -66,7 +67,8 @@
 		.lpm_sup = 1,
 		.hpm_uA = 800000, /* 800mA */
 		.lpm_uA = 9000,
-	}
+		.reset_at_init = true,
+	},
 };
 
 /* All SDCC controllers may require voting for VDD PAD voltage */
@@ -244,6 +246,7 @@
 	.pin_data	= &mmc_slot_pin_data[SDCC1],
 	.mpm_sdiowakeup_int = MSM_MPM_PIN_SDC1_DAT1,
 	.msm_bus_voting_data = &sps_to_ddr_bus_voting_data,
+	.uhs_caps2	= MMC_CAP2_HS200_1_8V_SDR,
 };
 #endif
 
@@ -264,7 +267,6 @@
 #endif
 	.vreg_data	= &mmc_slot_vreg_data[SDCC3],
 	.pin_data	= &mmc_slot_pin_data[SDCC3],
-#ifdef CONFIG_MMC_MSM_CARD_HW_DETECTION
 /*TODO: Insert right replacement for PM8038 */
 #ifndef MSM8930_PHASE_2
 	.status_gpio	= PM8921_GPIO_PM_TO_SYS(26),
@@ -275,7 +277,6 @@
 #endif
 	.irq_flags	= IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
 	.is_status_gpio_active_low = true,
-#endif
 	.xpc_cap	= 1,
 	.uhs_caps	= (MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 |
 			MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_DDR50 |
@@ -302,11 +303,27 @@
 	msm_add_sdcc(1, &msm8960_sdc1_data);
 #endif
 #ifdef CONFIG_MMC_MSM_SDC3_SUPPORT
+	/*
+	 * All 8930 platform boards using the 1.2 SoC have been reworked so that
+	 * the sd card detect line's esd circuit is no longer powered by the sd
+	 * card's voltage regulator. So this means we can turn the regulator off
+	 * to save power without affecting the sd card detect functionality.
+	 * This change to the boards will be true for newer versions of the SoC
+	 * as well.
+	 */
+	if ((SOCINFO_VERSION_MAJOR(socinfo_get_version()) >= 1 &&
+			SOCINFO_VERSION_MINOR(socinfo_get_version()) >= 2) ||
+			machine_is_msm8930_cdp()) {
+		msm8960_sdc3_data.vreg_data->vdd_data->always_on = false;
+		msm8960_sdc3_data.vreg_data->vdd_data->reset_at_init = false;
+	}
+
 	/* SDC3: External card slot */
 	if (!machine_is_msm8930_cdp()) {
 		msm8960_sdc3_data.wpswitch_gpio = 0;
 		msm8960_sdc3_data.is_wpswitch_active_low = false;
 	}
+
 	msm_add_sdcc(3, &msm8960_sdc3_data);
 #endif
 }
diff --git a/arch/arm/mach-msm/board-8930.c b/arch/arm/mach-msm/board-8930.c
index ab9fe5e..1a61dbb 100644
--- a/arch/arm/mach-msm/board-8930.c
+++ b/arch/arm/mach-msm/board-8930.c
@@ -78,6 +78,11 @@
 #include <mach/mdm2.h>
 #include <mach/msm_rtb.h>
 #include <linux/fmem.h>
+#include <mach/msm_cache_dump.h>
+
+#ifdef CONFIG_INPUT_MPU3050
+#include <linux/input/mpu3050.h>
+#endif
 
 #include "timer.h"
 #include "devices.h"
@@ -625,6 +630,19 @@
 	msm8930_mdp_writeback(msm8930_reserve_table);
 }
 
+#ifdef CONFIG_MSM_CACHE_DUMP
+static void __init reserve_cache_dump_memory(void)
+{
+	unsigned int total;
+
+	total = msm8930_cache_dump_pdata.l1_size +
+		msm8930_cache_dump_pdata.l2_size;
+	msm8930_reserve_table[MEMTYPE_EBI1].size += total;
+}
+#else
+static void __init reserve_cache_dump_memory(void) { }
+#endif
+
 static void __init msm8930_calculate_reserve_sizes(void)
 {
 	size_pmem_devices();
@@ -632,6 +650,7 @@
 	reserve_ion_memory();
 	reserve_mdp_memory();
 	reserve_rtb_memory();
+	reserve_cache_dump_memory();
 }
 
 static struct reserve_info msm8930_reserve_info __initdata = {
@@ -2145,6 +2164,7 @@
 	&msm8960_device_cache_erp,
 	&msm8930_iommu_domain_device,
 	&msm_tsens_device,
+	&msm8930_cache_dump_device,
 };
 
 static struct platform_device *cdp_devices[] __initdata = {
@@ -2311,6 +2331,21 @@
 	int                    len;
 };
 
+#ifdef CONFIG_INPUT_MPU3050
+#define MPU3050_INT_GPIO		69
+
+static struct mpu3050_gyro_platform_data mpu3050_gyro = {
+	.gpio_int = MPU3050_INT_GPIO,
+};
+
+static struct i2c_board_info __initdata mpu3050_i2c_boardinfo[] = {
+	{
+		I2C_BOARD_INFO("mpu3050", 0x68),
+		.platform_data = &mpu3050_gyro,
+	},
+};
+#endif
+
 #ifdef CONFIG_ISL9519_CHARGER
 static struct isl_platform_data isl_data __initdata = {
 	.valid_n_gpio		= 0,	/* Not required when notify-by-pmic */
@@ -2340,6 +2375,14 @@
 		ARRAY_SIZE(isl_charger_i2c_info),
 	},
 #endif /* CONFIG_ISL9519_CHARGER */
+#ifdef CONFIG_INPUT_MPU3050
+	{
+		I2C_FFA | I2C_FLUID,
+		MSM_8930_GSBI12_QUP_I2C_BUS_ID,
+		mpu3050_i2c_boardinfo,
+		ARRAY_SIZE(mpu3050_i2c_boardinfo),
+	},
+#endif
 	{
 		I2C_SURF | I2C_FFA | I2C_FLUID,
 		MSM_8930_GSBI9_QUP_I2C_BUS_ID,
diff --git a/arch/arm/mach-msm/board-8930.h b/arch/arm/mach-msm/board-8930.h
index e564aff..9f6276c 100644
--- a/arch/arm/mach-msm/board-8930.h
+++ b/arch/arm/mach-msm/board-8930.h
@@ -138,5 +138,7 @@
 #define MSM_8930_GSBI4_QUP_I2C_BUS_ID 4
 #define MSM_8930_GSBI9_QUP_I2C_BUS_ID 0
 #define MSM_8930_GSBI10_QUP_I2C_BUS_ID 10
+#define MSM_8930_GSBI12_QUP_I2C_BUS_ID 12
 
 extern struct msm_rtb_platform_data msm8930_rtb_pdata;
+extern struct msm_cache_dump_platform_data msm8930_cache_dump_pdata;
diff --git a/arch/arm/mach-msm/board-8960-regulator.c b/arch/arm/mach-msm/board-8960-regulator.c
index bc5a892..2664d6b 100644
--- a/arch/arm/mach-msm/board-8960-regulator.c
+++ b/arch/arm/mach-msm/board-8960-regulator.c
@@ -116,6 +116,7 @@
 	REGULATOR_SUPPLY("8921_l23",		NULL),
 	REGULATOR_SUPPLY("dsi_vddio",		"mipi_dsi.1"),
 	REGULATOR_SUPPLY("hdmi_avdd",		"hdmi_msm.0"),
+	REGULATOR_SUPPLY("hdmi_pll_fs",		"mdp.0"),
 	REGULATOR_SUPPLY("pll_vdd",		"pil_riva"),
 	REGULATOR_SUPPLY("pll_vdd",		"pil_qdsp6v4.1"),
 	REGULATOR_SUPPLY("pll_vdd",		"pil_qdsp6v4.2"),
@@ -245,7 +246,7 @@
 	REGULATOR_SUPPLY("ext_3p3v",		NULL),
 	REGULATOR_SUPPLY("vdd_ana",		"3-005b"),
 	REGULATOR_SUPPLY("vdd_lvds_3p3v",	"mipi_dsi.1"),
-	REGULATOR_SUPPLY("mhl_ext_3p3v",	"msm_otg"),
+	REGULATOR_SUPPLY("mhl_usb_hs_switch",	"msm_otg"),
 };
 VREG_CONSUMERS(EXT_OTG_SW) = {
 	REGULATOR_SUPPLY("ext_otg_sw",		NULL),
diff --git a/arch/arm/mach-msm/board-8960-storage.c b/arch/arm/mach-msm/board-8960-storage.c
index 4b09f82..67f44aa 100644
--- a/arch/arm/mach-msm/board-8960-storage.c
+++ b/arch/arm/mach-msm/board-8960-storage.c
@@ -295,6 +295,7 @@
 	.pin_data	= &mmc_slot_pin_data[SDCC1],
 	.mpm_sdiowakeup_int = MSM_MPM_PIN_SDC1_DAT1,
 	.msm_bus_voting_data = &sps_to_ddr_bus_voting_data,
+	.uhs_caps2	= MMC_CAP2_HS200_1_8V_SDR,
 };
 #endif
 
@@ -326,12 +327,10 @@
 #endif
 	.vreg_data	= &mmc_slot_vreg_data[SDCC3],
 	.pin_data	= &mmc_slot_pin_data[SDCC3],
-#ifdef CONFIG_MMC_MSM_CARD_HW_DETECTION
 	.status_gpio	= PM8921_GPIO_PM_TO_SYS(26),
 	.status_irq	= PM8921_GPIO_IRQ(PM8921_IRQ_BASE, 26),
 	.irq_flags	= IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
 	.is_status_gpio_active_low = true,
-#endif
 	.xpc_cap	= 1,
 	.uhs_caps	= (MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 |
 			MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_DDR50 |
diff --git a/arch/arm/mach-msm/board-8960.c b/arch/arm/mach-msm/board-8960.c
index 628a324..251c1de 100644
--- a/arch/arm/mach-msm/board-8960.c
+++ b/arch/arm/mach-msm/board-8960.c
@@ -1291,6 +1291,8 @@
 	.ramdump_delay_ms = 1000,
 	.soft_reset_inverted = 1,
 	.peripheral_platform_device = NULL,
+	.ramdump_timeout_ms = 600000,
+	.no_powerdown_after_ramdumps = 1,
 };
 
 #define MSM_TSIF0_PHYS			(0x18200000)
@@ -1416,6 +1418,7 @@
 
 static struct msm_spi_platform_data msm8960_qup_spi_gsbi1_pdata = {
 	.max_clock_speed = 15060000,
+	.infinite_mode	 = 1
 };
 
 #ifdef CONFIG_USB_MSM_OTG_72K
diff --git a/arch/arm/mach-msm/board-copper-gpiomux.c b/arch/arm/mach-msm/board-8974-gpiomux.c
similarity index 96%
rename from arch/arm/mach-msm/board-copper-gpiomux.c
rename to arch/arm/mach-msm/board-8974-gpiomux.c
index caba698..6fcc779 100644
--- a/arch/arm/mach-msm/board-copper-gpiomux.c
+++ b/arch/arm/mach-msm/board-8974-gpiomux.c
@@ -114,13 +114,13 @@
 	},
 };
 
-void __init msm_copper_init_gpiomux(void)
+void __init msm_8974_init_gpiomux(void)
 {
 	int rc;
 
 	rc = msm_gpiomux_init(NR_GPIO_IRQS);
 	if (rc) {
-		pr_err(KERN_ERR "msmcopper_init_gpiomux failed %d\n", rc);
+		pr_err("msm_8974_init_gpiomux failed %d\n", rc);
 		return;
 	}
 
diff --git a/arch/arm/mach-msm/board-copper-regulator.c b/arch/arm/mach-msm/board-8974-regulator.c
similarity index 75%
rename from arch/arm/mach-msm/board-copper-regulator.c
rename to arch/arm/mach-msm/board-8974-regulator.c
index 7543872..1a41f09 100644
--- a/arch/arm/mach-msm/board-copper-regulator.c
+++ b/arch/arm/mach-msm/board-8974-regulator.c
@@ -22,16 +22,16 @@
  *			 regulator name		consumer dev_name
  */
 VREG_CONSUMERS(K0) = {
-	REGULATOR_SUPPLY("krait0",		NULL),
+	REGULATOR_SUPPLY("krait0",		"f9000000.qcom,acpuclk"),
 };
 VREG_CONSUMERS(K1) = {
-	REGULATOR_SUPPLY("krait1",		NULL),
+	REGULATOR_SUPPLY("krait1",		"f9000000.qcom,acpuclk"),
 };
 VREG_CONSUMERS(K2) = {
-	REGULATOR_SUPPLY("krait2",		NULL),
+	REGULATOR_SUPPLY("krait2",		"f9000000.qcom,acpuclk"),
 };
 VREG_CONSUMERS(K3) = {
-	REGULATOR_SUPPLY("krait3",		NULL),
+	REGULATOR_SUPPLY("krait3",		"f9000000.qcom,acpuclk"),
 };
 
 #define PM8X41_VREG_INIT(_id, _name, _min_uV, _max_uV, _modes, _ops, \
@@ -71,25 +71,25 @@
 KRAIT_PWR(K2, "krait2", 0, 850000,  1100000, NULL,     100000, 0);
 KRAIT_PWR(K3, "krait3", 0, 850000,  1100000, NULL,     100000, 0);
 
-#define VREG_DEVICE(_name, _devid)					       \
-		vreg_device_##_name __devinitdata =			       \
-		{							       \
-			.name = STUB_REGULATOR_DRIVER_NAME,		       \
-			.id = _devid,					       \
-			.dev = { .platform_data = &vreg_dev_##_name##_pdata }, \
-		}
+#define VREG_DEVICE(_name, _devid) \
+	static struct platform_device vreg_device_##_name __devinitdata = \
+	{ \
+		.name = STUB_REGULATOR_DRIVER_NAME, \
+		.id = _devid, \
+		.dev = { .platform_data = &vreg_dev_##_name##_pdata }, \
+	}
 
-static struct platform_device VREG_DEVICE(K0, 0);
-static struct platform_device VREG_DEVICE(K1, 1);
-static struct platform_device VREG_DEVICE(K2, 2);
-static struct platform_device VREG_DEVICE(K3, 3);
+VREG_DEVICE(K0, 0);
+VREG_DEVICE(K1, 1);
+VREG_DEVICE(K2, 2);
+VREG_DEVICE(K3, 3);
 
-struct platform_device *msm_copper_stub_regulator_devices[] __devinitdata = {
+struct platform_device *msm_8974_stub_regulator_devices[] __devinitdata = {
 	&vreg_device_K0,
 	&vreg_device_K1,
 	&vreg_device_K2,
 	&vreg_device_K3,
 };
 
-int msm_copper_stub_regulator_devices_len __devinitdata =
-			ARRAY_SIZE(msm_copper_stub_regulator_devices);
+int msm_8974_stub_regulator_devices_len __devinitdata =
+			ARRAY_SIZE(msm_8974_stub_regulator_devices);
diff --git a/arch/arm/mach-msm/board-copper.c b/arch/arm/mach-msm/board-8974.c
similarity index 72%
rename from arch/arm/mach-msm/board-copper.c
rename to arch/arm/mach-msm/board-8974.c
index 85241a4..557331a 100644
--- a/arch/arm/mach-msm/board-copper.c
+++ b/arch/arm/mach-msm/board-8974.c
@@ -43,6 +43,7 @@
 #include <mach/rpm-regulator-smd.h>
 #include <mach/qpnp-int.h>
 #include <mach/socinfo.h>
+#include <mach/msm_bus_board.h>
 #include "clock.h"
 #include "devices.h"
 #include "spm.h"
@@ -72,7 +73,7 @@
 early_param("kernel_ebi1_mem_size", kernel_ebi1_mem_size_setup);
 #endif
 
-static struct memtype_reserve msm_copper_reserve_table[] __initdata = {
+static struct memtype_reserve msm_8974_reserve_table[] __initdata = {
 	[MEMTYPE_SMI] = {
 	},
 	[MEMTYPE_EBI0] = {
@@ -83,7 +84,7 @@
 	},
 };
 
-static int msm_copper_paddr_to_memtype(unsigned int paddr)
+static int msm_8974_paddr_to_memtype(unsigned int paddr)
 {
 	return MEMTYPE_EBI1;
 }
@@ -192,14 +193,14 @@
 
 static void __init reserve_ion_memory(void)
 {
-	msm_copper_reserve_table[MEMTYPE_EBI1].size += MSM_ION_MM_SIZE;
-	msm_copper_reserve_table[MEMTYPE_EBI1].size += MSM_ION_MM_FW_SIZE;
-	msm_copper_reserve_table[MEMTYPE_EBI1].size += MSM_ION_SF_SIZE;
-	msm_copper_reserve_table[MEMTYPE_EBI1].size += MSM_ION_MFC_SIZE;
-	msm_copper_reserve_table[MEMTYPE_EBI1].size += MSM_ION_QSECOM_SIZE;
-	msm_copper_reserve_table[MEMTYPE_EBI1].size += MSM_ION_AUDIO_SIZE;
+	msm_8974_reserve_table[MEMTYPE_EBI1].size += MSM_ION_MM_SIZE;
+	msm_8974_reserve_table[MEMTYPE_EBI1].size += MSM_ION_MM_FW_SIZE;
+	msm_8974_reserve_table[MEMTYPE_EBI1].size += MSM_ION_SF_SIZE;
+	msm_8974_reserve_table[MEMTYPE_EBI1].size += MSM_ION_MFC_SIZE;
+	msm_8974_reserve_table[MEMTYPE_EBI1].size += MSM_ION_QSECOM_SIZE;
+	msm_8974_reserve_table[MEMTYPE_EBI1].size += MSM_ION_AUDIO_SIZE;
 #ifdef CONFIG_KERNEL_PMEM_EBI_REGION
-	msm_copper_reserve_table[MEMTYPE_EBI1].size += kernel_ebi1_mem_size;
+	msm_8974_reserve_table[MEMTYPE_EBI1].size += kernel_ebi1_mem_size;
 #endif
 }
 #endif
@@ -268,7 +269,7 @@
 	},
 	{
 		.irq_config_id = SMD_Q6,
-		.subsys_name = "q6",
+		.subsys_name = "adsp",
 		.edge = SMD_APPS_QDSP,
 
 		.smd_int.irq_name = "adsp_smd_in",
@@ -356,7 +357,7 @@
 	.smd_smem_areas = aux_smem_areas,
 };
 
-struct platform_device msm_device_smd_copper = {
+struct platform_device msm_device_smd_8974 = {
 	.name	= "msm_smd",
 	.id	= -1,
 	.resource = smd_resource,
@@ -366,25 +367,25 @@
 	}
 };
 
-static void __init msm_copper_calculate_reserve_sizes(void)
+static void __init msm_8974_calculate_reserve_sizes(void)
 {
 #ifdef CONFIG_ION_MSM
 	reserve_ion_memory();
 #endif
 }
 
-static struct reserve_info msm_copper_reserve_info __initdata = {
-	.memtype_reserve_table = msm_copper_reserve_table,
-	.calculate_reserve_sizes = msm_copper_calculate_reserve_sizes,
-	.paddr_to_memtype = msm_copper_paddr_to_memtype,
+static struct reserve_info msm_8974_reserve_info __initdata = {
+	.memtype_reserve_table = msm_8974_reserve_table,
+	.calculate_reserve_sizes = msm_8974_calculate_reserve_sizes,
+	.paddr_to_memtype = msm_8974_paddr_to_memtype,
 };
 
-static void __init msm_copper_early_memory(void)
+static void __init msm_8974_early_memory(void)
 {
-	reserve_info = &msm_copper_reserve_info;
+	reserve_info = &msm_8974_reserve_info;
 }
 
-void __init msm_copper_reserve(void)
+void __init msm_8974_reserve(void)
 {
 	msm_reserve();
 }
@@ -394,33 +395,151 @@
 	.id	= -1,
 };
 
-#define SHARED_IMEM_TZ_BASE 0xFE805720
-static struct resource copper_tzlog_resources[] = {
+#define BIMC_BASE	0xfc380000
+#define BIMC_SIZE	0x0006A000
+#define SYS_NOC_BASE	0xfc460000
+#define PERIPH_NOC_BASE 0xFC468000
+#define OCMEM_NOC_BASE	0xfc470000
+#define	MMSS_NOC_BASE	0xfc478000
+#define CONFIG_NOC_BASE	0xfc480000
+#define NOC_SIZE	0x00004000
+
+static struct resource bimc_res[] = {
 	{
-		.start = SHARED_IMEM_TZ_BASE,
-		.end = SHARED_IMEM_TZ_BASE + SZ_4K - 1,
+		.start = BIMC_BASE,
+		.end = BIMC_BASE + BIMC_SIZE,
 		.flags = IORESOURCE_MEM,
+		.name = "bimc_mem",
 	},
 };
 
-struct platform_device copper_device_tz_log = {
-	.name		= "tz_log",
-	.id		= 0,
-	.num_resources	= ARRAY_SIZE(copper_tzlog_resources),
-	.resource	= copper_tzlog_resources,
+static struct resource ocmem_noc_res[] = {
+	{
+		.start = OCMEM_NOC_BASE,
+		.end = OCMEM_NOC_BASE + NOC_SIZE,
+		.flags = IORESOURCE_MEM,
+		.name = "ocmem_noc_mem",
+	},
 };
 
+static struct resource mmss_noc_res[] = {
+	{
+		.start = MMSS_NOC_BASE,
+		.end = MMSS_NOC_BASE + NOC_SIZE,
+		.flags = IORESOURCE_MEM,
+		.name = "mmss_noc_mem",
+	},
+};
 
-void __init msm_copper_add_devices(void)
+static struct resource sys_noc_res[] = {
+	{
+		.start = SYS_NOC_BASE,
+		.end = SYS_NOC_BASE + NOC_SIZE,
+		.flags = IORESOURCE_MEM,
+		.name = "sys_noc_mem",
+	},
+};
+
+static struct resource config_noc_res[] = {
+	{
+		.start = CONFIG_NOC_BASE,
+		.end = CONFIG_NOC_BASE + NOC_SIZE,
+		.flags = IORESOURCE_MEM,
+		.name = "config_noc_mem",
+	},
+};
+
+static struct resource periph_noc_res[] = {
+	{
+		.start = PERIPH_NOC_BASE,
+		.end = PERIPH_NOC_BASE + NOC_SIZE,
+		.flags = IORESOURCE_MEM,
+		.name = "periph_noc_mem",
+	},
+};
+
+static struct platform_device msm_bus_sys_noc = {
+	.name  = "msm_bus_fabric",
+	.id    =  MSM_BUS_FAB_SYS_NOC,
+	.num_resources = ARRAY_SIZE(sys_noc_res),
+	.resource = sys_noc_res,
+};
+
+static struct platform_device msm_bus_bimc = {
+	.name  = "msm_bus_fabric",
+	.id    = MSM_BUS_FAB_BIMC,
+	.num_resources = ARRAY_SIZE(bimc_res),
+	.resource = bimc_res,
+};
+
+static struct platform_device msm_bus_mmss_noc = {
+	.name  = "msm_bus_fabric",
+	.id    = MSM_BUS_FAB_MMSS_NOC,
+	.num_resources = ARRAY_SIZE(mmss_noc_res),
+	.resource = mmss_noc_res,
+};
+
+static struct platform_device msm_bus_ocmem_noc = {
+	.name  = "msm_bus_fabric",
+	.id    = MSM_BUS_FAB_OCMEM_NOC,
+	.num_resources = ARRAY_SIZE(ocmem_noc_res),
+	.resource = ocmem_noc_res,
+};
+
+static struct platform_device msm_bus_periph_noc = {
+	.name  = "msm_bus_fabric",
+	.id    = MSM_BUS_FAB_PERIPH_NOC,
+	.num_resources = ARRAY_SIZE(periph_noc_res),
+	.resource = periph_noc_res,
+};
+
+static struct platform_device msm_bus_config_noc = {
+	.name  = "msm_bus_fabric",
+	.id    = MSM_BUS_FAB_CONFIG_NOC,
+	.num_resources = ARRAY_SIZE(config_noc_res),
+	.resource = config_noc_res,
+};
+
+static struct platform_device msm_bus_ocmem_vnoc = {
+	.name  = "msm_bus_fabric",
+	.id    = MSM_BUS_FAB_OCMEM_VNOC,
+};
+
+static struct platform_device *msm_bus_8974_devices[] = {
+	&msm_bus_sys_noc,
+	&msm_bus_bimc,
+	&msm_bus_mmss_noc,
+	&msm_bus_ocmem_noc,
+	&msm_bus_periph_noc,
+	&msm_bus_config_noc,
+	&msm_bus_ocmem_vnoc,
+};
+
+static void __init msm8974_init_buses(void)
+{
+#ifdef CONFIG_MSM_BUS_SCALING
+	msm_bus_sys_noc.dev.platform_data =
+		&msm_bus_8974_sys_noc_pdata;
+	msm_bus_bimc.dev.platform_data = &msm_bus_8974_bimc_pdata;
+	msm_bus_mmss_noc.dev.platform_data = &msm_bus_8974_mmss_noc_pdata;
+	msm_bus_ocmem_noc.dev.platform_data = &msm_bus_8974_ocmem_noc_pdata;
+	msm_bus_periph_noc.dev.platform_data = &msm_bus_8974_periph_noc_pdata;
+	msm_bus_config_noc.dev.platform_data = &msm_bus_8974_config_noc_pdata;
+	msm_bus_ocmem_vnoc.dev.platform_data = &msm_bus_8974_ocmem_vnoc_pdata;
+#endif
+	platform_add_devices(msm_bus_8974_devices,
+				ARRAY_SIZE(msm_bus_8974_devices));
+}
+
+void __init msm_8974_add_devices(void)
 {
 #ifdef CONFIG_ION_MSM
 	platform_device_register(&ion_dev);
 #endif
-	platform_device_register(&msm_device_smd_copper);
+	platform_device_register(&msm_device_smd_8974);
 	platform_device_register(&android_usb_device);
-	platform_add_devices(msm_copper_stub_regulator_devices,
-					msm_copper_stub_regulator_devices_len);
-	platform_device_register(&copper_device_tz_log);
+	platform_add_devices(msm_8974_stub_regulator_devices,
+					msm_8974_stub_regulator_devices_len);
 }
 
 static struct clk_lookup msm_clocks_dummy[] = {
@@ -463,7 +582,7 @@
  * into this category, and thus the driver should not be added here. The
  * EPROBE_DEFER can satisfy most dependency problems.
  */
-void __init msm_copper_add_drivers(void)
+void __init msm_8974_add_drivers(void)
 {
 	msm_init_modem_notifier_list();
 	msm_smd_init();
@@ -472,10 +591,11 @@
 	rpm_regulator_smd_driver_init();
 	msm_spm_device_init();
 	regulator_stub_init();
-	if (machine_is_copper_rumi())
+	if (machine_is_msm8974_rumi())
 		msm_clock_init(&msm_dummy_clock_init_data);
 	else
-		msm_clock_init(&msmcopper_clock_init_data);
+		msm_clock_init(&msm8974_clock_init_data);
+	msm8974_init_buses();
 }
 
 static struct of_device_id irq_match[] __initdata  = {
@@ -485,12 +605,12 @@
 	{}
 };
 
-void __init msm_copper_init_irq(void)
+void __init msm_8974_init_irq(void)
 {
 	of_irq_init(irq_match);
 }
 
-static struct of_dev_auxdata msm_copper_auxdata_lookup[] __initdata = {
+static struct of_dev_auxdata msm_8974_auxdata_lookup[] __initdata = {
 	OF_DEV_AUXDATA("qcom,msm-lsuart-v14", 0xF991F000, \
 			"msm_serial_hsl.0", NULL),
 	OF_DEV_AUXDATA("qcom,hsusb-otg", 0xF9A55000, \
@@ -523,16 +643,16 @@
 	{}
 };
 
-void __init msm_copper_init(struct of_dev_auxdata **adata)
+void __init msm_8974_init(struct of_dev_auxdata **adata)
 {
-	msm_copper_init_gpiomux();
+	msm_8974_init_gpiomux();
 
-	*adata = msm_copper_auxdata_lookup;
+	*adata = msm_8974_auxdata_lookup;
 
 	regulator_has_full_constraints();
 }
 
-void __init msm_copper_very_early(void)
+void __init msm_8974_very_early(void)
 {
-	msm_copper_early_memory();
+	msm_8974_early_memory();
 }
diff --git a/arch/arm/mach-msm/board-9615-storage.c b/arch/arm/mach-msm/board-9615-storage.c
index 2025bd0..6cb34f8 100644
--- a/arch/arm/mach-msm/board-9615-storage.c
+++ b/arch/arm/mach-msm/board-9615-storage.c
@@ -178,11 +178,9 @@
 	.sup_clk_cnt	= ARRAY_SIZE(sdc1_sup_clk_rates),
 	.vreg_data	= &mmc_slot_vreg_data[SDCC1],
 	.pin_data	= &mmc_slot_pin_data[SDCC1],
-#ifdef CONFIG_MMC_MSM_CARD_HW_DETECTION
 	.status_gpio	= GPIO_SDC1_HW_DET,
 	.status_irq	= MSM_GPIO_TO_INT(GPIO_SDC1_HW_DET),
 	.irq_flags	= IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
-#endif
 	.xpc_cap	= 1,
 	.uhs_caps	= (MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 |
 			   MMC_CAP_MAX_CURRENT_400),
diff --git a/arch/arm/mach-msm/board-9615.c b/arch/arm/mach-msm/board-9615.c
index 568de46..a312e2b 100644
--- a/arch/arm/mach-msm/board-9615.c
+++ b/arch/arm/mach-msm/board-9615.c
@@ -52,6 +52,7 @@
 #include "pm.h"
 #include "pm-boot.h"
 #include <mach/gpiomux.h>
+#include "ci13xxx_udc.h"
 
 #ifdef CONFIG_ION_MSM
 #define MSM_ION_AUDIO_SIZE	0xAF000
@@ -194,7 +195,8 @@
 
 /* Initial PM8018 GPIO configurations */
 static struct pm8xxx_gpio_init pm8018_gpios[] __initdata = {
-	PM8018_GPIO_OUTPUT(2,	0,	HIGH) /* EXT_LDO_EN_WLAN */
+	PM8018_GPIO_OUTPUT(2,	0,	HIGH), /* EXT_LDO_EN_WLAN */
+	PM8018_GPIO_OUTPUT(6,	0,	LOW), /* WLAN_CLK_PWR_REQ */
 };
 
 /* Initial PM8018 MPP configurations */
@@ -761,10 +763,21 @@
 	.core_clk_always_on_workaround = true,
 };
 
-static struct msm_hsic_peripheral_platform_data msm_hsic_peripheral_pdata = {
+
+static struct ci13xxx_platform_data msm_peripheral_pdata = {
+	.usb_core_id = 0,
+};
+
+static struct msm_hsic_peripheral_platform_data
+			msm_hsic_peripheral_pdata_private = {
 	.core_clk_always_on_workaround = true,
 };
 
+static struct ci13xxx_platform_data msm_hsic_peripheral_pdata = {
+	.usb_core_id = 1,
+	.prv_data = &msm_hsic_peripheral_pdata_private,
+};
+
 #define PID_MAGIC_ID		0x71432909
 #define SERIAL_NUM_MAGIC_ID	0x61945374
 #define SERIAL_NUMBER_LENGTH	127
@@ -863,6 +876,7 @@
 	&msm_device_hsic_host,
 	&msm_device_usb_bam,
 	&msm_android_usb_device,
+	&msm_android_usb_hsic_device,
 	&msm9615_device_uart_gsbi4,
 	&msm9615_device_ext_2p95v_vreg,
 	&msm9615_device_ssbi_pmic1,
@@ -958,6 +972,8 @@
 {
 	struct android_usb_platform_data *android_pdata =
 				msm_android_usb_device.dev.platform_data;
+	struct android_usb_platform_data *android_hsic_pdata =
+				msm_android_usb_hsic_device.dev.platform_data;
 
 	msm9615_device_init();
 	msm9615_init_gpiomux();
@@ -975,6 +991,8 @@
 
 	msm_device_otg.dev.platform_data = &msm_otg_pdata;
 	msm_otg_pdata.phy_init_seq = shelby_phy_init_seq;
+	msm_device_gadget_peripheral.dev.platform_data =
+		&msm_peripheral_pdata;
 	msm_device_hsic_peripheral.dev.platform_data =
 		&msm_hsic_peripheral_pdata;
 	msm_device_usb_bam.dev.platform_data = &msm_usb_bam_pdata;
@@ -987,8 +1005,12 @@
 	msm9615_init_mmc();
 	slim_register_board_info(msm_slim_devices,
 		ARRAY_SIZE(msm_slim_devices));
+
 	android_pdata->update_pid_and_serial_num =
 					usb_diag_update_pid_and_serial_num;
+	android_hsic_pdata->update_pid_and_serial_num =
+					usb_diag_update_pid_and_serial_num;
+
 	msm_pm_boot_pdata.p_addr = allocate_contiguous_ebi_nomap(SZ_8, SZ_64K);
 	BUG_ON(msm_pm_boot_init(&msm_pm_boot_pdata));
 	msm_tsens_early_init(&msm_tsens_pdata);
diff --git a/arch/arm/mach-msm/board-dt.c b/arch/arm/mach-msm/board-dt.c
index 674df09..b2a4ce2 100644
--- a/arch/arm/mach-msm/board-dt.c
+++ b/arch/arm/mach-msm/board-dt.c
@@ -35,14 +35,14 @@
 
 static void __init msm_dt_init_irq(void)
 {
-	if (machine_is_copper())
-		msm_copper_init_irq();
+	if (machine_is_msm8974())
+		msm_8974_init_irq();
 }
 
 static void __init msm_dt_map_io(void)
 {
-	if (early_machine_is_copper())
-		msm_map_copper_io();
+	if (early_machine_is_msm8974())
+		msm_map_8974_io();
 	if (socinfo_init() < 0)
 		pr_err("%s: socinfo_init() failed\n", __func__);
 }
@@ -51,31 +51,31 @@
 {
 	struct of_dev_auxdata *adata = NULL;
 
-	if (machine_is_copper())
-		msm_copper_init(&adata);
+	if (machine_is_msm8974())
+		msm_8974_init(&adata);
 
 	of_platform_populate(NULL, of_default_bus_match_table, adata, NULL);
-	if (machine_is_copper()) {
-		msm_copper_add_devices();
-		msm_copper_add_drivers();
+	if (machine_is_msm8974()) {
+		msm_8974_add_devices();
+		msm_8974_add_drivers();
 	}
 }
 
 static const char *msm_dt_match[] __initconst = {
-	"qcom,msmcopper",
+	"qcom,msm8974",
 	NULL
 };
 
 static void __init msm_dt_reserve(void)
 {
-	if (early_machine_is_copper())
-		msm_copper_reserve();
+	if (early_machine_is_msm8974())
+		msm_8974_reserve();
 }
 
 static void __init msm_dt_init_very_early(void)
 {
-	if (early_machine_is_copper())
-		msm_copper_very_early();
+	if (early_machine_is_msm8974())
+		msm_8974_very_early();
 }
 
 DT_MACHINE_START(MSM_DT, "Qualcomm MSM (Flattened Device Tree)")
@@ -85,7 +85,6 @@
 	.handle_irq = gic_handle_irq,
 	.timer = &msm_dt_timer,
 	.dt_compat = msm_dt_match,
-	.nr_irqs = -1,
 	.reserve = msm_dt_reserve,
 	.init_very_early = msm_dt_init_very_early,
 MACHINE_END
diff --git a/arch/arm/mach-msm/board-fsm9xxx.c b/arch/arm/mach-msm/board-fsm9xxx.c
index b071353..0e2aa3b 100644
--- a/arch/arm/mach-msm/board-fsm9xxx.c
+++ b/arch/arm/mach-msm/board-fsm9xxx.c
@@ -89,6 +89,15 @@
 #define GPIO_USER_FIRST		58
 #define GPIO_USER_LAST		63
 
+#define GPIO_UIM_RESET		75
+#define GPIO_UIM_DATA_IO	76
+#define GPIO_UIM_CLOCK		77
+
+#define GPIO_PM_UIM_M_RST	26	/* UIM_RST input */
+#define GPIO_PM_UIM_RST		27	/* UIM_RST output */
+#define GPIO_PM_UIM_M_CLK	28	/* UIM_CLK input */
+#define GPIO_PM_UIM_CLK		29	/* UIM_CLK output */
+
 #define FPGA_SDCC_STATUS        0x8E0001A8
 
 /* Macros assume PMIC GPIOs start at 0 */
@@ -100,6 +109,8 @@
 
 #define PMIC_GPIO_5V_PA_PWR	21	/* PMIC GPIO Number 22 */
 #define PMIC_GPIO_4_2V_PA_PWR	22	/* PMIC GPIO Number 23 */
+#define PMIC_MPP_UIM_M_DATA	0	/* UIM_DATA input */
+#define PMIC_MPP_UIM_DATA	1	/* UIM_DATA output */
 #define PMIC_MPP_3		2	/* PMIC MPP Number 3 */
 #define PMIC_MPP_6		5	/* PMIC MPP Number 6 */
 #define PMIC_MPP_7		6	/* PMIC MPP Number 7 */
@@ -181,6 +192,10 @@
 			PM8XXX_MPP_AOUT_LVL_1V25_2, AOUT_CTRL_ENABLE),
 		PM8XXX_MPP_INIT(PMIC_MPP_6, A_OUTPUT,
 			PM8XXX_MPP_AOUT_LVL_1V25_2, AOUT_CTRL_ENABLE),
+		PM8XXX_MPP_INIT(PMIC_MPP_UIM_M_DATA, D_BI_DIR,
+			PM8058_MPP_DIG_LEVEL_L3, BI_PULLUP_30KOHM),
+		PM8XXX_MPP_INIT(PMIC_MPP_UIM_DATA, D_BI_DIR,
+			PM8058_MPP_DIG_LEVEL_L3, BI_PULLUP_30KOHM),
 	};
 
 	for (i = 0; i < ARRAY_SIZE(pm8058_mpps); i++) {
@@ -597,6 +612,52 @@
 }
 #endif
 
+static struct msm_gpio uart3_uim_config_data[] = {
+	{ GPIO_CFG(GPIO_UIM_RESET, 0, GPIO_CFG_OUTPUT,
+		GPIO_CFG_PULL_DOWN, GPIO_CFG_2MA), "UIM_Reset" },
+	{ GPIO_CFG(GPIO_UIM_DATA_IO, 2, GPIO_CFG_OUTPUT,
+		GPIO_CFG_PULL_DOWN, GPIO_CFG_2MA), "UIM_Data" },
+	{ GPIO_CFG(GPIO_UIM_CLOCK, 2, GPIO_CFG_OUTPUT,
+		GPIO_CFG_PULL_DOWN, GPIO_CFG_2MA), "UIM_Clock" },
+};
+
+static void fsm9xxx_init_uart3_uim(void)
+{
+	struct pm_gpio pmic_uim_gpio_in = {
+		.direction	= PM_GPIO_DIR_IN,
+		.pull		= PM_GPIO_PULL_NO,
+		.out_strength	= PM_GPIO_STRENGTH_HIGH,
+		.function	= PM_GPIO_FUNC_PAIRED,
+		.vin_sel	= PM8058_GPIO_VIN_L3,
+	};
+	struct pm_gpio pmic_uim_gpio_out = {
+		.direction	= PM_GPIO_DIR_OUT,
+		.pull		= PM_GPIO_PULL_NO,
+		.out_strength	= PM_GPIO_STRENGTH_HIGH,
+		.function	= PM_GPIO_FUNC_PAIRED,
+		.vin_sel	= PM8058_GPIO_VIN_L3,
+	};
+
+	/* TLMM */
+	msm_gpios_request_enable(uart3_uim_config_data,
+			ARRAY_SIZE(uart3_uim_config_data));
+
+	/* Put UIM to reset state */
+	gpio_direction_output(GPIO_UIM_RESET, 0);
+	gpio_set_value(GPIO_UIM_RESET, 0);
+	gpio_export(GPIO_UIM_RESET, false);
+
+	/* PMIC */
+	pm8xxx_gpio_config(PM8058_GPIO_PM_TO_SYS(GPIO_PM_UIM_M_RST),
+		&pmic_uim_gpio_in);
+	pm8xxx_gpio_config(PM8058_GPIO_PM_TO_SYS(GPIO_PM_UIM_RST),
+		&pmic_uim_gpio_out);
+	pm8xxx_gpio_config(PM8058_GPIO_PM_TO_SYS(GPIO_PM_UIM_M_CLK),
+		&pmic_uim_gpio_in);
+	pm8xxx_gpio_config(PM8058_GPIO_PM_TO_SYS(GPIO_PM_UIM_CLK),
+		&pmic_uim_gpio_out);
+}
+
 /*
  * SSBI
  */
@@ -833,6 +894,7 @@
 #if defined(CONFIG_SERIAL_MSM) || defined(CONFIG_MSM_SERIAL_DEBUGGER)
 	&msm_device_uart1,
 #endif
+	&msm_device_uart3,
 #if defined(CONFIG_QFP_FUSE)
 	&fsm_qfp_fuse_device,
 #endif
@@ -903,6 +965,7 @@
 #ifdef CONFIG_SERIAL_MSM_CONSOLE
 	fsm9xxx_init_uart1();
 #endif
+	fsm9xxx_init_uart3_uim();
 #ifdef CONFIG_I2C_SSBI
 	msm_device_ssbi2.dev.platform_data = &msm_i2c_ssbi2_pdata;
 	msm_device_ssbi3.dev.platform_data = &msm_i2c_ssbi3_pdata;
diff --git a/arch/arm/mach-msm/board-msm7627a-display.c b/arch/arm/mach-msm/board-msm7627a-display.c
index 3da68ad..9259161 100644
--- a/arch/arm/mach-msm/board-msm7627a-display.c
+++ b/arch/arm/mach-msm/board-msm7627a-display.c
@@ -1155,14 +1155,14 @@
 		if (rc < 0)
 			return rc;
 
-		gpio_reg_2p85v = regulator_get(&msm8625_mipi_dsi_device.dev,
+		gpio_reg_2p85v = regulator_get(&mipi_dsi_device.dev,
 								"lcd_vdd");
 		if (IS_ERR(gpio_reg_2p85v)) {
 			pr_err("%s:ext_2p85v regulator get failed", __func__);
 			return -EINVAL;
 		}
 
-		gpio_reg_1p8v = regulator_get(&msm8625_mipi_dsi_device.dev,
+		gpio_reg_1p8v = regulator_get(&mipi_dsi_device.dev,
 								"lcd_vddi");
 		if (IS_ERR(gpio_reg_1p8v)) {
 			pr_err("%s:ext_1p8v regulator get failed", __func__);
diff --git a/arch/arm/mach-msm/board-msm7627a-storage.c b/arch/arm/mach-msm/board-msm7627a-storage.c
index e2184f4..49ff393 100644
--- a/arch/arm/mach-msm/board-msm7627a-storage.c
+++ b/arch/arm/mach-msm/board-msm7627a-storage.c
@@ -233,8 +233,7 @@
 	return rc;
 }
 
-#if defined(CONFIG_MMC_MSM_SDC1_SUPPORT) \
-	&& defined(CONFIG_MMC_MSM_CARD_HW_DETECTION)
+#ifdef CONFIG_MMC_MSM_SDC1_SUPPORT
 static unsigned int msm7627a_sdcc_slot_status(struct device *dev)
 {
 	int status;
@@ -266,9 +265,7 @@
 	}
 	return status;
 }
-#endif
 
-#ifdef CONFIG_MMC_MSM_SDC1_SUPPORT
 static struct mmc_platform_data sdc1_plat_data = {
 	.ocr_mask       = MMC_VDD_28_29,
 	.translate_vdd  = msm_sdcc_setup_power,
@@ -276,10 +273,8 @@
 	.msmsdcc_fmin   = 144000,
 	.msmsdcc_fmid   = 24576000,
 	.msmsdcc_fmax   = 49152000,
-#ifdef CONFIG_MMC_MSM_CARD_HW_DETECTION
 	.status      = msm7627a_sdcc_slot_status,
 	.irq_flags   = IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
-#endif
 };
 #endif
 
@@ -382,13 +377,11 @@
 	gpio_sdc1_config();
 	if (mmc_regulator_init(1, "mmc", 2850000))
 		return;
-#ifdef CONFIG_MMC_MSM_CARD_HW_DETECTION
 	/* 8x25 EVT do not use hw detector */
 	if (!(machine_is_msm8625_evt()))
 		sdc1_plat_data.status_irq = MSM_GPIO_TO_INT(gpio_sdc1_hw_det);
 	if (machine_is_msm8625_evt())
 		sdc1_plat_data.status = NULL;
-#endif
 
 	msm_add_sdcc(1, &sdc1_plat_data);
 #endif
diff --git a/arch/arm/mach-msm/board-msm7x27a.c b/arch/arm/mach-msm/board-msm7x27a.c
index dc473e6..7db4bda 100644
--- a/arch/arm/mach-msm/board-msm7x27a.c
+++ b/arch/arm/mach-msm/board-msm7x27a.c
@@ -50,6 +50,7 @@
 #include <linux/atmel_maxtouch.h>
 #include <linux/fmem.h>
 #include <linux/msm_adc.h>
+#include <linux/ion.h>
 #include "devices.h"
 #include "timer.h"
 #include "board-msm7x27a-regulator.h"
@@ -167,6 +168,15 @@
 
 #endif
 
+#ifdef CONFIG_ION_MSM
+#define MSM_ION_HEAP_NUM        4
+static struct platform_device ion_dev;
+static int msm_ion_camera_size;
+static int msm_ion_audio_size;
+static int msm_ion_sf_size;
+#endif
+
+
 static struct android_usb_platform_data android_usb_pdata = {
 	.update_pid_and_serial_num = usb_diag_update_pid_and_serial_num,
 };
@@ -822,6 +832,9 @@
 	&asoc_msm_dai1,
 	&msm_batt_device,
 	&msm_adc_device,
+#ifdef CONFIG_ION_MSM
+	&ion_dev,
+#endif
 };
 
 static struct platform_device *msm8625_surf_devices[] __initdata = {
@@ -853,6 +866,81 @@
 }
 early_param("pmem_audio_size", pmem_audio_size_setup);
 
+static void fix_sizes(void)
+{
+	if (machine_is_msm7625a_surf() || machine_is_msm7625a_ffa()) {
+		pmem_mdp_size = MSM7x25A_MSM_PMEM_MDP_SIZE;
+		pmem_adsp_size = MSM7x25A_MSM_PMEM_ADSP_SIZE;
+	} else {
+		pmem_mdp_size = MSM_PMEM_MDP_SIZE;
+		pmem_adsp_size = MSM_PMEM_ADSP_SIZE;
+	}
+#ifdef CONFIG_ION_MSM
+	msm_ion_camera_size = pmem_adsp_size;
+	msm_ion_audio_size = (MSM_PMEM_AUDIO_SIZE + PMEM_KERNEL_EBI1_SIZE);
+	msm_ion_sf_size = pmem_mdp_size;
+#endif
+}
+
+#ifdef CONFIG_ION_MSM
+#ifdef CONFIG_MSM_MULTIMEDIA_USE_ION
+static struct ion_co_heap_pdata co_ion_pdata = {
+	.adjacent_mem_id = INVALID_HEAP_ID,
+	.align = PAGE_SIZE,
+};
+#endif
+
+/*
+ * These heaps are listed in the order they will be allocated.
+ * Don't swap the order unless you know what you are doing!
+ */
+static struct ion_platform_data ion_pdata = {
+	.nr = MSM_ION_HEAP_NUM,
+	.heaps = {
+		{
+			.id	= ION_SYSTEM_HEAP_ID,
+			.type	= ION_HEAP_TYPE_SYSTEM,
+			.name	= ION_VMALLOC_HEAP_NAME,
+		},
+#ifdef CONFIG_MSM_MULTIMEDIA_USE_ION
+		/* PMEM_ADSP = CAMERA */
+		{
+			.id	= ION_CAMERA_HEAP_ID,
+			.type	= ION_HEAP_TYPE_CARVEOUT,
+			.name	= ION_CAMERA_HEAP_NAME,
+			.memory_type = ION_EBI_TYPE,
+			.has_outer_cache = 1,
+			.extra_data = (void *)&co_ion_pdata,
+		},
+		/* PMEM_AUDIO */
+		{
+			.id	= ION_AUDIO_HEAP_ID,
+			.type	= ION_HEAP_TYPE_CARVEOUT,
+			.name	= ION_AUDIO_HEAP_NAME,
+			.memory_type = ION_EBI_TYPE,
+			.has_outer_cache = 1,
+			.extra_data = (void *)&co_ion_pdata,
+		},
+		/* PMEM_MDP = SF */
+		{
+			.id	= ION_SF_HEAP_ID,
+			.type	= ION_HEAP_TYPE_CARVEOUT,
+			.name	= ION_SF_HEAP_NAME,
+			.memory_type = ION_EBI_TYPE,
+			.has_outer_cache = 1,
+			.extra_data = (void *)&co_ion_pdata,
+		},
+#endif
+	}
+};
+
+static struct platform_device ion_dev = {
+	.name = "ion-msm",
+	.id = 1,
+	.dev = { .platform_data = &ion_pdata },
+};
+#endif
+
 static struct memtype_reserve msm7x27a_reserve_table[] __initdata = {
 	[MEMTYPE_SMI] = {
 	},
@@ -865,27 +953,22 @@
 };
 
 #ifdef CONFIG_ANDROID_PMEM
+#ifndef CONFIG_MSM_MULTIMEDIA_USE_ION
 static struct android_pmem_platform_data *pmem_pdata_array[] __initdata = {
 		&android_pmem_adsp_pdata,
 		&android_pmem_audio_pdata,
 		&android_pmem_pdata,
 };
 #endif
+#endif
 
 static void __init size_pmem_devices(void)
 {
 #ifdef CONFIG_ANDROID_PMEM
+#ifndef CONFIG_MSM_MULTIMEDIA_USE_ION
 	unsigned int i;
 	unsigned int reusable_count = 0;
 
-	if (machine_is_msm7625a_surf() || machine_is_msm7625a_ffa()) {
-		pmem_mdp_size = MSM7x25A_MSM_PMEM_MDP_SIZE;
-		pmem_adsp_size = MSM7x25A_MSM_PMEM_ADSP_SIZE;
-	} else {
-		pmem_mdp_size = MSM_PMEM_MDP_SIZE;
-		pmem_adsp_size = MSM_PMEM_ADSP_SIZE;
-	}
-
 	android_pmem_adsp_pdata.size = pmem_adsp_size;
 	android_pmem_pdata.size = pmem_mdp_size;
 	android_pmem_audio_pdata.size = pmem_audio_size;
@@ -910,29 +993,56 @@
 		}
 	}
 #endif
-
+#endif
 }
 
+#ifdef CONFIG_ANDROID_PMEM
+#ifndef CONFIG_MSM_MULTIMEDIA_USE_ION
 static void __init reserve_memory_for(struct android_pmem_platform_data *p)
 {
 	msm7x27a_reserve_table[p->memory_type].size += p->size;
 }
+#endif
+#endif
 
 static void __init reserve_pmem_memory(void)
 {
 #ifdef CONFIG_ANDROID_PMEM
+#ifndef CONFIG_MSM_MULTIMEDIA_USE_ION
 	unsigned int i;
 	for (i = 0; i < ARRAY_SIZE(pmem_pdata_array); ++i)
 		reserve_memory_for(pmem_pdata_array[i]);
 
 	msm7x27a_reserve_table[MEMTYPE_EBI1].size += pmem_kernel_ebi1_size;
 #endif
+#endif
+}
+
+static void __init size_ion_devices(void)
+{
+#ifdef CONFIG_MSM_MULTIMEDIA_USE_ION
+	ion_pdata.heaps[1].size = msm_ion_camera_size;
+	ion_pdata.heaps[2].size = msm_ion_audio_size;
+	ion_pdata.heaps[3].size = msm_ion_sf_size;
+#endif
+}
+
+static void __init reserve_ion_memory(void)
+{
+#if defined(CONFIG_ION_MSM) && defined(CONFIG_MSM_MULTIMEDIA_USE_ION)
+	msm7x27a_reserve_table[MEMTYPE_EBI1].size += msm_ion_camera_size;
+	msm7x27a_reserve_table[MEMTYPE_EBI1].size += msm_ion_audio_size;
+	msm7x27a_reserve_table[MEMTYPE_EBI1].size += msm_ion_sf_size;
+#endif
 }
 
 static void __init msm7x27a_calculate_reserve_sizes(void)
 {
+	fix_sizes();
 	size_pmem_devices();
 	reserve_pmem_memory();
+	size_ion_devices();
+	reserve_ion_memory();
 }
 
 static int msm7x27a_paddr_to_memtype(unsigned int paddr)
diff --git a/arch/arm/mach-msm/board-msm7x30.c b/arch/arm/mach-msm/board-msm7x30.c
index 2834f24..bb94474 100644
--- a/arch/arm/mach-msm/board-msm7x30.c
+++ b/arch/arm/mach-msm/board-msm7x30.c
@@ -255,7 +255,6 @@
 		}
 	};
 
-#ifdef CONFIG_MMC_MSM_CARD_HW_DETECTION
 	struct pm8xxx_gpio_init_info sdcc_det = {
 		PM8058_GPIO_PM_TO_SYS(PMIC_GPIO_SD_DET - 1),
 		{
@@ -275,7 +274,6 @@
 		pr_err("%s PMIC_GPIO_SD_DET config failed\n", __func__);
 		return rc;
 	}
-#endif
 
 	if (machine_is_msm8x55_svlte_surf() || machine_is_msm8x55_svlte_ffa() ||
 						machine_is_msm7x30_fluid())
@@ -6206,14 +6204,12 @@
 #endif
 
 #ifdef CONFIG_MMC_MSM_SDC4_SUPPORT
-#ifdef CONFIG_MMC_MSM_CARD_HW_DETECTION
 static unsigned int msm7x30_sdcc_slot_status(struct device *dev)
 {
 	return (unsigned int)
 		gpio_get_value_cansleep(
 			PM8058_GPIO_PM_TO_SYS(PMIC_GPIO_SD_DET - 1));
 }
-#endif
 
 static int msm_sdcc_get_wpswitch(struct device *dv)
 {
@@ -6302,11 +6298,9 @@
 	.ocr_mask	= MMC_VDD_27_28 | MMC_VDD_28_29,
 	.translate_vdd	= msm_sdcc_setup_power,
 	.mmc_bus_width  = MMC_CAP_4_BIT_DATA,
-#ifdef CONFIG_MMC_MSM_CARD_HW_DETECTION
 	.status      = msm7x30_sdcc_slot_status,
 	.status_irq  = PM8058_GPIO_IRQ(PMIC8058_IRQ_BASE, PMIC_GPIO_SD_DET - 1),
 	.irq_flags   = IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
-#endif
 	.wpswitch    = msm_sdcc_get_wpswitch,
 	.msmsdcc_fmin	= 144000,
 	.msmsdcc_fmid	= 24576000,
diff --git a/arch/arm/mach-msm/board-msm8x60.c b/arch/arm/mach-msm/board-msm8x60.c
index 098ad6e..7fd07af 100644
--- a/arch/arm/mach-msm/board-msm8x60.c
+++ b/arch/arm/mach-msm/board-msm8x60.c
@@ -5133,6 +5133,81 @@
 	&asoc_msm_dai1,
 };
 
+/* qseecom bus scaling */
+static struct msm_bus_vectors qseecom_clks_init_vectors[] = {
+	{
+		.src = MSM_BUS_MASTER_SPS,
+		.dst = MSM_BUS_SLAVE_EBI_CH0,
+		.ib = 0,
+		.ab = 0,
+	},
+	{
+		.src = MSM_BUS_MASTER_SPDM,
+		.dst = MSM_BUS_SLAVE_SPDM,
+		.ib = 0,
+		.ab = 0,
+	},
+};
+
+static struct msm_bus_vectors qseecom_enable_dfab_vectors[] = {
+	{
+		.src = MSM_BUS_MASTER_SPS,
+		.dst = MSM_BUS_SLAVE_EBI_CH0,
+		.ib = (492 * 8) * 1000000UL,
+		.ab = (492 * 8) *  100000UL,
+	},
+	{
+		.src = MSM_BUS_MASTER_SPDM,
+		.dst = MSM_BUS_SLAVE_SPDM,
+		.ib = 0,
+		.ab = 0,
+	},
+};
+
+static struct msm_bus_vectors qseecom_enable_sfpb_vectors[] = {
+	{
+		.src = MSM_BUS_MASTER_SPS,
+		.dst = MSM_BUS_SLAVE_EBI_CH0,
+		.ib = 0,
+		.ab = 0,
+	},
+	{
+		.src = MSM_BUS_MASTER_SPDM,
+		.dst = MSM_BUS_SLAVE_SPDM,
+		.ib = (64 * 8) * 1000000UL,
+		.ab = (64 * 8) *  100000UL,
+	},
+};
+
+static struct msm_bus_paths qseecom_hw_bus_scale_usecases[] = {
+	{
+		ARRAY_SIZE(qseecom_clks_init_vectors),
+		qseecom_clks_init_vectors,
+	},
+	{
+		ARRAY_SIZE(qseecom_enable_dfab_vectors),
+		qseecom_enable_dfab_vectors,
+	},
+	{
+		ARRAY_SIZE(qseecom_enable_sfpb_vectors),
+		qseecom_enable_sfpb_vectors,
+	},
+};
+
+static struct msm_bus_scale_pdata qseecom_bus_pdata = {
+	.usecase = qseecom_hw_bus_scale_usecases,
+	.num_usecases = ARRAY_SIZE(qseecom_hw_bus_scale_usecases),
+	.name = "qsee",
+};
+
+static struct platform_device qseecom_device = {
+	.name		= "qseecom",
+	.id		= -1,
+	.dev		= {
+		.platform_data = &qseecom_bus_pdata,
+	},
+};
+
 static struct platform_device *surf_devices[] __initdata = {
 	&msm8x60_device_acpuclk,
 	&msm_device_smd,
@@ -5141,6 +5216,8 @@
 	&msm_pil_modem,
 	&msm_pil_tzapps,
 	&msm_pil_dsps,
+	&msm_pil_vidc,
+	&qseecom_device,
 #ifdef CONFIG_I2C_QUP
 	&msm_gsbi3_qup_i2c_device,
 	&msm_gsbi4_qup_i2c_device,
@@ -5291,6 +5368,7 @@
 	&msm8660_device_watchdog,
 	&msm_device_tz_log,
 	&msm_rtb_device,
+	&msm8660_iommu_domain_device,
 };
 
 #ifdef CONFIG_ION_MSM
@@ -5320,7 +5398,7 @@
 	.align = PAGE_SIZE,
 };
 
-static struct ion_co_heap_pdata hole_co_ion_pdata = {
+static struct ion_co_heap_pdata mm_fw_co_ion_pdata = {
 	.adjacent_mem_id = ION_CP_MM_HEAP_ID,
 };
 
@@ -5363,10 +5441,10 @@
 			.id	= ION_MM_FIRMWARE_HEAP_ID,
 			.type	= ION_HEAP_TYPE_CARVEOUT,
 			.name	= ION_MM_FIRMWARE_HEAP_NAME,
-			.base	= MSM_ION_HOLE_BASE,
-			.size	= MSM_ION_HOLE_SIZE,
+			.base	= MSM_MM_FW_BASE,
+			.size	= MSM_MM_FW_SIZE,
 			.memory_type = ION_SMI_TYPE,
-			.extra_data = (void *) &hole_co_ion_pdata,
+			.extra_data = (void *) &mm_fw_co_ion_pdata,
 		},
 		{
 			.id	= ION_CP_MFC_HEAP_ID,
@@ -5692,7 +5770,6 @@
 				.inv_int_pol    = 0,
 			},
 		},
-#ifdef CONFIG_MMC_MSM_CARD_HW_DETECTION
 		{
 			PM8058_GPIO_PM_TO_SYS(PMIC_GPIO_SDC3_DET - 1),
 			{
@@ -5703,7 +5780,6 @@
 				.inv_int_pol    = 0,
 			},
 		},
-#endif
 		{ /* core&surf gpio expander */
 			PM8058_GPIO_PM_TO_SYS(UI_INT1_N),
 			{
@@ -8353,7 +8429,6 @@
 }
 
 #ifdef CONFIG_MMC_MSM_SDC3_SUPPORT
-#ifdef CONFIG_MMC_MSM_CARD_HW_DETECTION
 static unsigned int msm8x60_sdcc_slot_status(struct device *dev)
 {
 	int status;
@@ -8375,7 +8450,6 @@
 }
 #endif
 #endif
-#endif
 
 #define MSM_MPM_PIN_SDC3_DAT1	21
 #define MSM_MPM_PIN_SDC4_DAT1	23
@@ -8421,12 +8495,10 @@
 	.translate_vdd  = msm_sdcc_setup_power,
 	.mmc_bus_width  = MMC_CAP_4_BIT_DATA,
 	.wpswitch  	= msm_sdc3_get_wpswitch,
-#ifdef CONFIG_MMC_MSM_CARD_HW_DETECTION
 	.status      = msm8x60_sdcc_slot_status,
 	.status_irq  = PM8058_GPIO_IRQ(PM8058_IRQ_BASE,
 				       PMIC_GPIO_SDC3_DET - 1),
 	.irq_flags   = IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
-#endif
 	.msmsdcc_fmin	= 400000,
 	.msmsdcc_fmid	= 24000000,
 	.msmsdcc_fmax	= 48000000,
diff --git a/arch/arm/mach-msm/board-qrd7627a.c b/arch/arm/mach-msm/board-qrd7627a.c
index 9c80c8b..5b9ea36 100644
--- a/arch/arm/mach-msm/board-qrd7627a.c
+++ b/arch/arm/mach-msm/board-qrd7627a.c
@@ -33,6 +33,7 @@
 #include <linux/msm_adc.h>
 #include <linux/fmem.h>
 #include <linux/regulator/msm-gpio-regulator.h>
+#include <linux/ion.h>
 #include <asm/mach/mmc.h>
 #include <asm/mach-types.h>
 #include <asm/mach/arch.h>
@@ -131,6 +132,14 @@
 #ifdef CONFIG_ARCH_MSM7X27A
 #define MSM_PMEM_MDP_SIZE       0x2300000
 #define MSM_PMEM_ADSP_SIZE      0x1200000
+
+#define MSM_ION_AUDIO_SIZE	(MSM_PMEM_AUDIO_SIZE + PMEM_KERNEL_EBI1_SIZE)
+#define MSM_ION_CAMERA_SIZE	MSM_PMEM_ADSP_SIZE
+#define MSM_ION_SF_SIZE		MSM_PMEM_MDP_SIZE
+#define MSM_ION_HEAP_NUM	4
+#ifdef CONFIG_ION_MSM
+static struct platform_device ion_dev;
+#endif
 #endif
 
 static struct android_usb_platform_data android_usb_pdata = {
@@ -677,7 +686,7 @@
 };
 
 /* GPIO regulator */
-static struct platform_device qrd_msm8625_vreg_gpio_ext_2p85v __devinitdata = {
+static struct platform_device qrd_vreg_gpio_ext_2p85v __devinitdata = {
 	.name	= GPIO_REGULATOR_DEV_NAME,
 	.id	= 35,
 	.dev	= {
@@ -686,7 +695,7 @@
 	},
 };
 
-static struct platform_device qrd_msm8625_vreg_gpio_ext_1p8v __devinitdata = {
+static struct platform_device qrd_vreg_gpio_ext_1p8v __devinitdata = {
 	.name	= GPIO_REGULATOR_DEV_NAME,
 	.id	= 40,
 	.dev	= {
@@ -708,6 +717,9 @@
 	&asoc_msm_dai1,
 	&msm_adc_device,
 	&fmem_device,
+#ifdef CONFIG_ION_MSM
+	&ion_dev,
+#endif
 };
 
 static struct platform_device *qrd7627a_devices[] __initdata = {
@@ -720,6 +732,8 @@
 	&msm_device_otg,
 	&msm_device_gadget_peripheral,
 	&msm_kgsl_3d0,
+	&qrd_vreg_gpio_ext_2p85v,
+	&qrd_vreg_gpio_ext_1p8v,
 };
 
 static struct platform_device *qrd3_devices[] __initdata = {
@@ -736,8 +750,8 @@
 	&msm8625_device_otg,
 	&msm8625_device_gadget_peripheral,
 	&msm8625_kgsl_3d0,
-	&qrd_msm8625_vreg_gpio_ext_2p85v,
-	&qrd_msm8625_vreg_gpio_ext_1p8v,
+	&qrd_vreg_gpio_ext_2p85v,
+	&qrd_vreg_gpio_ext_1p8v,
 };
 
 static unsigned pmem_kernel_ebi1_size = PMEM_KERNEL_EBI1_SIZE;
@@ -756,6 +770,68 @@
 }
 early_param("pmem_audio_size", pmem_audio_size_setup);
 
+#ifdef CONFIG_ION_MSM
+#ifdef CONFIG_MSM_MULTIMEDIA_USE_ION
+static struct ion_co_heap_pdata co_ion_pdata = {
+	.adjacent_mem_id = INVALID_HEAP_ID,
+	.align = PAGE_SIZE,
+};
+#endif
+
+/*
+ * These heaps are listed in the order they will be allocated.
+ * Don't swap the order unless you know what you are doing!
+ */
+static struct ion_platform_data ion_pdata = {
+	.nr = MSM_ION_HEAP_NUM,
+	.heaps = {
+		{
+			.id	= ION_SYSTEM_HEAP_ID,
+			.type	= ION_HEAP_TYPE_SYSTEM,
+			.name	= ION_VMALLOC_HEAP_NAME,
+		},
+#ifdef CONFIG_MSM_MULTIMEDIA_USE_ION
+		/* PMEM_ADSP = CAMERA */
+		{
+			.id	= ION_CAMERA_HEAP_ID,
+			.type	= ION_HEAP_TYPE_CARVEOUT,
+			.name	= ION_CAMERA_HEAP_NAME,
+			.size	= MSM_ION_CAMERA_SIZE,
+			.memory_type = ION_EBI_TYPE,
+			.has_outer_cache = 1,
+			.extra_data = (void *)&co_ion_pdata,
+		},
+		/* PMEM_AUDIO */
+		{
+			.id	= ION_AUDIO_HEAP_ID,
+			.type	= ION_HEAP_TYPE_CARVEOUT,
+			.name	= ION_AUDIO_HEAP_NAME,
+			.size	= MSM_ION_AUDIO_SIZE,
+			.memory_type = ION_EBI_TYPE,
+			.has_outer_cache = 1,
+			.extra_data = (void *)&co_ion_pdata,
+		},
+		/* PMEM_MDP = SF */
+		{
+			.id	= ION_SF_HEAP_ID,
+			.type	= ION_HEAP_TYPE_CARVEOUT,
+			.name	= ION_SF_HEAP_NAME,
+			.size	= MSM_ION_SF_SIZE,
+			.memory_type = ION_EBI_TYPE,
+			.has_outer_cache = 1,
+			.extra_data = (void *)&co_ion_pdata,
+		},
+#endif
+	}
+};
+
+static struct platform_device ion_dev = {
+	.name = "ion-msm",
+	.id = 1,
+	.dev = { .platform_data = &ion_pdata },
+};
+#endif
+
 static struct memtype_reserve msm7627a_reserve_table[] __initdata = {
 	[MEMTYPE_SMI] = {
 	},
@@ -768,16 +844,19 @@
 };
 
 #ifdef CONFIG_ANDROID_PMEM
+#ifndef CONFIG_MSM_MULTIMEDIA_USE_ION
 static struct android_pmem_platform_data *pmem_pdata_array[] __initdata = {
 		&android_pmem_adsp_pdata,
 		&android_pmem_audio_pdata,
 		&android_pmem_pdata,
 };
 #endif
+#endif
 
 static void __init size_pmem_devices(void)
 {
 #ifdef CONFIG_ANDROID_PMEM
+#ifndef CONFIG_MSM_MULTIMEDIA_USE_ION
 	unsigned int i;
 	unsigned int reusable_count = 0;
 
@@ -804,30 +883,46 @@
 			pdata->reusable = 0;
 		}
 	}
-
+#endif
 #endif
 }
 
+#ifdef CONFIG_ANDROID_PMEM
+#ifndef CONFIG_MSM_MULTIMEDIA_USE_ION
 static void __init reserve_memory_for(struct android_pmem_platform_data *p)
 {
 	msm7627a_reserve_table[p->memory_type].size += p->size;
 }
+#endif
+#endif
 
 static void __init reserve_pmem_memory(void)
 {
 #ifdef CONFIG_ANDROID_PMEM
+#ifndef CONFIG_MSM_MULTIMEDIA_USE_ION
 	unsigned int i;
 	for (i = 0; i < ARRAY_SIZE(pmem_pdata_array); ++i)
 		reserve_memory_for(pmem_pdata_array[i]);
 
 	msm7627a_reserve_table[MEMTYPE_EBI1].size += pmem_kernel_ebi1_size;
 #endif
+#endif
+}
+
+static void __init reserve_ion_memory(void)
+{
+#if defined(CONFIG_ION_MSM) && defined(CONFIG_MSM_MULTIMEDIA_USE_ION)
+	msm7627a_reserve_table[MEMTYPE_EBI1].size += MSM_ION_CAMERA_SIZE;
+	msm7627a_reserve_table[MEMTYPE_EBI1].size += MSM_ION_AUDIO_SIZE;
+	msm7627a_reserve_table[MEMTYPE_EBI1].size += MSM_ION_SF_SIZE;
+#endif
 }
 
 static void __init msm7627a_calculate_reserve_sizes(void)
 {
 	size_pmem_devices();
 	reserve_pmem_memory();
+	reserve_ion_memory();
 }
 
 static int msm7627a_paddr_to_memtype(unsigned int paddr)
diff --git a/arch/arm/mach-msm/cache_erp.c b/arch/arm/mach-msm/cache_erp.c
index 4d7ce12..c3302ec 100644
--- a/arch/arm/mach-msm/cache_erp.c
+++ b/arch/arm/mach-msm/cache_erp.c
@@ -90,7 +90,7 @@
 
 #define MODULE_NAME "msm_cache_erp"
 
-#define ERP_LOG_MAGIC_ADDR	0x748
+#define ERP_LOG_MAGIC_ADDR	0x6A4
 #define ERP_LOG_MAGIC		0x11C39893
 
 struct msm_l1_err_stats {
diff --git a/arch/arm/mach-msm/clock-7x30.c b/arch/arm/mach-msm/clock-7x30.c
index aa94be6..f3ac7d7 100644
--- a/arch/arm/mach-msm/clock-7x30.c
+++ b/arch/arm/mach-msm/clock-7x30.c
@@ -200,7 +200,7 @@
 #define PCOM_XO_TCXO	0
 #define PCOM_XO_LPXO	1
 
-static bool pcom_is_local(struct clk *clk)
+static bool pcom_is_local(struct clk *c)
 {
 	return false;
 }
@@ -2328,60 +2328,53 @@
 	},
 };
 
-static DEFINE_CLK_PCOM(adsp_clk, ADSP_CLK, CLKFLAG_SKIP_AUTO_OFF);
-static DEFINE_CLK_PCOM(codec_ssbi_clk,	CODEC_SSBI_CLK, CLKFLAG_SKIP_AUTO_OFF);
-static DEFINE_CLK_PCOM(ebi1_clk, EBI1_CLK, CLKFLAG_SKIP_AUTO_OFF | CLKFLAG_MIN);
-static DEFINE_CLK_PCOM(ebi1_fixed_clk, EBI1_FIXED_CLK, CLKFLAG_MIN |
-						       CLKFLAG_SKIP_AUTO_OFF);
-static DEFINE_CLK_PCOM(ecodec_clk, ECODEC_CLK, CLKFLAG_SKIP_AUTO_OFF);
-static DEFINE_CLK_PCOM(gp_clk, GP_CLK, CLKFLAG_SKIP_AUTO_OFF);
+static DEFINE_CLK_PCOM(adsp_clk, ADSP_CLK, 0);
+static DEFINE_CLK_PCOM(codec_ssbi_clk,	CODEC_SSBI_CLK, 0);
+static DEFINE_CLK_PCOM(ebi1_clk, EBI1_CLK, CLKFLAG_MIN);
+static DEFINE_CLK_PCOM(ebi1_fixed_clk, EBI1_FIXED_CLK, CLKFLAG_MIN);
+static DEFINE_CLK_PCOM(ecodec_clk, ECODEC_CLK, 0);
+static DEFINE_CLK_PCOM(gp_clk, GP_CLK, 0);
 static DEFINE_CLK_PCOM(uart3_clk, UART3_CLK, 0);
 static DEFINE_CLK_PCOM(usb_phy_clk, USB_PHY_CLK, CLKFLAG_MIN);
 
-static DEFINE_CLK_PCOM(p_grp_2d_clk, GRP_2D_CLK, CLKFLAG_SKIP_AUTO_OFF);
-static DEFINE_CLK_PCOM(p_grp_2d_p_clk, GRP_2D_P_CLK, CLKFLAG_SKIP_AUTO_OFF);
-static DEFINE_CLK_PCOM(p_hdmi_clk, HDMI_CLK, CLKFLAG_SKIP_AUTO_OFF);
+static DEFINE_CLK_PCOM(p_grp_2d_clk, GRP_2D_CLK, 0);
+static DEFINE_CLK_PCOM(p_grp_2d_p_clk, GRP_2D_P_CLK, 0);
+static DEFINE_CLK_PCOM(p_hdmi_clk, HDMI_CLK, 0);
 static DEFINE_CLK_PCOM(p_jpeg_clk, JPEG_CLK, CLKFLAG_MIN);
 static DEFINE_CLK_PCOM(p_jpeg_p_clk, JPEG_P_CLK, 0);
-static DEFINE_CLK_PCOM(p_lpa_codec_clk, LPA_CODEC_CLK, CLKFLAG_SKIP_AUTO_OFF);
-static DEFINE_CLK_PCOM(p_lpa_core_clk, LPA_CORE_CLK, CLKFLAG_SKIP_AUTO_OFF);
-static DEFINE_CLK_PCOM(p_lpa_p_clk, LPA_P_CLK, CLKFLAG_SKIP_AUTO_OFF);
-static DEFINE_CLK_PCOM(p_mi2s_m_clk, MI2S_M_CLK, CLKFLAG_SKIP_AUTO_OFF);
-static DEFINE_CLK_PCOM(p_mi2s_s_clk, MI2S_S_CLK, CLKFLAG_SKIP_AUTO_OFF);
-static DEFINE_CLK_PCOM(p_mi2s_codec_rx_m_clk, MI2S_CODEC_RX_M_CLK,
-		CLKFLAG_SKIP_AUTO_OFF);
-static DEFINE_CLK_PCOM(p_mi2s_codec_rx_s_clk, MI2S_CODEC_RX_S_CLK,
-		CLKFLAG_SKIP_AUTO_OFF);
-static DEFINE_CLK_PCOM(p_mi2s_codec_tx_m_clk, MI2S_CODEC_TX_M_CLK,
-		CLKFLAG_SKIP_AUTO_OFF);
-static DEFINE_CLK_PCOM(p_mi2s_codec_tx_s_clk, MI2S_CODEC_TX_S_CLK,
-		CLKFLAG_SKIP_AUTO_OFF);
+static DEFINE_CLK_PCOM(p_lpa_codec_clk, LPA_CODEC_CLK, 0);
+static DEFINE_CLK_PCOM(p_lpa_core_clk, LPA_CORE_CLK, 0);
+static DEFINE_CLK_PCOM(p_lpa_p_clk, LPA_P_CLK, 0);
+static DEFINE_CLK_PCOM(p_mi2s_m_clk, MI2S_M_CLK, 0);
+static DEFINE_CLK_PCOM(p_mi2s_s_clk, MI2S_S_CLK, 0);
+static DEFINE_CLK_PCOM(p_mi2s_codec_rx_m_clk, MI2S_CODEC_RX_M_CLK, 0);
+static DEFINE_CLK_PCOM(p_mi2s_codec_rx_s_clk, MI2S_CODEC_RX_S_CLK, 0);
+static DEFINE_CLK_PCOM(p_mi2s_codec_tx_m_clk, MI2S_CODEC_TX_M_CLK, 0);
+static DEFINE_CLK_PCOM(p_mi2s_codec_tx_s_clk, MI2S_CODEC_TX_S_CLK, 0);
 static DEFINE_CLK_PCOM(p_sdac_clk, SDAC_CLK, 0);
 static DEFINE_CLK_PCOM(p_sdac_m_clk, SDAC_M_CLK, 0);
-static DEFINE_CLK_PCOM(p_vfe_clk, VFE_CLK, CLKFLAG_SKIP_AUTO_OFF);
-static DEFINE_CLK_PCOM(p_vfe_camif_clk, VFE_CAMIF_CLK, CLKFLAG_SKIP_AUTO_OFF);
-static DEFINE_CLK_PCOM(p_vfe_mdc_clk, VFE_MDC_CLK, CLKFLAG_SKIP_AUTO_OFF);
+static DEFINE_CLK_PCOM(p_vfe_clk, VFE_CLK, 0);
+static DEFINE_CLK_PCOM(p_vfe_camif_clk, VFE_CAMIF_CLK, 0);
+static DEFINE_CLK_PCOM(p_vfe_mdc_clk, VFE_MDC_CLK, 0);
 static DEFINE_CLK_PCOM(p_vfe_p_clk, VFE_P_CLK, 0);
-static DEFINE_CLK_PCOM(p_grp_3d_clk, GRP_3D_CLK, CLKFLAG_SKIP_AUTO_OFF);
-static DEFINE_CLK_PCOM(p_grp_3d_p_clk, GRP_3D_P_CLK, CLKFLAG_SKIP_AUTO_OFF);
+static DEFINE_CLK_PCOM(p_grp_3d_clk, GRP_3D_CLK, 0);
+static DEFINE_CLK_PCOM(p_grp_3d_p_clk, GRP_3D_P_CLK, 0);
 static DEFINE_CLK_PCOM(p_imem_clk, IMEM_CLK, 0);
-static DEFINE_CLK_PCOM(p_mdp_lcdc_pad_pclk_clk, MDP_LCDC_PAD_PCLK_CLK,
-		CLKFLAG_SKIP_AUTO_OFF);
-static DEFINE_CLK_PCOM(p_mdp_lcdc_pclk_clk, MDP_LCDC_PCLK_CLK,
-		CLKFLAG_SKIP_AUTO_OFF);
-static DEFINE_CLK_PCOM(p_mdp_p_clk, MDP_P_CLK, CLKFLAG_SKIP_AUTO_OFF);
+static DEFINE_CLK_PCOM(p_mdp_lcdc_pad_pclk_clk, MDP_LCDC_PAD_PCLK_CLK, 0);
+static DEFINE_CLK_PCOM(p_mdp_lcdc_pclk_clk, MDP_LCDC_PCLK_CLK, 0);
+static DEFINE_CLK_PCOM(p_mdp_p_clk, MDP_P_CLK, 0);
 static DEFINE_CLK_PCOM(p_mdp_vsync_clk, MDP_VSYNC_CLK, 0);
-static DEFINE_CLK_PCOM(p_tsif_ref_clk, TSIF_REF_CLK, CLKFLAG_SKIP_AUTO_OFF);
-static DEFINE_CLK_PCOM(p_tsif_p_clk, TSIF_P_CLK, CLKFLAG_SKIP_AUTO_OFF);
-static DEFINE_CLK_PCOM(p_tv_dac_clk, TV_DAC_CLK, CLKFLAG_SKIP_AUTO_OFF);
-static DEFINE_CLK_PCOM(p_tv_enc_clk, TV_ENC_CLK, CLKFLAG_SKIP_AUTO_OFF);
+static DEFINE_CLK_PCOM(p_tsif_ref_clk, TSIF_REF_CLK, 0);
+static DEFINE_CLK_PCOM(p_tsif_p_clk, TSIF_P_CLK, 0);
+static DEFINE_CLK_PCOM(p_tv_dac_clk, TV_DAC_CLK, 0);
+static DEFINE_CLK_PCOM(p_tv_enc_clk, TV_ENC_CLK, 0);
 static DEFINE_CLK_PCOM(p_emdh_clk, EMDH_CLK, CLKFLAG_MIN | CLKFLAG_MAX);
 static DEFINE_CLK_PCOM(p_emdh_p_clk, EMDH_P_CLK, 0);
-static DEFINE_CLK_PCOM(p_i2c_clk, I2C_CLK, CLKFLAG_SKIP_AUTO_OFF);
-static DEFINE_CLK_PCOM(p_i2c_2_clk, I2C_2_CLK, CLKFLAG_SKIP_AUTO_OFF);
-static DEFINE_CLK_PCOM(p_mdc_clk, MDC_CLK, CLKFLAG_SKIP_AUTO_OFF);
+static DEFINE_CLK_PCOM(p_i2c_clk, I2C_CLK, 0);
+static DEFINE_CLK_PCOM(p_i2c_2_clk, I2C_2_CLK, 0);
+static DEFINE_CLK_PCOM(p_mdc_clk, MDC_CLK, 0);
 static DEFINE_CLK_PCOM(p_pmdh_clk, PMDH_CLK, CLKFLAG_MIN | CLKFLAG_MAX);
-static DEFINE_CLK_PCOM(p_pmdh_p_clk, PMDH_P_CLK, CLKFLAG_SKIP_AUTO_OFF);
+static DEFINE_CLK_PCOM(p_pmdh_p_clk, PMDH_P_CLK, 0);
 static DEFINE_CLK_PCOM(p_sdc1_clk, SDC1_CLK, 0);
 static DEFINE_CLK_PCOM(p_sdc1_p_clk, SDC1_P_CLK, 0);
 static DEFINE_CLK_PCOM(p_sdc2_clk, SDC2_CLK, 0);
@@ -2390,36 +2383,35 @@
 static DEFINE_CLK_PCOM(p_sdc3_p_clk, SDC3_P_CLK, 0);
 static DEFINE_CLK_PCOM(p_sdc4_clk, SDC4_CLK, 0);
 static DEFINE_CLK_PCOM(p_sdc4_p_clk, SDC4_P_CLK, 0);
-static DEFINE_CLK_PCOM(p_uart2_clk, UART2_CLK, CLKFLAG_SKIP_AUTO_OFF);
+static DEFINE_CLK_PCOM(p_uart2_clk, UART2_CLK, 0);
 static DEFINE_CLK_PCOM(p_usb_hs2_clk, USB_HS2_CLK, 0);
 static DEFINE_CLK_PCOM(p_usb_hs2_core_clk, USB_HS2_CORE_CLK, 0);
 static DEFINE_CLK_PCOM(p_usb_hs2_p_clk, USB_HS2_P_CLK, 0);
 static DEFINE_CLK_PCOM(p_usb_hs3_clk, USB_HS3_CLK, 0);
 static DEFINE_CLK_PCOM(p_usb_hs3_core_clk, USB_HS3_CORE_CLK, 0);
 static DEFINE_CLK_PCOM(p_usb_hs3_p_clk, USB_HS3_P_CLK, 0);
-static DEFINE_CLK_PCOM(p_qup_i2c_clk, QUP_I2C_CLK, CLKFLAG_SKIP_AUTO_OFF);
-static DEFINE_CLK_PCOM(p_spi_clk, SPI_CLK, CLKFLAG_SKIP_AUTO_OFF);
-static DEFINE_CLK_PCOM(p_spi_p_clk, SPI_P_CLK, CLKFLAG_SKIP_AUTO_OFF);
+static DEFINE_CLK_PCOM(p_qup_i2c_clk, QUP_I2C_CLK, 0);
+static DEFINE_CLK_PCOM(p_spi_clk, SPI_CLK, 0);
+static DEFINE_CLK_PCOM(p_spi_p_clk, SPI_P_CLK, 0);
 static DEFINE_CLK_PCOM(p_uart1_clk, UART1_CLK, 0);
 static DEFINE_CLK_PCOM(p_uart1dm_clk, UART1DM_CLK, 0);
-static DEFINE_CLK_PCOM(p_uart2dm_clk, UART2DM_CLK, CLKFLAG_SKIP_AUTO_OFF);
+static DEFINE_CLK_PCOM(p_uart2dm_clk, UART2DM_CLK, 0);
 static DEFINE_CLK_PCOM(p_usb_hs_clk, USB_HS_CLK, 0);
 static DEFINE_CLK_PCOM(p_usb_hs_core_clk, USB_HS_CORE_CLK, 0);
 static DEFINE_CLK_PCOM(p_usb_hs_p_clk, USB_HS_P_CLK, 0);
-static DEFINE_CLK_PCOM(p_cam_m_clk, CAM_M_CLK, CLKFLAG_SKIP_AUTO_OFF);
+static DEFINE_CLK_PCOM(p_cam_m_clk, CAM_M_CLK, 0);
 static DEFINE_CLK_PCOM(p_camif_pad_p_clk, CAMIF_PAD_P_CLK, 0);
-static DEFINE_CLK_PCOM(p_csi0_clk, CSI0_CLK, CLKFLAG_SKIP_AUTO_OFF);
-static DEFINE_CLK_PCOM(p_csi0_vfe_clk, CSI0_VFE_CLK, CLKFLAG_SKIP_AUTO_OFF);
-static DEFINE_CLK_PCOM(p_csi0_p_clk, CSI0_P_CLK, CLKFLAG_SKIP_AUTO_OFF);
+static DEFINE_CLK_PCOM(p_csi0_clk, CSI0_CLK, 0);
+static DEFINE_CLK_PCOM(p_csi0_vfe_clk, CSI0_VFE_CLK, 0);
+static DEFINE_CLK_PCOM(p_csi0_p_clk, CSI0_P_CLK, 0);
 static DEFINE_CLK_PCOM(p_mdp_clk, MDP_CLK, CLKFLAG_MIN);
-static DEFINE_CLK_PCOM(p_mfc_clk, MFC_CLK, CLKFLAG_SKIP_AUTO_OFF);
-static DEFINE_CLK_PCOM(p_mfc_div2_clk, MFC_DIV2_CLK, CLKFLAG_SKIP_AUTO_OFF);
-static DEFINE_CLK_PCOM(p_mfc_p_clk, MFC_P_CLK, CLKFLAG_SKIP_AUTO_OFF);
-static DEFINE_CLK_PCOM(p_vpe_clk, VPE_CLK, CLKFLAG_SKIP_AUTO_OFF);
-static DEFINE_CLK_PCOM(p_adm_clk, ADM_CLK, CLKFLAG_SKIP_AUTO_OFF);
-static DEFINE_CLK_PCOM(p_ce_clk, CE_CLK, CLKFLAG_SKIP_AUTO_OFF);
-static DEFINE_CLK_PCOM(p_axi_rotator_clk, AXI_ROTATOR_CLK,
-		CLKFLAG_SKIP_AUTO_OFF);
+static DEFINE_CLK_PCOM(p_mfc_clk, MFC_CLK, 0);
+static DEFINE_CLK_PCOM(p_mfc_div2_clk, MFC_DIV2_CLK, 0);
+static DEFINE_CLK_PCOM(p_mfc_p_clk, MFC_P_CLK, 0);
+static DEFINE_CLK_PCOM(p_vpe_clk, VPE_CLK, 0);
+static DEFINE_CLK_PCOM(p_adm_clk, ADM_CLK, 0);
+static DEFINE_CLK_PCOM(p_ce_clk, CE_CLK, 0);
+static DEFINE_CLK_PCOM(p_axi_rotator_clk, AXI_ROTATOR_CLK, 0);
 static DEFINE_CLK_PCOM(p_rotator_imem_clk, ROTATOR_IMEM_CLK, 0);
 static DEFINE_CLK_PCOM(p_rotator_p_clk, ROTATOR_P_CLK, 0);
 
@@ -2441,7 +2433,7 @@
 
 struct measure_sel {
 	u32 test_vector;
-	struct clk *clk;
+	struct clk *c;
 };
 
 static struct measure_sel measure_mux[] = {
@@ -2538,17 +2530,17 @@
 	{ CLK_TEST_LS(0x3F), &usb_hs_clk.c },
 };
 
-static struct measure_sel *find_measure_sel(struct clk *clk)
+static struct measure_sel *find_measure_sel(struct clk *c)
 {
 	int i;
 
 	for (i = 0; i < ARRAY_SIZE(measure_mux); i++)
-		if (measure_mux[i].clk == clk)
+		if (measure_mux[i].c == c)
 			return &measure_mux[i];
 	return NULL;
 }
 
-static int measure_clk_set_parent(struct clk *clk, struct clk *parent)
+static int measure_clk_set_parent(struct clk *c, struct clk *parent)
 {
 	struct measure_sel *p;
 	unsigned long flags;
@@ -2599,7 +2591,7 @@
 
 /* Perform a hardware rate measurement for a given clock.
    FOR DEBUG USE ONLY: Measurements take ~15 ms! */
-static unsigned long measure_clk_get_rate(struct clk *clk)
+static unsigned long measure_clk_get_rate(struct clk *c)
 {
 	unsigned long flags;
 	u32 regval, prph_web_reg_old;
@@ -2647,12 +2639,12 @@
 	return ret;
 }
 #else /* !CONFIG_DEBUG_FS */
-static int measure_clk_set_parent(struct clk *clk, struct clk *parent)
+static int measure_clk_set_parent(struct clk *c, struct clk *parent)
 {
 	return -EINVAL;
 }
 
-static unsigned long measure_clk_get_rate(struct clk *clk)
+static unsigned long measure_clk_get_rate(struct clk *c)
 {
 	return 0;
 }
@@ -2670,14 +2662,14 @@
 };
 
 /* Implementation for clk_set_flags(). */
-int soc_clk_set_flags(struct clk *clk, unsigned clk_flags)
+int soc_clk_set_flags(struct clk *c, unsigned clk_flags)
 {
 	uint32_t regval, ret = 0;
 	unsigned long flags;
 
 	spin_lock_irqsave(&local_clock_reg_lock, flags);
 
-	if (clk == &vfe_clk.c) {
+	if (c == &vfe_clk.c) {
 		regval = readl_relaxed(CAM_VFE_NS_REG);
 		/* Flag values chosen for backward compatibility
 		 * with proc_comm remote clock control. */
@@ -2701,17 +2693,15 @@
 	return ret;
 }
 
-static int msm7x30_clk_reset(struct clk *clk, enum clk_reset_action action)
+static int msm7x30_clk_reset(struct clk *c, enum clk_reset_action action)
 {
 	/* reset_mask is actually a proc_comm id */
-	unsigned id = to_rcg_clk(clk)->b.reset_mask;
-	return pc_clk_reset(id, action);
+	return pc_clk_reset(to_rcg_clk(c)->b.reset_mask, action);
 }
 
-static int soc_branch_clk_reset(struct clk *clk, enum clk_reset_action action)
+static int soc_branch_clk_reset(struct clk *c, enum clk_reset_action action)
 {
-	unsigned id = to_branch_clk(clk)->b.reset_mask;
-	return pc_clk_reset(id, action);
+	return pc_clk_reset(to_branch_clk(c)->b.reset_mask, action);
 }
 
 /*
diff --git a/arch/arm/mach-msm/clock-8960.c b/arch/arm/mach-msm/clock-8960.c
index 7866fc7..fc91232 100644
--- a/arch/arm/mach-msm/clock-8960.c
+++ b/arch/arm/mach-msm/clock-8960.c
@@ -2858,9 +2858,9 @@
 	struct clk c;
 };
 
-static inline struct pix_rdi_clk *to_pix_rdi_clk(struct clk *clk)
+static inline struct pix_rdi_clk *to_pix_rdi_clk(struct clk *c)
 {
-	return container_of(clk, struct pix_rdi_clk, c);
+	return container_of(c, struct pix_rdi_clk, c);
 }
 
 static int pix_rdi_clk_set_rate(struct clk *c, unsigned long rate)
@@ -2868,7 +2868,7 @@
 	int ret, i;
 	u32 reg;
 	unsigned long flags;
-	struct pix_rdi_clk *clk = to_pix_rdi_clk(c);
+	struct pix_rdi_clk *rdi = to_pix_rdi_clk(c);
 	struct clk **mux_map = pix_rdi_mux_map;
 
 	/*
@@ -2889,32 +2889,32 @@
 		goto err;
 	}
 	/* Keep the new source on when switching inputs of an enabled clock */
-	if (clk->enabled) {
-		clk_disable(mux_map[clk->cur_rate]);
+	if (rdi->enabled) {
+		clk_disable(mux_map[rdi->cur_rate]);
 		clk_enable(mux_map[rate]);
 	}
 	spin_lock_irqsave(&local_clock_reg_lock, flags);
-	reg = readl_relaxed(clk->s2_reg);
-	reg &= ~clk->s2_mask;
-	reg |= rate == 2 ? clk->s2_mask : 0;
-	writel_relaxed(reg, clk->s2_reg);
+	reg = readl_relaxed(rdi->s2_reg);
+	reg &= ~rdi->s2_mask;
+	reg |= rate == 2 ? rdi->s2_mask : 0;
+	writel_relaxed(reg, rdi->s2_reg);
 	/*
 	 * Wait at least 6 cycles of slowest clock
 	 * for the glitch-free MUX to fully switch sources.
 	 */
 	mb();
 	udelay(1);
-	reg = readl_relaxed(clk->s_reg);
-	reg &= ~clk->s_mask;
-	reg |= rate == 1 ? clk->s_mask : 0;
-	writel_relaxed(reg, clk->s_reg);
+	reg = readl_relaxed(rdi->s_reg);
+	reg &= ~rdi->s_mask;
+	reg |= rate == 1 ? rdi->s_mask : 0;
+	writel_relaxed(reg, rdi->s_reg);
 	/*
 	 * Wait at least 6 cycles of slowest clock
 	 * for the glitch-free MUX to fully switch sources.
 	 */
 	mb();
 	udelay(1);
-	clk->cur_rate = rate;
+	rdi->cur_rate = rate;
 	spin_unlock_irqrestore(&local_clock_reg_lock, flags);
 err:
 	for (i--; i >= 0; i--)
@@ -2931,12 +2931,12 @@
 static int pix_rdi_clk_enable(struct clk *c)
 {
 	unsigned long flags;
-	struct pix_rdi_clk *clk = to_pix_rdi_clk(c);
+	struct pix_rdi_clk *rdi = to_pix_rdi_clk(c);
 
 	spin_lock_irqsave(&local_clock_reg_lock, flags);
-	__branch_clk_enable_reg(&clk->b, clk->c.dbg_name);
+	__branch_enable_reg(&rdi->b, rdi->c.dbg_name);
 	spin_unlock_irqrestore(&local_clock_reg_lock, flags);
-	clk->enabled = true;
+	rdi->enabled = true;
 
 	return 0;
 }
@@ -2944,24 +2944,22 @@
 static void pix_rdi_clk_disable(struct clk *c)
 {
 	unsigned long flags;
-	struct pix_rdi_clk *clk = to_pix_rdi_clk(c);
+	struct pix_rdi_clk *rdi = to_pix_rdi_clk(c);
 
 	spin_lock_irqsave(&local_clock_reg_lock, flags);
-	__branch_clk_disable_reg(&clk->b, clk->c.dbg_name);
+	__branch_disable_reg(&rdi->b, rdi->c.dbg_name);
 	spin_unlock_irqrestore(&local_clock_reg_lock, flags);
-	clk->enabled = false;
+	rdi->enabled = false;
 }
 
-static int pix_rdi_clk_reset(struct clk *clk, enum clk_reset_action action)
+static int pix_rdi_clk_reset(struct clk *c, enum clk_reset_action action)
 {
-	return branch_reset(&to_pix_rdi_clk(clk)->b, action);
+	return branch_reset(&to_pix_rdi_clk(c)->b, action);
 }
 
 static struct clk *pix_rdi_clk_get_parent(struct clk *c)
 {
-	struct pix_rdi_clk *clk = to_pix_rdi_clk(c);
-
-	return pix_rdi_mux_map[clk->cur_rate];
+	return pix_rdi_mux_map[to_pix_rdi_clk(c)->cur_rate];
 }
 
 static int pix_rdi_clk_list_rate(struct clk *c, unsigned n)
@@ -2974,17 +2972,17 @@
 static enum handoff pix_rdi_clk_handoff(struct clk *c)
 {
 	u32 reg;
-	struct pix_rdi_clk *clk = to_pix_rdi_clk(c);
+	struct pix_rdi_clk *rdi = to_pix_rdi_clk(c);
 	enum handoff ret;
 
-	ret = branch_handoff(&clk->b, &clk->c);
+	ret = branch_handoff(&rdi->b, &rdi->c);
 	if (ret == HANDOFF_DISABLED_CLK)
 		return ret;
 
-	reg = readl_relaxed(clk->s_reg);
-	clk->cur_rate = reg & clk->s_mask ? 1 : 0;
-	reg = readl_relaxed(clk->s2_reg);
-	clk->cur_rate = reg & clk->s2_mask ? 2 : clk->cur_rate;
+	reg = readl_relaxed(rdi->s_reg);
+	rdi->cur_rate = reg & rdi->s_mask ? 1 : 0;
+	reg = readl_relaxed(rdi->s2_reg);
+	rdi->cur_rate = reg & rdi->s2_mask ? 2 : rdi->cur_rate;
 
 	return HANDOFF_ENABLED_CLK;
 }
@@ -2992,7 +2990,6 @@
 static struct clk_ops clk_ops_pix_rdi_8960 = {
 	.enable = pix_rdi_clk_enable,
 	.disable = pix_rdi_clk_disable,
-	.auto_off = pix_rdi_clk_disable,
 	.handoff = pix_rdi_clk_handoff,
 	.set_rate = pix_rdi_clk_set_rate,
 	.get_rate = pix_rdi_clk_get_rate,
@@ -3897,7 +3894,7 @@
 	},
 };
 
-static int hdmi_pll_clk_enable(struct clk *clk)
+static int hdmi_pll_clk_enable(struct clk *c)
 {
 	int ret;
 	unsigned long flags;
@@ -3907,7 +3904,7 @@
 	return ret;
 }
 
-static void hdmi_pll_clk_disable(struct clk *clk)
+static void hdmi_pll_clk_disable(struct clk *c)
 {
 	unsigned long flags;
 	spin_lock_irqsave(&local_clock_reg_lock, flags);
@@ -3915,12 +3912,12 @@
 	spin_unlock_irqrestore(&local_clock_reg_lock, flags);
 }
 
-static unsigned long hdmi_pll_clk_get_rate(struct clk *clk)
+static unsigned long hdmi_pll_clk_get_rate(struct clk *c)
 {
 	return hdmi_pll_get_rate();
 }
 
-static struct clk *hdmi_pll_clk_get_parent(struct clk *clk)
+static struct clk *hdmi_pll_clk_get_parent(struct clk *c)
 {
 	return &pxo_clk.c;
 }
@@ -3975,12 +3972,12 @@
  * Unlike other clocks, the TV rate is adjusted through PLL
  * re-programming. It is also routed through an MND divider.
  */
-void set_rate_tv(struct rcg_clk *clk, struct clk_freq_tbl *nf)
+void set_rate_tv(struct rcg_clk *rcg, struct clk_freq_tbl *nf)
 {
 	unsigned long pll_rate = (unsigned long)nf->extra_freq_data;
 	if (pll_rate)
 		hdmi_pll_set_rate(pll_rate);
-	set_rate_mnd(clk, nf);
+	set_rate_mnd(rcg, nf);
 }
 
 static struct rcg_clk tv_src_clk = {
@@ -4019,6 +4016,7 @@
 		.dbg_name = "tv_src_div_clk",
 		.ops = &clk_ops_cdiv,
 		CLK_INIT(tv_src_div_clk.c),
+		.rate = ULONG_MAX,
 	},
 };
 
@@ -4425,6 +4423,7 @@
 			.dbg_name = #i "_clk", \
 			.ops = &clk_ops_cdiv, \
 			CLK_INIT(i##_clk.c), \
+			.rate = ULONG_MAX, \
 		}, \
 	}
 
@@ -4444,6 +4443,7 @@
 			.dbg_name = #i "_clk", \
 			.ops = &clk_ops_cdiv, \
 			CLK_INIT(i##_clk.c), \
+			.rate = ULONG_MAX, \
 		}, \
 	}
 
@@ -4517,6 +4517,7 @@
 		.ops = &clk_ops_rcg,
 		VDD_DIG_FMAX_MAP1(LOW, 24576000),
 		CLK_INIT(pcm_clk.c),
+		.rate = ULONG_MAX,
 	},
 };
 
@@ -4585,6 +4586,7 @@
 DEFINE_CLK_RPM(mmfpb_clk, mmfpb_a_clk, MMFPB, NULL);
 DEFINE_CLK_RPM(sfab_clk, sfab_a_clk, SYSTEM_FABRIC, NULL);
 DEFINE_CLK_RPM(sfpb_clk, sfpb_a_clk, SFPB, NULL);
+DEFINE_CLK_RPM_QDSS(qdss_clk, qdss_a_clk);
 
 static DEFINE_CLK_VOTER(sfab_msmbus_a_clk, &sfab_a_clk.c, 0);
 static DEFINE_CLK_VOTER(sfab_tmr_a_clk, &sfab_a_clk.c, 0);
@@ -4616,7 +4618,7 @@
 #ifdef CONFIG_DEBUG_FS
 struct measure_sel {
 	u32 test_vector;
-	struct clk *clk;
+	struct clk *c;
 };
 
 static DEFINE_CLK_MEASURE(l2_m_clk);
@@ -4840,12 +4842,12 @@
 	{ TEST_CPUL2(0x5), &krait3_m_clk },
 };
 
-static struct measure_sel *find_measure_sel(struct clk *clk)
+static struct measure_sel *find_measure_sel(struct clk *c)
 {
 	int i;
 
 	for (i = 0; i < ARRAY_SIZE(measure_mux); i++)
-		if (measure_mux[i].clk == clk)
+		if (measure_mux[i].c == c)
 			return &measure_mux[i];
 	return NULL;
 }
@@ -4855,7 +4857,7 @@
 	int ret = 0;
 	u32 clk_sel;
 	struct measure_sel *p;
-	struct measure_clk *clk = to_measure_clk(c);
+	struct measure_clk *measure = to_measure_clk(c);
 	unsigned long flags;
 
 	if (!parent)
@@ -4871,9 +4873,9 @@
 	 * Program the test vector, measurement period (sample_ticks)
 	 * and scaling multiplier.
 	 */
-	clk->sample_ticks = 0x10000;
+	measure->sample_ticks = 0x10000;
 	clk_sel = p->test_vector & TEST_CLK_SEL_MASK;
-	clk->multiplier = 1;
+	measure->multiplier = 1;
 	switch (p->test_vector >> TEST_TYPE_SHIFT) {
 	case TEST_TYPE_PER_LS:
 		writel_relaxed(0x4030D00|BVAL(7, 0, clk_sel), CLK_TEST_REG);
@@ -4902,8 +4904,8 @@
 	case TEST_TYPE_CPUL2:
 		writel_relaxed(0x4030400, CLK_TEST_REG);
 		writel_relaxed(0x80|BVAL(5, 3, clk_sel), GCC_APCS_CLK_DIAG);
-		clk->sample_ticks = 0x4000;
-		clk->multiplier = 2;
+		measure->sample_ticks = 0x4000;
+		measure->multiplier = 2;
 		break;
 	default:
 		ret = -EPERM;
@@ -4946,7 +4948,7 @@
 	unsigned long flags;
 	u32 pdm_reg_backup, ringosc_reg_backup;
 	u64 raw_count_short, raw_count_full;
-	struct measure_clk *clk = to_measure_clk(c);
+	struct measure_clk *measure = to_measure_clk(c);
 	unsigned ret;
 
 	ret = clk_prepare_enable(&cxo_clk.c);
@@ -4973,7 +4975,7 @@
 	/* Run a short measurement. (~1 ms) */
 	raw_count_short = run_measurement(0x1000);
 	/* Run a full measurement. (~14 ms) */
-	raw_count_full = run_measurement(clk->sample_ticks);
+	raw_count_full = run_measurement(measure->sample_ticks);
 
 	writel_relaxed(ringosc_reg_backup, RINGOSC_NS_REG);
 	writel_relaxed(pdm_reg_backup, PDM_CLK_NS_REG);
@@ -4984,8 +4986,8 @@
 	else {
 		/* Compute rate in Hz. */
 		raw_count_full = ((raw_count_full * 10) + 15) * 4800000;
-		do_div(raw_count_full, ((clk->sample_ticks * 10) + 35));
-		ret = (raw_count_full * clk->multiplier);
+		do_div(raw_count_full, ((measure->sample_ticks * 10) + 35));
+		ret = (raw_count_full * measure->multiplier);
 	}
 
 	/* Route dbg_hs_clk to PLLTEST.  300mV single-ended amplitude. */
@@ -4997,12 +4999,12 @@
 	return ret;
 }
 #else /* !CONFIG_DEBUG_FS */
-static int measure_clk_set_parent(struct clk *clk, struct clk *parent)
+static int measure_clk_set_parent(struct clk *c, struct clk *parent)
 {
 	return -EINVAL;
 }
 
-static unsigned long measure_clk_get_rate(struct clk *clk)
+static unsigned long measure_clk_get_rate(struct clk *c)
 {
 	return 0;
 }
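
The debugfs measurement path above reduces to a fixed ratio: a raw tick count from the ring-oscillator counter is scaled against the 4.8 MHz debug reference. A minimal user-space sketch of that arithmetic, with the constants copied from the hunk and the raw count purely illustrative:

#include <stdint.h>
#include <stdio.h>

/* Mirrors the math in measure_clk_get_rate(); do_div() becomes plain 64-bit
 * division outside the kernel. */
static uint64_t ticks_to_hz(uint64_t raw_count_full, uint32_t sample_ticks,
			    uint32_t multiplier)
{
	uint64_t num = ((raw_count_full * 10) + 15) * 4800000ULL;
	uint64_t den = ((uint64_t)sample_ticks * 10) + 35;

	return (num / den) * multiplier;
}

int main(void)
{
	/* Hypothetical raw count for a ~19.2 MHz clock over 0x10000 ticks. */
	printf("%llu Hz\n",
	       (unsigned long long)ticks_to_hz(262144, 0x10000, 1));
	return 0;
}
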
@@ -5070,6 +5072,12 @@
 	CLK_LOOKUP("mem_a_clk",		ebi1_msmbus_a_clk.c,	"msm_bus"),
 	CLK_LOOKUP("dfab_clk",		dfab_msmbus_clk.c,	"msm_bus"),
 	CLK_LOOKUP("dfab_a_clk",	dfab_msmbus_a_clk.c,	"msm_bus"),
+	CLK_LOOKUP("core_a_clk",	qdss_a_clk.c,		""),
+	CLK_LOOKUP("core_clk",		qdss_clk.c,		"msm_etb.0"),
+	CLK_LOOKUP("core_clk",		qdss_clk.c,		"msm_tpiu.0"),
+	CLK_LOOKUP("core_clk",		qdss_clk.c,		"msm_funnel.0"),
+	CLK_LOOKUP("core_clk",		qdss_clk.c,		"msm_stm.0"),
+	CLK_LOOKUP("core_clk",		qdss_clk.c,		"msm_etm.0"),
 
 	CLK_LOOKUP("ebi1_clk",		ebi1_clk.c,		""),
 	CLK_LOOKUP("mmfpb_clk",		mmfpb_clk.c,		""),
@@ -5394,6 +5402,12 @@
 	CLK_LOOKUP("mem_a_clk",		ebi1_msmbus_a_clk.c,	"msm_bus"),
 	CLK_LOOKUP("dfab_clk",		dfab_msmbus_clk.c,	"msm_bus"),
 	CLK_LOOKUP("dfab_a_clk",	dfab_msmbus_a_clk.c,	"msm_bus"),
+	CLK_LOOKUP("core_a_clk",	qdss_a_clk.c,		""),
+	CLK_LOOKUP("core_clk",		qdss_clk.c,		"msm_etb.0"),
+	CLK_LOOKUP("core_clk",		qdss_clk.c,		"msm_tpiu.0"),
+	CLK_LOOKUP("core_clk",		qdss_clk.c,		"msm_funnel.0"),
+	CLK_LOOKUP("core_clk",		qdss_clk.c,		"msm_stm.0"),
+	CLK_LOOKUP("core_clk",		qdss_clk.c,		"msm_etm.0"),
 
 	CLK_LOOKUP("ebi1_clk",		ebi1_clk.c,		NULL),
 	CLK_LOOKUP("mmfpb_clk",		mmfpb_clk.c,		NULL),
@@ -5710,6 +5724,12 @@
 	CLK_LOOKUP("mem_a_clk",		ebi1_msmbus_a_clk.c,	"msm_bus"),
 	CLK_LOOKUP("dfab_clk",		dfab_msmbus_clk.c,	"msm_bus"),
 	CLK_LOOKUP("dfab_a_clk",	dfab_msmbus_a_clk.c,	"msm_bus"),
+	CLK_LOOKUP("core_a_clk",	qdss_a_clk.c,		""),
+	CLK_LOOKUP("core_clk",		qdss_clk.c,		"msm_etb.0"),
+	CLK_LOOKUP("core_clk",		qdss_clk.c,		"msm_tpiu.0"),
+	CLK_LOOKUP("core_clk",		qdss_clk.c,		"msm_funnel.0"),
+	CLK_LOOKUP("core_clk",		qdss_clk.c,		"msm_stm.0"),
+	CLK_LOOKUP("core_clk",		qdss_clk.c,		"msm_etm.0"),
 
 	CLK_LOOKUP("ebi1_clk",		ebi1_clk.c,		NULL),
 	CLK_LOOKUP("mmfpb_clk",		mmfpb_clk.c,		NULL),
@@ -6057,18 +6077,19 @@
 	 */
 	/*
 	 * Initialize MM AHB registers: Enable the FPB clock and disable HW
-	 * gating on non-8960 for all clocks. Also set VFE_AHB's
+	 * gating on 8627 for all clocks. Also set VFE_AHB's
 	 * FORCE_CORE_ON bit to prevent its memory from being collapsed when
 	 * the clock is halted. The sleep and wake-up delays are set to safe
 	 * values.
 	 */
-	if (cpu_is_msm8960() || cpu_is_apq8064()) {
-		rmwreg(0x44000000, AHB_EN_REG,  0x6C000103);
-		writel_relaxed(0x3C7097F9, AHB_EN2_REG);
-	} else {
+	if (cpu_is_msm8627()) {
 		rmwreg(0x00000003, AHB_EN_REG,  0x6C000103);
 		writel_relaxed(0x000007F9, AHB_EN2_REG);
+	} else {
+		rmwreg(0x44000000, AHB_EN_REG,  0x6C000103);
+		writel_relaxed(0x3C7097F9, AHB_EN2_REG);
 	}
+
 	if (cpu_is_apq8064())
 		rmwreg(0x00000001, AHB_EN3_REG, 0x00000001);
 
@@ -6080,25 +6101,26 @@
 	 * support it. Also set FORCE_CORE_ON bits, and any sleep and wake-up
 	 * delays to safe values. */
 	if ((cpu_is_msm8960() &&
-			SOCINFO_VERSION_MAJOR(socinfo_get_version()) >= 3) ||
-			cpu_is_apq8064()) {
-		rmwreg(0x0003AFF9, MAXI_EN_REG,  0x0803FFFF);
-		rmwreg(0x3A27FCFF, MAXI_EN2_REG, 0x3A3FFFFF);
-		rmwreg(0x0027FCFF, MAXI_EN4_REG, 0x017FFFFF);
-	} else {
+			SOCINFO_VERSION_MAJOR(socinfo_get_version()) < 3) ||
+			cpu_is_msm8627()) {
 		rmwreg(0x000007F9, MAXI_EN_REG,  0x0803FFFF);
 		rmwreg(0x3027FCFF, MAXI_EN2_REG, 0x3A3FFFFF);
-		rmwreg(0x0027FCFF, MAXI_EN4_REG, 0x017FFFFF);
+	} else {
+		rmwreg(0x0003AFF9, MAXI_EN_REG,  0x0803FFFF);
+		rmwreg(0x3A27FCFF, MAXI_EN2_REG, 0x3A3FFFFF);
 	}
+
 	rmwreg(0x0027FCFF, MAXI_EN3_REG, 0x003FFFFF);
+	rmwreg(0x0027FCFF, MAXI_EN4_REG, 0x017FFFFF);
+
 	if (cpu_is_apq8064())
 		rmwreg(0x019FECFF, MAXI_EN5_REG, 0x01FFEFFF);
-	if (cpu_is_msm8930())
+	if (cpu_is_msm8930() || cpu_is_msm8627())
 		rmwreg(0x000004FF, MAXI_EN5_REG, 0x00000FFF);
-	if (cpu_is_msm8960() || cpu_is_apq8064())
-		rmwreg(0x00003C38, SAXI_EN_REG,  0x00003FFF);
-	else
+	if (cpu_is_msm8627())
 		rmwreg(0x000003C7, SAXI_EN_REG,  0x00003FFF);
+	else
+		rmwreg(0x00003C38, SAXI_EN_REG,  0x00003FFF);
 
 	/* Enable IMEM's clk_on signal */
 	imem_reg = ioremap(0x04b00040, 4);
@@ -6129,7 +6151,7 @@
 		rmwreg(0x80FF0000, DSI2_PIXEL_CC_REG, 0xE0FF0010);
 		rmwreg(0x80FF0000, JPEGD_CC_REG,      0xE0FF0010);
 	}
-	if (cpu_is_msm8960() || cpu_is_msm8930())
+	if (cpu_is_msm8960() || cpu_is_msm8930() || cpu_is_msm8627())
 		rmwreg(0x80FF0000, TV_CC_REG,         0xE1FFC010);
 
 	if (cpu_is_msm8960()) {
@@ -6221,7 +6243,7 @@
 	 * Program PLL15 to 900MHz with ref clk = 27MHz and
 	 * only enable PLL main output.
 	 */
-	if (cpu_is_msm8930()) {
+	if (cpu_is_msm8930() || cpu_is_msm8627()) {
 		pll15_config.l = 0x21 | BVAL(31, 7, 0x600);
 		pll15_config.m = 0x1;
 		pll15_config.n = 0x3;
@@ -6265,7 +6287,7 @@
 	 * Change the freq tables and voltage requirements for
 	 * clocks which differ between 8960 and 8930.
 	 */
-	if (cpu_is_msm8930()) {
+	if (cpu_is_msm8930() || cpu_is_msm8627()) {
 		gfx3d_clk.freq_tbl = clk_tbl_gfx3d_8930;
 
 		memcpy(gfx3d_clk.c.fmax, fmax_gfx3d_8930,
@@ -6310,7 +6332,7 @@
 		clk_set_rate(&usb_hs4_xcvr_clk.c, 60000000);
 	}
 	clk_set_rate(&usb_fs1_src_clk.c, 60000000);
-	if (cpu_is_msm8960() || cpu_is_msm8930())
+	if (cpu_is_msm8960() || cpu_is_msm8930() || cpu_is_msm8627())
 		clk_set_rate(&usb_fs2_src_clk.c, 60000000);
 	clk_set_rate(&usb_hsic_xcvr_fs_clk.c, 60000000);
 	clk_set_rate(&usb_hsic_hsic_src_clk.c, 480000000);
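
The ".rate = ULONG_MAX" initializers added to the cdiv and pcm clocks above seed the cached rate with an impossible value, so the first real clk_set_rate() is never skipped as a no-op. A sketch of the caching behaviour this guards against (the wrapper below is illustrative, not the framework's actual clk_set_rate()):

#include <limits.h>
#include <stdio.h>

struct clk {
	const char *dbg_name;
	unsigned long rate;	/* cached rate, ULONG_MAX == "never set" */
};

static int program_hardware(struct clk *c, unsigned long rate)
{
	printf("%s: programming %lu Hz\n", c->dbg_name, rate);
	return 0;
}

static int clk_set_rate_sketch(struct clk *c, unsigned long rate)
{
	if (rate == c->rate)		/* rate unchanged, nothing to do */
		return 0;

	c->rate = rate;
	return program_hardware(c, rate);
}

int main(void)
{
	struct clk pcm = { "pcm_clk", ULONG_MAX };

	clk_set_rate_sketch(&pcm, 512000);	/* first call reaches the HW */
	clk_set_rate_sketch(&pcm, 512000);	/* second call is a no-op */
	return 0;
}
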
diff --git a/arch/arm/mach-msm/clock-copper.c b/arch/arm/mach-msm/clock-8974.c
similarity index 98%
rename from arch/arm/mach-msm/clock-copper.c
rename to arch/arm/mach-msm/clock-8974.c
index 87a8998..a81343a 100644
--- a/arch/arm/mach-msm/clock-copper.c
+++ b/arch/arm/mach-msm/clock-8974.c
@@ -588,39 +588,29 @@
 
 static DEFINE_VDD_CLASS(vdd_dig, set_vdd_dig);
 
-static int cxo_clk_enable(struct clk *clk)
-{
-	/* TODO: Remove from here once the rpm xo clock is ready. */
-	return 0;
-}
+#define RPM_MISC_CLK_TYPE 0x306b6c63
+#define RPM_BUS_CLK_TYPE  0x316b6c63
+#define RPM_MEM_CLK_TYPE  0x326b6c63
 
-static void cxo_clk_disable(struct clk *clk)
-{
-	/* TODO: Remove from here once the rpm xo clock is ready. */
-	return;
-}
+#define CXO_ID		0x0
 
-static enum handoff cxo_clk_handoff(struct clk *clk)
-{
-	/* TODO: Remove from here once the rpm xo clock is ready. */
-	return HANDOFF_ENABLED_CLK;
-}
+#define PNOC_ID		0x0
+#define SNOC_ID		0x1
+#define CNOC_ID		0x2
 
-static struct clk_ops clk_ops_cxo = {
-	.enable = cxo_clk_enable,
-	.disable = cxo_clk_disable,
-	.handoff = cxo_clk_handoff,
-};
+#define BIMC_ID		0x0
+#define OCMEM_ID	0x1
 
-static struct fixed_clk cxo_clk_src = {
-	.c = {
-		.rate = 19200000,
-		.dbg_name = "cxo_clk_src",
-		.ops = &clk_ops_cxo,
-		.warned = true,
-		CLK_INIT(cxo_clk_src.c),
-	},
-};
+DEFINE_CLK_RPM_SMD(pnoc_clk, pnoc_a_clk, RPM_BUS_CLK_TYPE, PNOC_ID, NULL);
+DEFINE_CLK_RPM_SMD(snoc_clk, snoc_a_clk, RPM_BUS_CLK_TYPE, SNOC_ID, NULL);
+DEFINE_CLK_RPM_SMD(cnoc_clk, cnoc_a_clk, RPM_BUS_CLK_TYPE, CNOC_ID, NULL);
+
+DEFINE_CLK_RPM_SMD(bimc_clk, bimc_a_clk, RPM_MEM_CLK_TYPE, BIMC_ID, NULL);
+DEFINE_CLK_RPM_SMD(ocmemgx_clk, ocmemgx_a_clk, RPM_MEM_CLK_TYPE, OCMEM_ID,
+			NULL);
+
+DEFINE_CLK_RPM_SMD_BRANCH(cxo_clk_src, cxo_a_clk_src,
+				RPM_MISC_CLK_TYPE, CXO_ID, 19200000);
 
 static struct pll_vote_clk gpll0_clk_src = {
 	.en_reg = (void __iomem *)APCS_GPLL_ENA_VOTE_REG,
@@ -715,24 +705,6 @@
 	},
 };
 
-#define RPM_BUS_CLK_TYPE  0x316b6c63
-#define RPM_MEM_CLK_TYPE  0x326b6c63
-
-#define PNOC_ID		0x0
-#define SNOC_ID		0x1
-#define CNOC_ID		0x2
-
-#define BIMC_ID		0x0
-#define OCMEM_ID	0x1
-
-DEFINE_CLK_RPM_SMD(pnoc_clk, pnoc_a_clk, RPM_BUS_CLK_TYPE, PNOC_ID, NULL);
-DEFINE_CLK_RPM_SMD(snoc_clk, snoc_a_clk, RPM_BUS_CLK_TYPE, SNOC_ID, NULL);
-DEFINE_CLK_RPM_SMD(cnoc_clk, cnoc_a_clk, RPM_BUS_CLK_TYPE, CNOC_ID, NULL);
-
-DEFINE_CLK_RPM_SMD(bimc_clk, bimc_a_clk, RPM_MEM_CLK_TYPE, BIMC_ID, NULL);
-DEFINE_CLK_RPM_SMD(ocmemgx_clk, ocmemgx_a_clk, RPM_MEM_CLK_TYPE, OCMEM_ID,
-			NULL);
-
 static DEFINE_CLK_VOTER(pnoc_msmbus_clk, &pnoc_clk.c, LONG_MAX);
 static DEFINE_CLK_VOTER(snoc_msmbus_clk, &snoc_clk.c, LONG_MAX);
 static DEFINE_CLK_VOTER(cnoc_msmbus_clk, &cnoc_clk.c, LONG_MAX);
@@ -4605,7 +4577,7 @@
 	.multiplier = 1,
 };
 
-static struct clk_lookup msm_clocks_copper[] = {
+static struct clk_lookup msm_clocks_8974[] = {
 	CLK_LOOKUP("xo",	cxo_clk_src.c,	"msm_otg"),
 	CLK_LOOKUP("xo",	cxo_clk_src.c,	"pil-q6v5-lpass"),
 	CLK_LOOKUP("xo",	cxo_clk_src.c,	"pil-q6v5-mss"),
@@ -4778,6 +4750,12 @@
 	CLK_LOOKUP("iface_clk", venus0_ahb_clk.c, "fdc84000.qcom,iommu"),
 	CLK_LOOKUP("core_clk", venus0_axi_clk.c, "fdc84000.qcom,iommu"),
 	CLK_LOOKUP("bus_clk", venus0_axi_clk.c, ""),
+	CLK_LOOKUP("src_clk",  vcodec0_clk_src.c, "fdce0000.qcom,venus"),
+	CLK_LOOKUP("core_clk", venus0_vcodec0_clk.c, "fdce0000.qcom,venus"),
+	CLK_LOOKUP("iface_clk",  venus0_ahb_clk.c, "fdce0000.qcom,venus"),
+	CLK_LOOKUP("bus_clk",  venus0_axi_clk.c, "fdce0000.qcom,venus"),
+	CLK_LOOKUP("mem_clk",  venus0_ocmemnoc_clk.c, "fdce0000.qcom,venus"),
+
 
 	/* LPASS clocks */
 	CLK_LOOKUP("core_clk", audio_core_slimbus_core_clk.c, "fe12f000.slim"),
@@ -5042,9 +5020,16 @@
 	writel_relaxed(0x0, GCC_REG_BASE(APCS_CLOCK_SLEEP_ENA_VOTE));
 }
 
-static void __init msmcopper_clock_post_init(void)
+static void __init msm8974_clock_post_init(void)
 {
 	clk_set_rate(&axi_clk_src.c, 333330000);
+	clk_set_rate(&ocmemnoc_clk_src.c, 333330000);
+
+	/*
+	 * Hold an active set vote for CXO; this is because CXO is expected
+	 * to remain on whenever CPUs aren't power collapsed.
+	 */
+	clk_prepare_enable(&cxo_a_clk_src.c);
 
 	/* Set rates for single-rate clocks. */
 	clk_set_rate(&usb30_master_clk_src.c,
@@ -5087,32 +5072,32 @@
 #define MSS_CC_PHYS	0xFC980000
 #define MSS_CC_SIZE	SZ_16K
 
-static void __init msmcopper_clock_pre_init(void)
+static void __init msm8974_clock_pre_init(void)
 {
 	virt_bases[GCC_BASE] = ioremap(GCC_CC_PHYS, GCC_CC_SIZE);
 	if (!virt_bases[GCC_BASE])
-		panic("clock-copper: Unable to ioremap GCC memory!");
+		panic("clock-8974: Unable to ioremap GCC memory!");
 
 	virt_bases[MMSS_BASE] = ioremap(MMSS_CC_PHYS, MMSS_CC_SIZE);
 	if (!virt_bases[MMSS_BASE])
-		panic("clock-copper: Unable to ioremap MMSS_CC memory!");
+		panic("clock-8974: Unable to ioremap MMSS_CC memory!");
 
 	virt_bases[LPASS_BASE] = ioremap(LPASS_CC_PHYS, LPASS_CC_SIZE);
 	if (!virt_bases[LPASS_BASE])
-		panic("clock-copper: Unable to ioremap LPASS_CC memory!");
+		panic("clock-8974: Unable to ioremap LPASS_CC memory!");
 
 	virt_bases[MSS_BASE] = ioremap(MSS_CC_PHYS, MSS_CC_SIZE);
 	if (!virt_bases[MSS_BASE])
-		panic("clock-copper: Unable to ioremap MSS_CC memory!");
+		panic("clock-8974: Unable to ioremap MSS_CC memory!");
 
-	clk_ops_local_pll.enable = copper_pll_clk_enable;
+	clk_ops_local_pll.enable = msm8974_pll_clk_enable;
 
 	reg_init();
 }
 
-struct clock_init_data msmcopper_clock_init_data __initdata = {
-	.table = msm_clocks_copper,
-	.size = ARRAY_SIZE(msm_clocks_copper),
-	.pre_init = msmcopper_clock_pre_init,
-	.post_init = msmcopper_clock_post_init,
+struct clock_init_data msm8974_clock_init_data __initdata = {
+	.table = msm_clocks_8974,
+	.size = ARRAY_SIZE(msm_clocks_8974),
+	.pre_init = msm8974_clock_pre_init,
+	.post_init = msm8974_clock_post_init,
 };
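
The RPM_*_CLK_TYPE constants introduced at the top of clock-8974.c appear to be little-endian four-character codes ("clk0", "clk1", "clk2"). A quick sketch that decodes them, with the values copied from the hunk:

#include <stdint.h>
#include <stdio.h>

static void print_fourcc(const char *name, uint32_t id)
{
	/* Print the constant and its bytes, lowest byte first. */
	printf("%-18s 0x%08x -> \"%c%c%c%c\"\n", name, id,
	       (int)(id & 0xff), (int)((id >> 8) & 0xff),
	       (int)((id >> 16) & 0xff), (int)(id >> 24));
}

int main(void)
{
	print_fourcc("RPM_MISC_CLK_TYPE", 0x306b6c63);	/* "clk0" */
	print_fourcc("RPM_BUS_CLK_TYPE",  0x316b6c63);	/* "clk1" */
	print_fourcc("RPM_MEM_CLK_TYPE",  0x326b6c63);	/* "clk2" */
	return 0;
}
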
diff --git a/arch/arm/mach-msm/clock-8x60.c b/arch/arm/mach-msm/clock-8x60.c
index 74d71a2..4493ddc 100644
--- a/arch/arm/mach-msm/clock-8x60.c
+++ b/arch/arm/mach-msm/clock-8x60.c
@@ -341,24 +341,24 @@
 	},
 };
 
-static int pll4_clk_enable(struct clk *clk)
+static int pll4_clk_enable(struct clk *c)
 {
 	struct msm_rpm_iv_pair iv = { MSM_RPM_ID_PLL_4, 1 };
 	return msm_rpm_set_noirq(MSM_RPM_CTX_SET_0, &iv, 1);
 }
 
-static void pll4_clk_disable(struct clk *clk)
+static void pll4_clk_disable(struct clk *c)
 {
 	struct msm_rpm_iv_pair iv = { MSM_RPM_ID_PLL_4, 0 };
 	msm_rpm_set_noirq(MSM_RPM_CTX_SET_0, &iv, 1);
 }
 
-static struct clk *pll4_clk_get_parent(struct clk *clk)
+static struct clk *pll4_clk_get_parent(struct clk *c)
 {
 	return &pxo_clk.c;
 }
 
-static bool pll4_clk_is_local(struct clk *clk)
+static bool pll4_clk_is_local(struct clk *c)
 {
 	return false;
 }
@@ -397,7 +397,7 @@
 
 /* Unlike other clocks, the TV rate is adjusted through PLL
  * re-programming. It is also routed through an MND divider. */
-static void set_rate_tv(struct rcg_clk *clk, struct clk_freq_tbl *nf)
+static void set_rate_tv(struct rcg_clk *rcg, struct clk_freq_tbl *nf)
 {
 	struct pll_rate *rate = nf->extra_freq_data;
 	uint32_t pll_mode, pll_config, misc_cc2;
@@ -426,7 +426,7 @@
 	writel_relaxed(pll_config, MM_PLL2_CONFIG_REG);
 
 	/* Configure MND. */
-	set_rate_mnd(clk, nf);
+	set_rate_mnd(rcg, nf);
 
 	/* Configure hdmi_ref_clk to be equal to the TV clock rate. */
 	misc_cc2 = readl_relaxed(MISC_CC2_REG);
@@ -602,7 +602,6 @@
 	.c = {
 		.dbg_name = "smi_2x_axi_clk",
 		.ops = &clk_ops_branch,
-		.flags = CLKFLAG_SKIP_AUTO_OFF,
 		CLK_INIT(smi_2x_axi_clk.c),
 	},
 };
@@ -3022,6 +3021,7 @@
 			.dbg_name = #i "_clk", \
 			.ops = &clk_ops_cdiv, \
 			CLK_INIT(i##_clk.c), \
+			.rate = ULONG_MAX, \
 		}, \
 	}
 
@@ -3095,6 +3095,7 @@
 		.ops = &clk_ops_rcg,
 		VDD_DIG_FMAX_MAP1(LOW, 24580000),
 		CLK_INIT(pcm_clk.c),
+		.rate = ULONG_MAX,
 	},
 };
 
@@ -3133,7 +3134,7 @@
 #ifdef CONFIG_DEBUG_FS
 struct measure_sel {
 	u32 test_vector;
-	struct clk *clk;
+	struct clk *c;
 };
 
 static struct measure_sel measure_mux[] = {
@@ -3308,12 +3309,12 @@
 	{ TEST_SC(0x42), &l2_m_clk },
 };
 
-static struct measure_sel *find_measure_sel(struct clk *clk)
+static struct measure_sel *find_measure_sel(struct clk *c)
 {
 	int i;
 
 	for (i = 0; i < ARRAY_SIZE(measure_mux); i++)
-		if (measure_mux[i].clk == clk)
+		if (measure_mux[i].c == c)
 			return &measure_mux[i];
 	return NULL;
 }
@@ -3323,7 +3324,7 @@
 	int ret = 0;
 	u32 clk_sel;
 	struct measure_sel *p;
-	struct measure_clk *clk = to_measure_clk(c);
+	struct measure_clk *measure = to_measure_clk(c);
 	unsigned long flags;
 
 	if (!parent)
@@ -3340,9 +3341,9 @@
 	 * and scaling factors (multiplier, divider).
 	 */
 	clk_sel = p->test_vector & TEST_CLK_SEL_MASK;
-	clk->sample_ticks = 0x10000;
-	clk->multiplier = 1;
-	clk->divider = 1;
+	measure->sample_ticks = 0x10000;
+	measure->multiplier = 1;
+	measure->divider = 1;
 	switch (p->test_vector >> TEST_TYPE_SHIFT) {
 	case TEST_TYPE_PER_LS:
 		writel_relaxed(0x4030D00|BVAL(7, 0, clk_sel), CLK_TEST_REG);
@@ -3355,7 +3356,7 @@
 		writel_relaxed(BVAL(6, 1, clk_sel)|BIT(0), DBG_CFG_REG_LS_REG);
 		break;
 	case TEST_TYPE_MM_HS2X:
-		clk->divider = 2;
+		measure->divider = 2;
 	case TEST_TYPE_MM_HS:
 		writel_relaxed(0x402B800, CLK_TEST_REG);
 		writel_relaxed(BVAL(6, 1, clk_sel)|BIT(0), DBG_CFG_REG_HS_REG);
@@ -3367,8 +3368,8 @@
 		break;
 	case TEST_TYPE_SC:
 		writel_relaxed(0x5020000|BVAL(16, 10, clk_sel), CLK_TEST_REG);
-		clk->sample_ticks = 0x4000;
-		clk->multiplier = 2;
+		measure->sample_ticks = 0x4000;
+		measure->multiplier = 2;
 		break;
 	default:
 		ret = -EPERM;
@@ -3410,7 +3411,7 @@
 	unsigned long flags;
 	u32 pdm_reg_backup, ringosc_reg_backup;
 	u64 raw_count_short, raw_count_full;
-	struct measure_clk *clk = to_measure_clk(c);
+	struct measure_clk *measure = to_measure_clk(c);
 	unsigned ret;
 
 	spin_lock_irqsave(&local_clock_reg_lock, flags);
@@ -3431,7 +3432,7 @@
 	/* Run a short measurement. (~1 ms) */
 	raw_count_short = run_measurement(0x1000);
 	/* Run a full measurement. (~14 ms) */
-	raw_count_full = run_measurement(clk->sample_ticks);
+	raw_count_full = run_measurement(measure->sample_ticks);
 
 	writel_relaxed(ringosc_reg_backup, RINGOSC_NS_REG);
 	writel_relaxed(pdm_reg_backup, PDM_CLK_NS_REG);
@@ -3442,9 +3443,9 @@
 	else {
 		/* Compute rate in Hz. */
 		raw_count_full = ((raw_count_full * 10) + 15) * 4800000;
-		do_div(raw_count_full,
-		       (((clk->sample_ticks * 10) + 35) * clk->divider));
-		ret = (raw_count_full * clk->multiplier);
+		do_div(raw_count_full, (((measure->sample_ticks * 10) + 35)
+			* measure->divider));
+		ret = (raw_count_full * measure->multiplier);
 	}
 
 	/* Route dbg_hs_clk to PLLTEST.  300mV single-ended amplitude. */
@@ -3454,12 +3455,12 @@
 	return ret;
 }
 #else /* !CONFIG_DEBUG_FS */
-static int measure_clk_set_parent(struct clk *clk, struct clk *parent)
+static int measure_clk_set_parent(struct clk *c, struct clk *parent)
 {
 	return -EINVAL;
 }
 
-static unsigned long measure_clk_get_rate(struct clk *clk)
+static unsigned long measure_clk_get_rate(struct clk *c)
 {
 	return 0;
 }
@@ -3722,6 +3723,8 @@
 	CLK_LOOKUP("vcodec_iommu0_clk", vcodec_axi_clk.c, "msm_vidc.0"),
 	CLK_LOOKUP("vcodec_iommu1_clk", vcodec_axi_clk.c, "msm_vidc.0"),
 	CLK_LOOKUP("smmu_iface_clk", smmu_p_clk.c,	"msm_vidc.0"),
+	CLK_LOOKUP("core_clk",		vcodec_axi_clk.c,  "pil_vidc"),
+	CLK_LOOKUP("smmu_iface_clk",	smmu_p_clk.c,  "pil_vidc"),
 
 	CLK_LOOKUP("dfab_dsps_clk",	dfab_dsps_clk.c, NULL),
 	CLK_LOOKUP("core_clk",		dfab_usb_hs_clk.c,	"msm_otg"),
diff --git a/arch/arm/mach-msm/clock-9615.c b/arch/arm/mach-msm/clock-9615.c
index a2e0bc9..f5ce5a7 100644
--- a/arch/arm/mach-msm/clock-9615.c
+++ b/arch/arm/mach-msm/clock-9615.c
@@ -216,33 +216,33 @@
 
 static DEFINE_SPINLOCK(soft_vote_lock);
 
-static int pll_acpu_vote_clk_enable(struct clk *clk)
+static int pll_acpu_vote_clk_enable(struct clk *c)
 {
 	int ret = 0;
 	unsigned long flags;
-	struct pll_vote_clk *pll = to_pll_vote_clk(clk);
+	struct pll_vote_clk *pllv = to_pll_vote_clk(c);
 
 	spin_lock_irqsave(&soft_vote_lock, flags);
 
-	if (!*pll->soft_vote)
-		ret = pll_vote_clk_enable(clk);
+	if (!*pllv->soft_vote)
+		ret = pll_vote_clk_enable(c);
 	if (ret == 0)
-		*pll->soft_vote |= (pll->soft_vote_mask);
+		*pllv->soft_vote |= (pllv->soft_vote_mask);
 
 	spin_unlock_irqrestore(&soft_vote_lock, flags);
 	return ret;
 }
 
-static void pll_acpu_vote_clk_disable(struct clk *clk)
+static void pll_acpu_vote_clk_disable(struct clk *c)
 {
 	unsigned long flags;
-	struct pll_vote_clk *pll = to_pll_vote_clk(clk);
+	struct pll_vote_clk *pllv = to_pll_vote_clk(c);
 
 	spin_lock_irqsave(&soft_vote_lock, flags);
 
-	*pll->soft_vote &= ~(pll->soft_vote_mask);
-	if (!*pll->soft_vote)
-		pll_vote_clk_disable(clk);
+	*pllv->soft_vote &= ~(pllv->soft_vote_mask);
+	if (!*pllv->soft_vote)
+		pll_vote_clk_disable(c);
 
 	spin_unlock_irqrestore(&soft_vote_lock, flags);
 }
@@ -250,7 +250,6 @@
 static struct clk_ops clk_ops_pll_acpu_vote = {
 	.enable = pll_acpu_vote_clk_enable,
 	.disable = pll_acpu_vote_clk_disable,
-	.auto_off = pll_acpu_vote_clk_disable,
 	.is_enabled = pll_vote_clk_is_enabled,
 	.get_parent = pll_vote_clk_get_parent,
 };
@@ -1175,6 +1174,7 @@
 			.dbg_name = #i "_clk", \
 			.ops = &clk_ops_cdiv, \
 			CLK_INIT(i##_clk.c), \
+			.rate = ULONG_MAX, \
 		}, \
 	}
 
@@ -1194,6 +1194,7 @@
 			.dbg_name = #i "_clk", \
 			.ops = &clk_ops_cdiv, \
 			CLK_INIT(i##_clk.c), \
+			.rate = ULONG_MAX, \
 		}, \
 	}
 
@@ -1267,6 +1268,7 @@
 		.ops = &clk_ops_rcg,
 		VDD_DIG_FMAX_MAP1(LOW, 24576000),
 		CLK_INIT(pcm_clk.c),
+		.rate = ULONG_MAX,
 	},
 };
 
@@ -1376,7 +1378,7 @@
 #ifdef CONFIG_DEBUG_FS
 struct measure_sel {
 	u32 test_vector;
-	struct clk *clk;
+	struct clk *c;
 };
 
 static DEFINE_CLK_MEASURE(q6sw_clk);
@@ -1447,12 +1449,12 @@
 	{ TEST_LPA_HS(0x00), &q6_func_clk },
 };
 
-static struct measure_sel *find_measure_sel(struct clk *clk)
+static struct measure_sel *find_measure_sel(struct clk *c)
 {
 	int i;
 
 	for (i = 0; i < ARRAY_SIZE(measure_mux); i++)
-		if (measure_mux[i].clk == clk)
+		if (measure_mux[i].c == c)
 			return &measure_mux[i];
 	return NULL;
 }
@@ -1462,7 +1464,7 @@
 	int ret = 0;
 	u32 clk_sel;
 	struct measure_sel *p;
-	struct measure_clk *clk = to_measure_clk(c);
+	struct measure_clk *measure = to_measure_clk(c);
 	unsigned long flags;
 
 	if (!parent)
@@ -1478,9 +1480,9 @@
 	 * Program the test vector, measurement period (sample_ticks)
 	 * and scaling multiplier.
 	 */
-	clk->sample_ticks = 0x10000;
+	measure->sample_ticks = 0x10000;
 	clk_sel = p->test_vector & TEST_CLK_SEL_MASK;
-	clk->multiplier = 1;
+	measure->multiplier = 1;
 	switch (p->test_vector >> TEST_TYPE_SHIFT) {
 	case TEST_TYPE_PER_LS:
 		writel_relaxed(0x4030D00|BVAL(7, 0, clk_sel), CLK_TEST_REG);
@@ -1539,7 +1541,7 @@
 	unsigned long flags;
 	u32 pdm_reg_backup, ringosc_reg_backup;
 	u64 raw_count_short, raw_count_full;
-	struct measure_clk *clk = to_measure_clk(c);
+	struct measure_clk *measure = to_measure_clk(c);
 	unsigned ret;
 
 	spin_lock_irqsave(&local_clock_reg_lock, flags);
@@ -1560,7 +1562,7 @@
 	/* Run a short measurement. (~1 ms) */
 	raw_count_short = run_measurement(0x1000);
 	/* Run a full measurement. (~14 ms) */
-	raw_count_full = run_measurement(clk->sample_ticks);
+	raw_count_full = run_measurement(measure->sample_ticks);
 
 	writel_relaxed(ringosc_reg_backup, RINGOSC_NS_REG);
 	writel_relaxed(pdm_reg_backup, PDM_CLK_NS_REG);
@@ -1571,8 +1573,8 @@
 	else {
 		/* Compute rate in Hz. */
 		raw_count_full = ((raw_count_full * 10) + 15) * 4800000;
-		do_div(raw_count_full, ((clk->sample_ticks * 10) + 35));
-		ret = (raw_count_full * clk->multiplier);
+		do_div(raw_count_full, ((measure->sample_ticks * 10) + 35));
+		ret = (raw_count_full * measure->multiplier);
 	}
 
 	/* Route dbg_hs_clk to PLLTEST.  300mV single-ended amplitude. */
@@ -1582,12 +1584,12 @@
 	return ret;
 }
 #else /* !CONFIG_DEBUG_FS */
-static int measure_clk_set_parent(struct clk *clk, struct clk *parent)
+static int measure_clk_set_parent(struct clk *c, struct clk *parent)
 {
 	return -EINVAL;
 }
 
-static unsigned long measure_clk_get_rate(struct clk *clk)
+static unsigned long measure_clk_get_rate(struct clk *c)
 {
 	return 0;
 }
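
pll_acpu_vote_clk_enable()/_disable() above implement a shared soft-vote: each voter owns one bit in a common mask, the PLL is turned on by the first vote in and off by the last vote out, and the mask is guarded by a lock. A user-space sketch of that pattern, with error handling dropped, a mutex standing in for the kernel spinlock and the hw_*() calls as placeholders:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t vote_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned int soft_vote;

static void hw_enable(void)  { printf("PLL enabled\n");  }
static void hw_disable(void) { printf("PLL disabled\n"); }

static void vote_enable(unsigned int mask)
{
	pthread_mutex_lock(&vote_lock);
	if (!soft_vote)			/* first voter turns the PLL on */
		hw_enable();
	soft_vote |= mask;
	pthread_mutex_unlock(&vote_lock);
}

static void vote_disable(unsigned int mask)
{
	pthread_mutex_lock(&vote_lock);
	soft_vote &= ~mask;
	if (!soft_vote)			/* last voter turns it off */
		hw_disable();
	pthread_mutex_unlock(&vote_lock);
}

int main(void)
{
	vote_enable(0x1);	/* e.g. the apps processor's vote */
	vote_enable(0x2);	/* e.g. a second voter */
	vote_disable(0x1);
	vote_disable(0x2);	/* only now does the PLL go down */
	return 0;
}
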
diff --git a/arch/arm/mach-msm/clock-debug.c b/arch/arm/mach-msm/clock-debug.c
index e8c3e05..7263512 100644
--- a/arch/arm/mach-msm/clock-debug.c
+++ b/arch/arm/mach-msm/clock-debug.c
@@ -169,16 +169,23 @@
 
 static int clock_debug_print_clock(struct clk *c)
 {
-	size_t ln = 0;
-	char s[128];
+	char *start = "";
 
 	if (!c || !c->count)
 		return 0;
 
-	ln += snprintf(s, sizeof(s), "\t%s", c->dbg_name);
-	while (ln < sizeof(s) && (c = clk_get_parent(c)))
-		ln += snprintf(s + ln, sizeof(s) - ln, " -> %s", c->dbg_name);
-	pr_info("%s\n", s);
+	pr_info("\t");
+	do {
+		if (c->vdd_class)
+			pr_cont("%s%s [%ld, %lu]", start, c->dbg_name, c->rate,
+				c->vdd_class->cur_level);
+		else
+			pr_cont("%s%s [%ld]", start, c->dbg_name, c->rate);
+		start = " -> ";
+	} while ((c = clk_get_parent(c)));
+
+	pr_cont("\n");
+
 	return 1;
 }
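
The reworked clock_debug_print_clock() walks the parent chain with pr_cont() instead of building a fixed 128-byte buffer, printing each clock's name and cached rate along the way. A small sketch of the resulting output format, with made-up clocks and rates:

#include <stdio.h>

struct clk {
	const char *dbg_name;
	long rate;
	struct clk *parent;
};

static struct clk pxo  = { "pxo_clk",  27000000,  NULL  };
static struct clk pll8 = { "pll8_clk", 384000000, &pxo  };
static struct clk uart = { "uart_clk", 1843200,   &pll8 };

static void print_clock_chain(struct clk *c)
{
	const char *sep = "";

	/* "\tchild [rate] -> parent [rate] -> ..." */
	printf("\t");
	do {
		printf("%s%s [%ld]", sep, c->dbg_name, c->rate);
		sep = " -> ";
	} while ((c = c->parent));
	printf("\n");
}

int main(void)
{
	print_clock_chain(&uart);
	return 0;
}
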
 
diff --git a/arch/arm/mach-msm/clock-dss-8960.c b/arch/arm/mach-msm/clock-dss-8960.c
index 7f3646f..d9ad103 100644
--- a/arch/arm/mach-msm/clock-dss-8960.c
+++ b/arch/arm/mach-msm/clock-dss-8960.c
@@ -98,6 +98,7 @@
 	unsigned int val;
 	u32 ahb_en_reg, ahb_enabled;
 	unsigned int timeout_count;
+	int pll_lock_retry = 10;
 
 	ahb_en_reg = readl_relaxed(AHB_EN_REG);
 	ahb_enabled = ahb_en_reg & BIT(4);
@@ -149,7 +150,7 @@
 
 	timeout_count = 1000;
 	while (!(readl_relaxed(HDMI_PHY_PLL_STATUS0) & BIT(0)) &&
-			timeout_count) {
+			timeout_count && pll_lock_retry) {
 		if (--timeout_count == 0) {
 			/*
 			 * PLL has still not locked.
@@ -166,16 +167,18 @@
 			udelay(10);
 			writel_relaxed(0x0D, HDMI_PHY_PLL_LOCKDET_CFG2);
 			timeout_count = 1000;
-
-			pr_err("%s: PLL not locked after %d iterations\n",
-				__func__, timeout_count);
-			pr_err("%s: Asserting PLL S/W reset & trying again\n",
-				__func__);
+			pll_lock_retry--;
 		}
 	}
 
 	if (!ahb_enabled)
 		writel_relaxed(ahb_en_reg & ~BIT(4), AHB_EN_REG);
+
+	if (!pll_lock_retry) {
+		pr_err("%s: HDMI PLL not locked\n", __func__);
+		return -EAGAIN;
+	}
+
 	hdmi_pll_on = 1;
 	return 0;
 }
@@ -233,24 +236,19 @@
 	switch (rate) {
 	case 27030000:
 		/* 480p60/480i60 case */
-		writel_relaxed(0x32, HDMI_PHY_PLL_REFCLK_CFG);
+		writel_relaxed(0xA, HDMI_PHY_PLL_PWRDN_B);
+		writel_relaxed(0x38, HDMI_PHY_PLL_REFCLK_CFG);
 		writel_relaxed(0x2, HDMI_PHY_PLL_CHRG_PUMP_CFG);
-		writel_relaxed(0x08, HDMI_PHY_PLL_LOOP_FLT_CFG0);
-		writel_relaxed(0x77, HDMI_PHY_PLL_LOOP_FLT_CFG1);
-		writel_relaxed(0x2C, HDMI_PHY_PLL_IDAC_ADJ_CFG);
-		writel_relaxed(0x6, HDMI_PHY_PLL_I_VI_KVCO_CFG);
-		writel_relaxed(0x7b, HDMI_PHY_PLL_SDM_CFG0);
-		writel_relaxed(0x01, HDMI_PHY_PLL_SDM_CFG1);
-		writel_relaxed(0x4C, HDMI_PHY_PLL_SDM_CFG2);
-		writel_relaxed(0xC0, HDMI_PHY_PLL_SDM_CFG3);
+		writel_relaxed(0x20, HDMI_PHY_PLL_LOOP_FLT_CFG0);
+		writel_relaxed(0xFF, HDMI_PHY_PLL_LOOP_FLT_CFG1);
+		writel_relaxed(0x00, HDMI_PHY_PLL_SDM_CFG0);
+		writel_relaxed(0x4E, HDMI_PHY_PLL_SDM_CFG1);
+		writel_relaxed(0xD7, HDMI_PHY_PLL_SDM_CFG2);
+		writel_relaxed(0x03, HDMI_PHY_PLL_SDM_CFG3);
 		writel_relaxed(0x00, HDMI_PHY_PLL_SDM_CFG4);
-		writel_relaxed(0x9A, HDMI_PHY_PLL_SSC_CFG0);
-		writel_relaxed(0x00, HDMI_PHY_PLL_SSC_CFG1);
-		writel_relaxed(0x00, HDMI_PHY_PLL_SSC_CFG2);
-		writel_relaxed(0x00, HDMI_PHY_PLL_SSC_CFG3);
 		writel_relaxed(0x2A, HDMI_PHY_PLL_VCOCAL_CFG0);
 		writel_relaxed(0x03, HDMI_PHY_PLL_VCOCAL_CFG1);
-		writel_relaxed(0x2B, HDMI_PHY_PLL_VCOCAL_CFG2);
+		writel_relaxed(0x3B, HDMI_PHY_PLL_VCOCAL_CFG2);
 		writel_relaxed(0x00, HDMI_PHY_PLL_VCOCAL_CFG3);
 		writel_relaxed(0x86, HDMI_PHY_PLL_VCOCAL_CFG4);
 		writel_relaxed(0x00, HDMI_PHY_PLL_VCOCAL_CFG5);
@@ -266,6 +264,7 @@
 		writel_relaxed(0x33, HDMI_PHY_PLL_LOOP_FLT_CFG1);
 		writel_relaxed(0x2C, HDMI_PHY_PLL_IDAC_ADJ_CFG);
 		writel_relaxed(0x6, HDMI_PHY_PLL_I_VI_KVCO_CFG);
+		writel_relaxed(0xA, HDMI_PHY_PLL_PWRDN_B);
 		writel_relaxed(0x77, HDMI_PHY_PLL_SDM_CFG0);
 		writel_relaxed(0x4C, HDMI_PHY_PLL_SDM_CFG1);
 		writel_relaxed(0x00, HDMI_PHY_PLL_SDM_CFG2);
@@ -275,9 +274,12 @@
 		writel_relaxed(0x00, HDMI_PHY_PLL_SSC_CFG1);
 		writel_relaxed(0x00, HDMI_PHY_PLL_SSC_CFG2);
 		writel_relaxed(0x20, HDMI_PHY_PLL_SSC_CFG3);
+		writel_relaxed(0x10, HDMI_PHY_PLL_LOCKDET_CFG0);
+		writel_relaxed(0x1A, HDMI_PHY_PLL_LOCKDET_CFG1);
+		writel_relaxed(0x0D, HDMI_PHY_PLL_LOCKDET_CFG2);
 		writel_relaxed(0xF4, HDMI_PHY_PLL_VCOCAL_CFG0);
 		writel_relaxed(0x02, HDMI_PHY_PLL_VCOCAL_CFG1);
-		writel_relaxed(0x2B, HDMI_PHY_PLL_VCOCAL_CFG2);
+		writel_relaxed(0x3B, HDMI_PHY_PLL_VCOCAL_CFG2);
 		writel_relaxed(0x00, HDMI_PHY_PLL_VCOCAL_CFG3);
 		writel_relaxed(0x86, HDMI_PHY_PLL_VCOCAL_CFG4);
 		writel_relaxed(0x00, HDMI_PHY_PLL_VCOCAL_CFG5);
@@ -293,6 +295,7 @@
 		writel_relaxed(0x33, HDMI_PHY_PLL_LOOP_FLT_CFG1);
 		writel_relaxed(0x2C, HDMI_PHY_PLL_IDAC_ADJ_CFG);
 		writel_relaxed(0x6, HDMI_PHY_PLL_I_VI_KVCO_CFG);
+		writel_relaxed(0xA, HDMI_PHY_PLL_PWRDN_B);
 		writel_relaxed(0x7B, HDMI_PHY_PLL_SDM_CFG0);
 		writel_relaxed(0x01, HDMI_PHY_PLL_SDM_CFG1);
 		writel_relaxed(0x4C, HDMI_PHY_PLL_SDM_CFG2);
@@ -302,9 +305,12 @@
 		writel_relaxed(0x00, HDMI_PHY_PLL_SSC_CFG1);
 		writel_relaxed(0x00, HDMI_PHY_PLL_SSC_CFG2);
 		writel_relaxed(0x00, HDMI_PHY_PLL_SSC_CFG3);
+		writel_relaxed(0x10, HDMI_PHY_PLL_LOCKDET_CFG0);
+		writel_relaxed(0x1A, HDMI_PHY_PLL_LOCKDET_CFG1);
+		writel_relaxed(0x0D, HDMI_PHY_PLL_LOCKDET_CFG2);
 		writel_relaxed(0x2a, HDMI_PHY_PLL_VCOCAL_CFG0);
 		writel_relaxed(0x03, HDMI_PHY_PLL_VCOCAL_CFG1);
-		writel_relaxed(0x2B, HDMI_PHY_PLL_VCOCAL_CFG2);
+		writel_relaxed(0x3B, HDMI_PHY_PLL_VCOCAL_CFG2);
 		writel_relaxed(0x00, HDMI_PHY_PLL_VCOCAL_CFG3);
 		writel_relaxed(0x86, HDMI_PHY_PLL_VCOCAL_CFG4);
 		writel_relaxed(0x00, HDMI_PHY_PLL_VCOCAL_CFG5);
@@ -316,12 +322,14 @@
 		/* 720p60/720p50/1080i60/1080i50
 		 * 1080p24/1080p30/1080p25 case
 		 */
+		writel_relaxed(0xA, HDMI_PHY_PLL_PWRDN_B);
 		writel_relaxed(0x12, HDMI_PHY_PLL_REFCLK_CFG);
 		writel_relaxed(0x01, HDMI_PHY_PLL_LOOP_FLT_CFG0);
 		writel_relaxed(0x33, HDMI_PHY_PLL_LOOP_FLT_CFG1);
 		writel_relaxed(0x76, HDMI_PHY_PLL_SDM_CFG0);
 		writel_relaxed(0xE6, HDMI_PHY_PLL_VCOCAL_CFG0);
 		writel_relaxed(0x02, HDMI_PHY_PLL_VCOCAL_CFG1);
+		writel_relaxed(0x3B, HDMI_PHY_PLL_VCOCAL_CFG2);
 	break;
 
 	case 148500000:
@@ -332,6 +340,7 @@
 		writel_relaxed(0x33, HDMI_PHY_PLL_LOOP_FLT_CFG1);
 		writel_relaxed(0x2C, HDMI_PHY_PLL_IDAC_ADJ_CFG);
 		writel_relaxed(0x6, HDMI_PHY_PLL_I_VI_KVCO_CFG);
+		writel_relaxed(0xA, HDMI_PHY_PLL_PWRDN_B);
 		writel_relaxed(0x76, HDMI_PHY_PLL_SDM_CFG0);
 		writel_relaxed(0x01, HDMI_PHY_PLL_SDM_CFG1);
 		writel_relaxed(0x4C, HDMI_PHY_PLL_SDM_CFG2);
@@ -341,9 +350,12 @@
 		writel_relaxed(0x00, HDMI_PHY_PLL_SSC_CFG1);
 		writel_relaxed(0x00, HDMI_PHY_PLL_SSC_CFG2);
 		writel_relaxed(0x00, HDMI_PHY_PLL_SSC_CFG3);
+		writel_relaxed(0x10, HDMI_PHY_PLL_LOCKDET_CFG0);
+		writel_relaxed(0x1A, HDMI_PHY_PLL_LOCKDET_CFG1);
+		writel_relaxed(0x0D, HDMI_PHY_PLL_LOCKDET_CFG2);
 		writel_relaxed(0xe6, HDMI_PHY_PLL_VCOCAL_CFG0);
 		writel_relaxed(0x02, HDMI_PHY_PLL_VCOCAL_CFG1);
-		writel_relaxed(0x2B, HDMI_PHY_PLL_VCOCAL_CFG2);
+		writel_relaxed(0x3B, HDMI_PHY_PLL_VCOCAL_CFG2);
 		writel_relaxed(0x00, HDMI_PHY_PLL_VCOCAL_CFG3);
 		writel_relaxed(0x86, HDMI_PHY_PLL_VCOCAL_CFG4);
 		writel_relaxed(0x00, HDMI_PHY_PLL_VCOCAL_CFG5);
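
The pll_lock_retry change above bounds the previously endless recovery path: poll the lock-detect status up to 1000 times, and if the PLL still has not locked, assert the software reset and try again, giving up after ten resets (the driver then returns -EAGAIN). A condensed sketch of that control flow, with poll_locked()/reset_pll() as placeholders for the register accesses:

#include <stdbool.h>
#include <stdio.h>

static bool poll_locked(void) { return false; }	/* stand-in for PLL_STATUS0 */
static void reset_pll(void)   { }		/* stand-in for the S/W reset */

static int wait_for_pll_lock(void)
{
	int pll_lock_retry = 10;
	int timeout_count = 1000;

	while (!poll_locked() && timeout_count && pll_lock_retry) {
		if (--timeout_count == 0) {
			/* Still not locked: reset and start a fresh window. */
			reset_pll();
			timeout_count = 1000;
			pll_lock_retry--;
		}
	}

	return pll_lock_retry ? 0 : -1;		/* -EAGAIN in the driver */
}

int main(void)
{
	printf("%d\n", wait_for_pll_lock());
	return 0;
}
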
diff --git a/arch/arm/mach-msm/clock-fsm9xxx.c b/arch/arm/mach-msm/clock-fsm9xxx.c
index 13a5b65..c188ba6 100644
--- a/arch/arm/mach-msm/clock-fsm9xxx.c
+++ b/arch/arm/mach-msm/clock-fsm9xxx.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+/* Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -24,6 +24,7 @@
 static struct clk_lookup msm_clocks_fsm9xxx[] = {
 	CLK_DUMMY("core_clk",	ADM0_CLK,	"msm_dmov", OFF),
 	CLK_DUMMY("core_clk",	UART1_CLK,	"msm_serial.0", OFF),
+	CLK_DUMMY("core_clk",	UART3_CLK,	"msm_uim.2", OFF),
 	CLK_DUMMY("core_clk",	CE_CLK,		"qce.0", OFF),
 	CLK_DUMMY("core_clk",	CE_CLK,		"qcota.0", OFF),
 	CLK_DUMMY("core_clk",	CE_CLK,		"qcrypto.0", OFF),
diff --git a/arch/arm/mach-msm/clock-local.c b/arch/arm/mach-msm/clock-local.c
index b5ae4ab..ca913dc 100644
--- a/arch/arm/mach-msm/clock-local.c
+++ b/arch/arm/mach-msm/clock-local.c
@@ -53,32 +53,32 @@
  */
 
 /* For clocks with MND dividers. */
-void set_rate_mnd(struct rcg_clk *clk, struct clk_freq_tbl *nf)
+void set_rate_mnd(struct rcg_clk *rcg, struct clk_freq_tbl *nf)
 {
 	uint32_t ns_reg_val, ctl_reg_val;
 
 	/* Assert MND reset. */
-	ns_reg_val = readl_relaxed(clk->ns_reg);
+	ns_reg_val = readl_relaxed(rcg->ns_reg);
 	ns_reg_val |= BIT(7);
-	writel_relaxed(ns_reg_val, clk->ns_reg);
+	writel_relaxed(ns_reg_val, rcg->ns_reg);
 
 	/* Program M and D values. */
-	writel_relaxed(nf->md_val, clk->md_reg);
+	writel_relaxed(nf->md_val, rcg->md_reg);
 
 	/* If the clock has a separate CC register, program it. */
-	if (clk->ns_reg != clk->b.ctl_reg) {
-		ctl_reg_val = readl_relaxed(clk->b.ctl_reg);
-		ctl_reg_val &= ~(clk->ctl_mask);
+	if (rcg->ns_reg != rcg->b.ctl_reg) {
+		ctl_reg_val = readl_relaxed(rcg->b.ctl_reg);
+		ctl_reg_val &= ~(rcg->ctl_mask);
 		ctl_reg_val |= nf->ctl_val;
-		writel_relaxed(ctl_reg_val, clk->b.ctl_reg);
+		writel_relaxed(ctl_reg_val, rcg->b.ctl_reg);
 	}
 
 	/* Deassert MND reset. */
 	ns_reg_val &= ~BIT(7);
-	writel_relaxed(ns_reg_val, clk->ns_reg);
+	writel_relaxed(ns_reg_val, rcg->ns_reg);
 }
 
-void set_rate_nop(struct rcg_clk *clk, struct clk_freq_tbl *nf)
+void set_rate_nop(struct rcg_clk *rcg, struct clk_freq_tbl *nf)
 {
 	/*
 	 * Nothing to do for fixed-rate or integer-divider clocks. Any settings
@@ -88,31 +88,31 @@
 	 */
 }
 
-void set_rate_mnd_8(struct rcg_clk *clk, struct clk_freq_tbl *nf)
+void set_rate_mnd_8(struct rcg_clk *rcg, struct clk_freq_tbl *nf)
 {
 	uint32_t ctl_reg_val;
 
 	/* Assert MND reset. */
-	ctl_reg_val = readl_relaxed(clk->b.ctl_reg);
+	ctl_reg_val = readl_relaxed(rcg->b.ctl_reg);
 	ctl_reg_val |= BIT(8);
-	writel_relaxed(ctl_reg_val, clk->b.ctl_reg);
+	writel_relaxed(ctl_reg_val, rcg->b.ctl_reg);
 
 	/* Program M and D values. */
-	writel_relaxed(nf->md_val, clk->md_reg);
+	writel_relaxed(nf->md_val, rcg->md_reg);
 
 	/* Program MN counter Enable and Mode. */
-	ctl_reg_val &= ~(clk->ctl_mask);
+	ctl_reg_val &= ~(rcg->ctl_mask);
 	ctl_reg_val |= nf->ctl_val;
-	writel_relaxed(ctl_reg_val, clk->b.ctl_reg);
+	writel_relaxed(ctl_reg_val, rcg->b.ctl_reg);
 
 	/* Deassert MND reset. */
 	ctl_reg_val &= ~BIT(8);
-	writel_relaxed(ctl_reg_val, clk->b.ctl_reg);
+	writel_relaxed(ctl_reg_val, rcg->b.ctl_reg);
 }
 
-void set_rate_mnd_banked(struct rcg_clk *clk, struct clk_freq_tbl *nf)
+void set_rate_mnd_banked(struct rcg_clk *rcg, struct clk_freq_tbl *nf)
 {
-	struct bank_masks *banks = clk->bank_info;
+	struct bank_masks *banks = rcg->bank_info;
 	const struct bank_mask_info *new_bank_masks;
 	const struct bank_mask_info *old_bank_masks;
 	uint32_t ns_reg_val, ctl_reg_val;
@@ -123,10 +123,10 @@
 	 * off, program the active bank since bank switching won't work if
 	 * both banks aren't running.
 	 */
-	ctl_reg_val = readl_relaxed(clk->b.ctl_reg);
+	ctl_reg_val = readl_relaxed(rcg->b.ctl_reg);
 	bank_sel = !!(ctl_reg_val & banks->bank_sel_mask);
 	 /* If clock isn't running, don't switch banks. */
-	bank_sel ^= (!clk->enabled || clk->current_freq->freq_hz == 0);
+	bank_sel ^= (!rcg->enabled || rcg->current_freq->freq_hz == 0);
 	if (bank_sel == 0) {
 		new_bank_masks = &banks->bank1_mask;
 		old_bank_masks = &banks->bank0_mask;
@@ -135,46 +135,46 @@
 		old_bank_masks = &banks->bank1_mask;
 	}
 
-	ns_reg_val = readl_relaxed(clk->ns_reg);
+	ns_reg_val = readl_relaxed(rcg->ns_reg);
 
 	/* Assert bank MND reset. */
 	ns_reg_val |= new_bank_masks->rst_mask;
-	writel_relaxed(ns_reg_val, clk->ns_reg);
+	writel_relaxed(ns_reg_val, rcg->ns_reg);
 
 	/*
 	 * Program NS only if the clock is enabled, since the NS will be set
 	 * as part of the enable procedure and should remain with a low-power
 	 * MUX input selected until then.
 	 */
-	if (clk->enabled) {
+	if (rcg->enabled) {
 		ns_reg_val &= ~(new_bank_masks->ns_mask);
 		ns_reg_val |= (nf->ns_val & new_bank_masks->ns_mask);
-		writel_relaxed(ns_reg_val, clk->ns_reg);
+		writel_relaxed(ns_reg_val, rcg->ns_reg);
 	}
 
 	writel_relaxed(nf->md_val, new_bank_masks->md_reg);
 
 	/* Enable counter only if clock is enabled. */
-	if (clk->enabled)
+	if (rcg->enabled)
 		ctl_reg_val |= new_bank_masks->mnd_en_mask;
 	else
 		ctl_reg_val &= ~(new_bank_masks->mnd_en_mask);
 
 	ctl_reg_val &= ~(new_bank_masks->mode_mask);
 	ctl_reg_val |= (nf->ctl_val & new_bank_masks->mode_mask);
-	writel_relaxed(ctl_reg_val, clk->b.ctl_reg);
+	writel_relaxed(ctl_reg_val, rcg->b.ctl_reg);
 
 	/* Deassert bank MND reset. */
 	ns_reg_val &= ~(new_bank_masks->rst_mask);
-	writel_relaxed(ns_reg_val, clk->ns_reg);
+	writel_relaxed(ns_reg_val, rcg->ns_reg);
 
 	/*
 	 * Switch to the new bank if clock is running.  If it isn't, then
 	 * no switch is necessary since we programmed the active bank.
 	 */
-	if (clk->enabled && clk->current_freq->freq_hz) {
+	if (rcg->enabled && rcg->current_freq->freq_hz) {
 		ctl_reg_val ^= banks->bank_sel_mask;
-		writel_relaxed(ctl_reg_val, clk->b.ctl_reg);
+		writel_relaxed(ctl_reg_val, rcg->b.ctl_reg);
 		/*
 		 * Wait at least 6 cycles of slowest bank's clock
 		 * for the glitch-free MUX to fully switch sources.
@@ -184,22 +184,22 @@
 
 		/* Disable old bank's MN counter. */
 		ctl_reg_val &= ~(old_bank_masks->mnd_en_mask);
-		writel_relaxed(ctl_reg_val, clk->b.ctl_reg);
+		writel_relaxed(ctl_reg_val, rcg->b.ctl_reg);
 
 		/* Program old bank to a low-power source and divider. */
 		ns_reg_val &= ~(old_bank_masks->ns_mask);
-		ns_reg_val |= (clk->freq_tbl->ns_val & old_bank_masks->ns_mask);
-		writel_relaxed(ns_reg_val, clk->ns_reg);
+		ns_reg_val |= (rcg->freq_tbl->ns_val & old_bank_masks->ns_mask);
+		writel_relaxed(ns_reg_val, rcg->ns_reg);
 	}
 
 	/* Update the MND_EN and NS masks to match the current bank. */
-	clk->mnd_en_mask = new_bank_masks->mnd_en_mask;
-	clk->ns_mask = new_bank_masks->ns_mask;
+	rcg->mnd_en_mask = new_bank_masks->mnd_en_mask;
+	rcg->ns_mask = new_bank_masks->ns_mask;
 }
 
-void set_rate_div_banked(struct rcg_clk *clk, struct clk_freq_tbl *nf)
+void set_rate_div_banked(struct rcg_clk *rcg, struct clk_freq_tbl *nf)
 {
-	struct bank_masks *banks = clk->bank_info;
+	struct bank_masks *banks = rcg->bank_info;
 	const struct bank_mask_info *new_bank_masks;
 	const struct bank_mask_info *old_bank_masks;
 	uint32_t ns_reg_val, bank_sel;
@@ -209,10 +209,10 @@
 	 * off, program the active bank since bank switching won't work if
 	 * both banks aren't running.
 	 */
-	ns_reg_val = readl_relaxed(clk->ns_reg);
+	ns_reg_val = readl_relaxed(rcg->ns_reg);
 	bank_sel = !!(ns_reg_val & banks->bank_sel_mask);
 	 /* If clock isn't running, don't switch banks. */
-	bank_sel ^= (!clk->enabled || clk->current_freq->freq_hz == 0);
+	bank_sel ^= (!rcg->enabled || rcg->current_freq->freq_hz == 0);
 	if (bank_sel == 0) {
 		new_bank_masks = &banks->bank1_mask;
 		old_bank_masks = &banks->bank0_mask;
@@ -226,19 +226,19 @@
 	 * as part of the enable procedure and should remain with a low-power
 	 * MUX input selected until then.
 	 */
-	if (clk->enabled) {
+	if (rcg->enabled) {
 		ns_reg_val &= ~(new_bank_masks->ns_mask);
 		ns_reg_val |= (nf->ns_val & new_bank_masks->ns_mask);
-		writel_relaxed(ns_reg_val, clk->ns_reg);
+		writel_relaxed(ns_reg_val, rcg->ns_reg);
 	}
 
 	/*
 	 * Switch to the new bank if clock is running.  If it isn't, then
 	 * no switch is necessary since we programmed the active bank.
 	 */
-	if (clk->enabled && clk->current_freq->freq_hz) {
+	if (rcg->enabled && rcg->current_freq->freq_hz) {
 		ns_reg_val ^= banks->bank_sel_mask;
-		writel_relaxed(ns_reg_val, clk->ns_reg);
+		writel_relaxed(ns_reg_val, rcg->ns_reg);
 		/*
 		 * Wait at least 6 cycles of slowest bank's clock
 		 * for the glitch-free MUX to fully switch sources.
@@ -248,12 +248,12 @@
 
 		/* Program old bank to a low-power source and divider. */
 		ns_reg_val &= ~(old_bank_masks->ns_mask);
-		ns_reg_val |= (clk->freq_tbl->ns_val & old_bank_masks->ns_mask);
-		writel_relaxed(ns_reg_val, clk->ns_reg);
+		ns_reg_val |= (rcg->freq_tbl->ns_val & old_bank_masks->ns_mask);
+		writel_relaxed(ns_reg_val, rcg->ns_reg);
 	}
 
 	/* Update the NS mask to match the current bank. */
-	clk->ns_mask = new_bank_masks->ns_mask;
+	rcg->ns_mask = new_bank_masks->ns_mask;
 }
 
 /*
@@ -261,10 +261,10 @@
  */
 
 /* Return non-zero if a clock status registers shows the clock is halted. */
-static int branch_clk_is_halted(const struct branch *clk)
+static int branch_clk_is_halted(const struct branch *b)
 {
-	int invert = (clk->halt_check == ENABLE);
-	int status_bit = readl_relaxed(clk->halt_reg) & BIT(clk->halt_bit);
+	int invert = (b->halt_check == ENABLE);
+	int status_bit = readl_relaxed(b->halt_reg) & BIT(b->halt_bit);
 	return invert ? !status_bit : status_bit;
 }
 
@@ -276,14 +276,14 @@
 	return !!(readl_relaxed(b->hwcg_reg) & b->hwcg_mask);
 }
 
-void __branch_clk_enable_reg(const struct branch *clk, const char *name)
+void __branch_enable_reg(const struct branch *b, const char *name)
 {
 	u32 reg_val;
 
-	if (clk->en_mask) {
-		reg_val = readl_relaxed(clk->ctl_reg);
-		reg_val |= clk->en_mask;
-		writel_relaxed(reg_val, clk->ctl_reg);
+	if (b->en_mask) {
+		reg_val = readl_relaxed(b->ctl_reg);
+		reg_val |= b->en_mask;
+		writel_relaxed(reg_val, b->ctl_reg);
 	}
 
 	/*
@@ -295,19 +295,19 @@
 	mb();
 
 	/* Skip checking halt bit if the clock is in hardware gated mode */
-	if (branch_in_hwcg_mode(clk))
+	if (branch_in_hwcg_mode(b))
 		return;
 
 	/* Wait for clock to enable before returning. */
-	if (clk->halt_check == DELAY)
+	if (b->halt_check == DELAY) {
 		udelay(HALT_CHECK_DELAY_US);
-	else if (clk->halt_check == ENABLE || clk->halt_check == HALT
-			|| clk->halt_check == ENABLE_VOTED
-			|| clk->halt_check == HALT_VOTED) {
+	} else if (b->halt_check == ENABLE || b->halt_check == HALT
+			|| b->halt_check == ENABLE_VOTED
+			|| b->halt_check == HALT_VOTED) {
 		int count;
 
 		/* Wait up to HALT_CHECK_MAX_LOOPS for clock to enable. */
-		for (count = HALT_CHECK_MAX_LOOPS; branch_clk_is_halted(clk)
+		for (count = HALT_CHECK_MAX_LOOPS; branch_clk_is_halted(b)
 					&& count > 0; count--)
 			udelay(1);
 		WARN(count == 0, "%s status stuck at 'off'", name);
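
The halt check here polls a status register rather than sleeping for a fixed delay: spin up to HALT_CHECK_MAX_LOOPS iterations (a microsecond apart in the driver) until the halt bit reports the branch running, and WARN if it never does. A sketch of that loop, with the loop count and read_halt_bit() purely illustrative:

#include <stdbool.h>
#include <stdio.h>

#define HALT_CHECK_MAX_LOOPS 200	/* illustrative value */

static bool read_halt_bit(void) { return false; }	/* 0 == running here */

static bool branch_is_halted(bool invert)
{
	bool status = read_halt_bit();

	return invert ? !status : status;	/* ENABLE-type checks invert */
}

static void wait_branch_on(const char *name, bool invert)
{
	int count;

	for (count = HALT_CHECK_MAX_LOOPS;
	     branch_is_halted(invert) && count > 0; count--)
		/* udelay(1) in the kernel */;

	if (count == 0)
		printf("WARN: %s status stuck at 'off'\n", name);
}

int main(void)
{
	wait_branch_on("example_clk", false);
	return 0;
}
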
@@ -315,50 +315,50 @@
 }
 
 /* Perform any register operations required to enable the clock. */
-static void __rcg_clk_enable_reg(struct rcg_clk *clk)
+static void __rcg_clk_enable_reg(struct rcg_clk *rcg)
 {
 	u32 reg_val;
-	void __iomem *const reg = clk->b.ctl_reg;
+	void __iomem *const reg = rcg->b.ctl_reg;
 
-	WARN(clk->current_freq == &rcg_dummy_freq,
+	WARN(rcg->current_freq == &rcg_dummy_freq,
 		"Attempting to enable %s before setting its rate. "
-		"Set the rate first!\n", clk->c.dbg_name);
+		"Set the rate first!\n", rcg->c.dbg_name);
 
 	/*
 	 * Program the NS register, if applicable. NS registers are not
 	 * set in the set_rate path because power can be saved by deferring
 	 * the selection of a clocked source until the clock is enabled.
 	 */
-	if (clk->ns_mask) {
-		reg_val = readl_relaxed(clk->ns_reg);
-		reg_val &= ~(clk->ns_mask);
-		reg_val |= (clk->current_freq->ns_val & clk->ns_mask);
-		writel_relaxed(reg_val, clk->ns_reg);
+	if (rcg->ns_mask) {
+		reg_val = readl_relaxed(rcg->ns_reg);
+		reg_val &= ~(rcg->ns_mask);
+		reg_val |= (rcg->current_freq->ns_val & rcg->ns_mask);
+		writel_relaxed(reg_val, rcg->ns_reg);
 	}
 
 	/* Enable MN counter, if applicable. */
 	reg_val = readl_relaxed(reg);
-	if (clk->current_freq->md_val) {
-		reg_val |= clk->mnd_en_mask;
+	if (rcg->current_freq->md_val) {
+		reg_val |= rcg->mnd_en_mask;
 		writel_relaxed(reg_val, reg);
 	}
 	/* Enable root. */
-	if (clk->root_en_mask) {
-		reg_val |= clk->root_en_mask;
+	if (rcg->root_en_mask) {
+		reg_val |= rcg->root_en_mask;
 		writel_relaxed(reg_val, reg);
 	}
-	__branch_clk_enable_reg(&clk->b, clk->c.dbg_name);
+	__branch_enable_reg(&rcg->b, rcg->c.dbg_name);
 }
 
 /* Perform any register operations required to disable the branch. */
-u32 __branch_clk_disable_reg(const struct branch *clk, const char *name)
+u32 __branch_disable_reg(const struct branch *b, const char *name)
 {
 	u32 reg_val;
 
-	reg_val = readl_relaxed(clk->ctl_reg);
-	if (clk->en_mask) {
-		reg_val &= ~(clk->en_mask);
-		writel_relaxed(reg_val, clk->ctl_reg);
+	reg_val = readl_relaxed(b->ctl_reg);
+	if (b->en_mask) {
+		reg_val &= ~(b->en_mask);
+		writel_relaxed(reg_val, b->ctl_reg);
 	}
 
 	/*
@@ -370,18 +370,18 @@
 	mb();
 
 	/* Skip checking halt bit if the clock is in hardware gated mode */
-	if (branch_in_hwcg_mode(clk))
+	if (branch_in_hwcg_mode(b))
 		return reg_val;
 
 	/* Wait for clock to disable before continuing. */
-	if (clk->halt_check == DELAY || clk->halt_check == ENABLE_VOTED
-				     || clk->halt_check == HALT_VOTED)
+	if (b->halt_check == DELAY || b->halt_check == ENABLE_VOTED
+				   || b->halt_check == HALT_VOTED) {
 		udelay(HALT_CHECK_DELAY_US);
-	else if (clk->halt_check == ENABLE || clk->halt_check == HALT) {
+	} else if (b->halt_check == ENABLE || b->halt_check == HALT) {
 		int count;
 
 		/* Wait up to HALT_CHECK_MAX_LOOPS for clock to disable. */
-		for (count = HALT_CHECK_MAX_LOOPS; !branch_clk_is_halted(clk)
+		for (count = HALT_CHECK_MAX_LOOPS; !branch_clk_is_halted(b)
 					&& count > 0; count--)
 			udelay(1);
 		WARN(count == 0, "%s status stuck at 'on'", name);
@@ -391,31 +391,31 @@
 }
 
 /* Perform any register operations required to disable the generator. */
-static void __rcg_clk_disable_reg(struct rcg_clk *clk)
+static void __rcg_clk_disable_reg(struct rcg_clk *rcg)
 {
-	void __iomem *const reg = clk->b.ctl_reg;
+	void __iomem *const reg = rcg->b.ctl_reg;
 	uint32_t reg_val;
 
-	reg_val = __branch_clk_disable_reg(&clk->b, clk->c.dbg_name);
+	reg_val = __branch_disable_reg(&rcg->b, rcg->c.dbg_name);
 	/* Disable root. */
-	if (clk->root_en_mask) {
-		reg_val &= ~(clk->root_en_mask);
+	if (rcg->root_en_mask) {
+		reg_val &= ~(rcg->root_en_mask);
 		writel_relaxed(reg_val, reg);
 	}
 	/* Disable MN counter, if applicable. */
-	if (clk->current_freq->md_val) {
-		reg_val &= ~(clk->mnd_en_mask);
+	if (rcg->current_freq->md_val) {
+		reg_val &= ~(rcg->mnd_en_mask);
 		writel_relaxed(reg_val, reg);
 	}
 	/*
 	 * Program NS register to low-power value with an un-clocked or
 	 * slowly-clocked source selected.
 	 */
-	if (clk->ns_mask) {
-		reg_val = readl_relaxed(clk->ns_reg);
-		reg_val &= ~(clk->ns_mask);
-		reg_val |= (clk->freq_tbl->ns_val & clk->ns_mask);
-		writel_relaxed(reg_val, clk->ns_reg);
+	if (rcg->ns_mask) {
+		reg_val = readl_relaxed(rcg->ns_reg);
+		reg_val &= ~(rcg->ns_mask);
+		reg_val |= (rcg->freq_tbl->ns_val & rcg->ns_mask);
+		writel_relaxed(reg_val, rcg->ns_reg);
 	}
 }
 
@@ -423,11 +423,11 @@
 static int rcg_clk_enable(struct clk *c)
 {
 	unsigned long flags;
-	struct rcg_clk *clk = to_rcg_clk(c);
+	struct rcg_clk *rcg = to_rcg_clk(c);
 
 	spin_lock_irqsave(&local_clock_reg_lock, flags);
-	__rcg_clk_enable_reg(clk);
-	clk->enabled = true;
+	__rcg_clk_enable_reg(rcg);
+	rcg->enabled = true;
 	spin_unlock_irqrestore(&local_clock_reg_lock, flags);
 
 	return 0;
@@ -437,11 +437,11 @@
 static void rcg_clk_disable(struct clk *c)
 {
 	unsigned long flags;
-	struct rcg_clk *clk = to_rcg_clk(c);
+	struct rcg_clk *rcg = to_rcg_clk(c);
 
 	spin_lock_irqsave(&local_clock_reg_lock, flags);
-	__rcg_clk_disable_reg(clk);
-	clk->enabled = false;
+	__rcg_clk_disable_reg(rcg);
+	rcg->enabled = false;
 	spin_unlock_irqrestore(&local_clock_reg_lock, flags);
 }
 
@@ -452,21 +452,21 @@
 /* Set a clock to an exact rate. */
 static int rcg_clk_set_rate(struct clk *c, unsigned long rate)
 {
-	struct rcg_clk *clk = to_rcg_clk(c);
+	struct rcg_clk *rcg = to_rcg_clk(c);
 	struct clk_freq_tbl *nf, *cf;
 	struct clk *chld;
 	int rc = 0;
 
-	for (nf = clk->freq_tbl; nf->freq_hz != FREQ_END
+	for (nf = rcg->freq_tbl; nf->freq_hz != FREQ_END
 			&& nf->freq_hz != rate; nf++)
 		;
 
 	if (nf->freq_hz == FREQ_END)
 		return -EINVAL;
 
-	cf = clk->current_freq;
+	cf = rcg->current_freq;
 
-	if (clk->enabled) {
+	if (rcg->enabled) {
 		/* Enable source clock dependency for the new freq. */
 		rc = clk_enable(nf->src_clk);
 		if (rc)
@@ -476,9 +476,9 @@
 	spin_lock(&local_clock_reg_lock);
 
 	/* Disable branch if clock isn't dual-banked with a glitch-free MUX. */
-	if (!clk->bank_info) {
+	if (!rcg->bank_info) {
 		/* Disable all branches to prevent glitches. */
-		list_for_each_entry(chld, &clk->c.children, siblings) {
+		list_for_each_entry(chld, &rcg->c.children, siblings) {
 			struct branch_clk *x = to_branch_clk(chld);
 			/*
 			 * We don't need to grab the child's lock because
@@ -486,56 +486,56 @@
 			 * only modified within lock.
 			 */
 			if (x->enabled)
-				__branch_clk_disable_reg(&x->b, x->c.dbg_name);
+				__branch_disable_reg(&x->b, x->c.dbg_name);
 		}
-		if (clk->enabled)
-			__rcg_clk_disable_reg(clk);
+		if (rcg->enabled)
+			__rcg_clk_disable_reg(rcg);
 	}
 
 	/* Perform clock-specific frequency switch operations. */
-	BUG_ON(!clk->set_rate);
-	clk->set_rate(clk, nf);
+	BUG_ON(!rcg->set_rate);
+	rcg->set_rate(rcg, nf);
 
 	/*
 	 * Current freq must be updated before __rcg_clk_enable_reg()
 	 * is called to make sure the MNCNTR_EN bit is set correctly.
 	 */
-	clk->current_freq = nf;
+	rcg->current_freq = nf;
 
 	/* Enable any clocks that were disabled. */
-	if (!clk->bank_info) {
-		if (clk->enabled)
-			__rcg_clk_enable_reg(clk);
+	if (!rcg->bank_info) {
+		if (rcg->enabled)
+			__rcg_clk_enable_reg(rcg);
 		/* Enable only branches that were ON before. */
-		list_for_each_entry(chld, &clk->c.children, siblings) {
+		list_for_each_entry(chld, &rcg->c.children, siblings) {
 			struct branch_clk *x = to_branch_clk(chld);
 			if (x->enabled)
-				__branch_clk_enable_reg(&x->b, x->c.dbg_name);
+				__branch_enable_reg(&x->b, x->c.dbg_name);
 		}
 	}
 
 	spin_unlock(&local_clock_reg_lock);
 
 	/* Release source requirements of the old freq. */
-	if (clk->enabled)
+	if (rcg->enabled)
 		clk_disable(cf->src_clk);
 
 	return rc;
 }
 
 /* Check if a clock is currently enabled. */
-static int rcg_clk_is_enabled(struct clk *clk)
+static int rcg_clk_is_enabled(struct clk *c)
 {
-	return to_rcg_clk(clk)->enabled;
+	return to_rcg_clk(c)->enabled;
 }
 
 /* Return a supported rate that's at least the specified rate. */
 static long rcg_clk_round_rate(struct clk *c, unsigned long rate)
 {
-	struct rcg_clk *clk = to_rcg_clk(c);
+	struct rcg_clk *rcg = to_rcg_clk(c);
 	struct clk_freq_tbl *f;
 
-	for (f = clk->freq_tbl; f->freq_hz != FREQ_END; f++)
+	for (f = rcg->freq_tbl; f->freq_hz != FREQ_END; f++)
 		if (f->freq_hz >= rate)
 			return f->freq_hz;
 
@@ -545,26 +545,26 @@
 /* Return the nth supported frequency for a given clock. */
 static int rcg_clk_list_rate(struct clk *c, unsigned n)
 {
-	struct rcg_clk *clk = to_rcg_clk(c);
+	struct rcg_clk *rcg = to_rcg_clk(c);
 
-	if (!clk->freq_tbl || clk->freq_tbl->freq_hz == FREQ_END)
+	if (!rcg->freq_tbl || rcg->freq_tbl->freq_hz == FREQ_END)
 		return -ENXIO;
 
-	return (clk->freq_tbl + n)->freq_hz;
+	return (rcg->freq_tbl + n)->freq_hz;
 }
 
-static struct clk *rcg_clk_get_parent(struct clk *clk)
+static struct clk *rcg_clk_get_parent(struct clk *c)
 {
-	return to_rcg_clk(clk)->current_freq->src_clk;
+	return to_rcg_clk(c)->current_freq->src_clk;
 }
 
 /* Disable hw clock gating if not set at boot */
-enum handoff branch_handoff(struct branch *clk, struct clk *c)
+enum handoff branch_handoff(struct branch *b, struct clk *c)
 {
-	if (!branch_in_hwcg_mode(clk)) {
-		clk->hwcg_mask = 0;
+	if (!branch_in_hwcg_mode(b)) {
+		b->hwcg_mask = 0;
 		c->flags &= ~CLKFLAG_HWCG;
-		if (readl_relaxed(clk->ctl_reg) & clk->en_mask)
+		if (readl_relaxed(b->ctl_reg) & b->en_mask)
 			return HANDOFF_ENABLED_CLK;
 	} else {
 		c->flags |= CLKFLAG_HWCG;
@@ -574,24 +574,24 @@
 
 static enum handoff branch_clk_handoff(struct clk *c)
 {
-	struct branch_clk *clk = to_branch_clk(c);
-	return branch_handoff(&clk->b, &clk->c);
+	struct branch_clk *br = to_branch_clk(c);
+	return branch_handoff(&br->b, &br->c);
 }
 
 static enum handoff rcg_clk_handoff(struct clk *c)
 {
-	struct rcg_clk *clk = to_rcg_clk(c);
+	struct rcg_clk *rcg = to_rcg_clk(c);
 	uint32_t ctl_val, ns_val, md_val, ns_mask;
 	struct clk_freq_tbl *freq;
 	enum handoff ret;
 
-	ctl_val = readl_relaxed(clk->b.ctl_reg);
-	ret = branch_handoff(&clk->b, &clk->c);
+	ctl_val = readl_relaxed(rcg->b.ctl_reg);
+	ret = branch_handoff(&rcg->b, &rcg->c);
 	if (ret == HANDOFF_DISABLED_CLK)
 		return HANDOFF_DISABLED_CLK;
 
-	if (clk->bank_info) {
-		const struct bank_masks *bank_masks = clk->bank_info;
+	if (rcg->bank_info) {
+		const struct bank_masks *bank_masks = rcg->bank_info;
 		const struct bank_mask_info *bank_info;
 		if (!(ctl_val & bank_masks->bank_sel_mask))
 			bank_info = &bank_masks->bank0_mask;
@@ -602,13 +602,13 @@
 		md_val = bank_info->md_reg ?
 				readl_relaxed(bank_info->md_reg) : 0;
 	} else {
-		ns_mask = clk->ns_mask;
-		md_val = clk->md_reg ? readl_relaxed(clk->md_reg) : 0;
+		ns_mask = rcg->ns_mask;
+		md_val = rcg->md_reg ? readl_relaxed(rcg->md_reg) : 0;
 	}
 	if (!ns_mask)
 		return HANDOFF_UNKNOWN_RATE;
-	ns_val = readl_relaxed(clk->ns_reg) & ns_mask;
-	for (freq = clk->freq_tbl; freq->freq_hz != FREQ_END; freq++) {
+	ns_val = readl_relaxed(rcg->ns_reg) & ns_mask;
+	for (freq = rcg->freq_tbl; freq->freq_hz != FREQ_END; freq++) {
 		if ((freq->ns_val & ns_mask) == ns_val &&
 		    (!freq->md_val || freq->md_val == md_val))
 			break;
@@ -616,7 +616,7 @@
 	if (freq->freq_hz == FREQ_END)
 		return HANDOFF_UNKNOWN_RATE;
 
-	clk->current_freq = freq;
+	rcg->current_freq = freq;
 	c->rate = freq->freq_hz;
 
 	return HANDOFF_ENABLED_CLK;
@@ -632,40 +632,38 @@
 	},
 };
 
-static int branch_clk_enable(struct clk *clk)
+static int branch_clk_enable(struct clk *c)
 {
 	unsigned long flags;
-	struct branch_clk *branch = to_branch_clk(clk);
+	struct branch_clk *br = to_branch_clk(c);
 
 	spin_lock_irqsave(&local_clock_reg_lock, flags);
-	__branch_clk_enable_reg(&branch->b, branch->c.dbg_name);
-	branch->enabled = true;
+	__branch_enable_reg(&br->b, br->c.dbg_name);
+	br->enabled = true;
 	spin_unlock_irqrestore(&local_clock_reg_lock, flags);
 
 	return 0;
 }
 
-static void branch_clk_disable(struct clk *clk)
+static void branch_clk_disable(struct clk *c)
 {
 	unsigned long flags;
-	struct branch_clk *branch = to_branch_clk(clk);
+	struct branch_clk *br = to_branch_clk(c);
 
 	spin_lock_irqsave(&local_clock_reg_lock, flags);
-	__branch_clk_disable_reg(&branch->b, branch->c.dbg_name);
-	branch->enabled = false;
+	__branch_disable_reg(&br->b, br->c.dbg_name);
+	br->enabled = false;
 	spin_unlock_irqrestore(&local_clock_reg_lock, flags);
 }
 
-static struct clk *branch_clk_get_parent(struct clk *clk)
+static struct clk *branch_clk_get_parent(struct clk *c)
 {
-	struct branch_clk *branch = to_branch_clk(clk);
-	return branch->parent;
+	return to_branch_clk(c)->parent;
 }
 
-static int branch_clk_is_enabled(struct clk *clk)
+static int branch_clk_is_enabled(struct clk *c)
 {
-	struct branch_clk *branch = to_branch_clk(clk);
-	return branch->enabled;
+	return to_branch_clk(c)->enabled;
 }
 
 static void branch_enable_hwcg(struct branch *b)
@@ -692,16 +690,14 @@
 	spin_unlock_irqrestore(&local_clock_reg_lock, flags);
 }
 
-static void branch_clk_enable_hwcg(struct clk *clk)
+static void branch_clk_enable_hwcg(struct clk *c)
 {
-	struct branch_clk *branch = to_branch_clk(clk);
-	branch_enable_hwcg(&branch->b);
+	branch_enable_hwcg(&to_branch_clk(c)->b);
 }
 
-static void branch_clk_disable_hwcg(struct clk *clk)
+static void branch_clk_disable_hwcg(struct clk *c)
 {
-	struct branch_clk *branch = to_branch_clk(clk);
-	branch_disable_hwcg(&branch->b);
+	branch_disable_hwcg(&to_branch_clk(c)->b);
 }
 
 static int branch_set_flags(struct branch *b, unsigned flags)
@@ -738,26 +734,22 @@
 
 static int branch_clk_in_hwcg_mode(struct clk *c)
 {
-	struct branch_clk *clk = to_branch_clk(c);
-	return branch_in_hwcg_mode(&clk->b);
+	return branch_in_hwcg_mode(&to_branch_clk(c)->b);
 }
 
-static void rcg_clk_enable_hwcg(struct clk *clk)
+static void rcg_clk_enable_hwcg(struct clk *c)
 {
-	struct rcg_clk *rcg = to_rcg_clk(clk);
-	branch_enable_hwcg(&rcg->b);
+	branch_enable_hwcg(&to_rcg_clk(c)->b);
 }
 
-static void rcg_clk_disable_hwcg(struct clk *clk)
+static void rcg_clk_disable_hwcg(struct clk *c)
 {
-	struct rcg_clk *rcg = to_rcg_clk(clk);
-	branch_disable_hwcg(&rcg->b);
+	branch_disable_hwcg(&to_rcg_clk(c)->b);
 }
 
 static int rcg_clk_in_hwcg_mode(struct clk *c)
 {
-	struct rcg_clk *clk = to_rcg_clk(c);
-	return branch_in_hwcg_mode(&clk->b);
+	return branch_in_hwcg_mode(&to_rcg_clk(c)->b);
 }
 
 static int rcg_clk_set_flags(struct clk *clk, unsigned flags)
@@ -802,9 +794,9 @@
 	return ret;
 }
 
-static int branch_clk_reset(struct clk *clk, enum clk_reset_action action)
+static int branch_clk_reset(struct clk *c, enum clk_reset_action action)
 {
-	return branch_reset(&to_branch_clk(clk)->b, action);
+	return branch_reset(&to_branch_clk(c)->b, action);
 }
 
 struct clk_ops clk_ops_branch = {
@@ -813,7 +805,6 @@
 	.enable_hwcg = branch_clk_enable_hwcg,
 	.disable_hwcg = branch_clk_disable_hwcg,
 	.in_hwcg_mode = branch_clk_in_hwcg_mode,
-	.auto_off = branch_clk_disable,
 	.is_enabled = branch_clk_is_enabled,
 	.reset = branch_clk_reset,
 	.get_parent = branch_clk_get_parent,
@@ -825,9 +816,9 @@
 	.reset = branch_clk_reset,
 };
 
-static int rcg_clk_reset(struct clk *clk, enum clk_reset_action action)
+static int rcg_clk_reset(struct clk *c, enum clk_reset_action action)
 {
-	return branch_reset(&to_rcg_clk(clk)->b, action);
+	return branch_reset(&to_rcg_clk(c)->b, action);
 }
 
 struct clk_ops clk_ops_rcg = {
@@ -836,7 +827,6 @@
 	.enable_hwcg = rcg_clk_enable_hwcg,
 	.disable_hwcg = rcg_clk_disable_hwcg,
 	.in_hwcg_mode = rcg_clk_in_hwcg_mode,
-	.auto_off = rcg_clk_disable,
 	.handoff = rcg_clk_handoff,
 	.set_rate = rcg_clk_set_rate,
 	.list_rate = rcg_clk_list_rate,
@@ -850,10 +840,10 @@
 static int cdiv_clk_enable(struct clk *c)
 {
 	unsigned long flags;
-	struct cdiv_clk *clk = to_cdiv_clk(c);
+	struct cdiv_clk *cdiv = to_cdiv_clk(c);
 
 	spin_lock_irqsave(&local_clock_reg_lock, flags);
-	__branch_clk_enable_reg(&clk->b, clk->c.dbg_name);
+	__branch_enable_reg(&cdiv->b, cdiv->c.dbg_name);
 	spin_unlock_irqrestore(&local_clock_reg_lock, flags);
 
 	return 0;
@@ -862,70 +852,67 @@
 static void cdiv_clk_disable(struct clk *c)
 {
 	unsigned long flags;
-	struct cdiv_clk *clk = to_cdiv_clk(c);
+	struct cdiv_clk *cdiv = to_cdiv_clk(c);
 
 	spin_lock_irqsave(&local_clock_reg_lock, flags);
-	__branch_clk_disable_reg(&clk->b, clk->c.dbg_name);
+	__branch_disable_reg(&cdiv->b, cdiv->c.dbg_name);
 	spin_unlock_irqrestore(&local_clock_reg_lock, flags);
 }
 
 static int cdiv_clk_set_rate(struct clk *c, unsigned long rate)
 {
-	struct cdiv_clk *clk = to_cdiv_clk(c);
+	struct cdiv_clk *cdiv = to_cdiv_clk(c);
 	u32 reg_val;
 
-	if (rate > clk->max_div)
+	if (rate > cdiv->max_div)
 		return -EINVAL;
 
 	spin_lock(&local_clock_reg_lock);
-	reg_val = readl_relaxed(clk->ns_reg);
-	reg_val &= ~(clk->ext_mask | (clk->max_div - 1) << clk->div_offset);
+	reg_val = readl_relaxed(cdiv->ns_reg);
+	reg_val &= ~(cdiv->ext_mask | (cdiv->max_div - 1) << cdiv->div_offset);
 	/* Non-zero rates mean set a divider, zero means use external input */
 	if (rate)
-		reg_val |= (rate - 1) << clk->div_offset;
+		reg_val |= (rate - 1) << cdiv->div_offset;
 	else
-		reg_val |= clk->ext_mask;
-	writel_relaxed(reg_val, clk->ns_reg);
+		reg_val |= cdiv->ext_mask;
+	writel_relaxed(reg_val, cdiv->ns_reg);
 	spin_unlock(&local_clock_reg_lock);
 
-	clk->cur_div = rate;
+	cdiv->cur_div = rate;
 	return 0;
 }
 
 static unsigned long cdiv_clk_get_rate(struct clk *c)
 {
-	struct cdiv_clk *clk = to_cdiv_clk(c);
-	return clk->cur_div;
+	return to_cdiv_clk(c)->cur_div;
 }
 
 static long cdiv_clk_round_rate(struct clk *c, unsigned long rate)
 {
-	struct cdiv_clk *clk = to_cdiv_clk(c);
-	return rate > clk->max_div ? -EPERM : rate;
+	return rate > to_cdiv_clk(c)->max_div ? -EPERM : rate;
 }
 
 static int cdiv_clk_list_rate(struct clk *c, unsigned n)
 {
-	struct cdiv_clk *clk = to_cdiv_clk(c);
-	return n > clk->max_div ? -ENXIO : n;
+	return n > to_cdiv_clk(c)->max_div ? -ENXIO : n;
 }
 
 static enum handoff cdiv_clk_handoff(struct clk *c)
 {
-	struct cdiv_clk *clk = to_cdiv_clk(c);
+	struct cdiv_clk *cdiv = to_cdiv_clk(c);
 	enum handoff ret;
 	u32 reg_val;
 
-	ret = branch_handoff(&clk->b, &clk->c);
+	ret = branch_handoff(&cdiv->b, &cdiv->c);
 	if (ret == HANDOFF_DISABLED_CLK)
 		return ret;
 
-	reg_val = readl_relaxed(clk->ns_reg);
-	if (reg_val & clk->ext_mask) {
-		clk->cur_div = 0;
+	reg_val = readl_relaxed(cdiv->ns_reg);
+	if (reg_val & cdiv->ext_mask) {
+		cdiv->cur_div = 0;
 	} else {
-		reg_val >>= clk->div_offset;
-		clk->cur_div = (reg_val & (clk->max_div - 1)) + 1;
+		reg_val >>= cdiv->div_offset;
+		cdiv->cur_div = (reg_val & (cdiv->max_div - 1)) + 1;
 	}
 
 	return HANDOFF_ENABLED_CLK;
@@ -933,20 +920,17 @@
 
 static void cdiv_clk_enable_hwcg(struct clk *c)
 {
-	struct cdiv_clk *clk = to_cdiv_clk(c);
-	branch_enable_hwcg(&clk->b);
+	branch_enable_hwcg(&to_cdiv_clk(c)->b);
 }
 
 static void cdiv_clk_disable_hwcg(struct clk *c)
 {
-	struct cdiv_clk *clk = to_cdiv_clk(c);
-	branch_disable_hwcg(&clk->b);
+	branch_disable_hwcg(&to_cdiv_clk(c)->b);
 }
 
 static int cdiv_clk_in_hwcg_mode(struct clk *c)
 {
-	struct cdiv_clk *clk = to_cdiv_clk(c);
-	return branch_in_hwcg_mode(&clk->b);
+	return branch_in_hwcg_mode(&to_cdiv_clk(c)->b);
 }
 
 struct clk_ops clk_ops_cdiv = {
@@ -955,7 +939,6 @@
 	.in_hwcg_mode = cdiv_clk_in_hwcg_mode,
 	.enable_hwcg = cdiv_clk_enable_hwcg,
 	.disable_hwcg = cdiv_clk_disable_hwcg,
-	.auto_off = cdiv_clk_disable,
 	.handoff = cdiv_clk_handoff,
 	.set_rate = cdiv_clk_set_rate,
 	.get_rate = cdiv_clk_get_rate,
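
The cdiv set_rate/handoff pair above encodes a requested divider as (rate - 1) in the NS register field and uses ext_mask to select the external input when the rate is 0; handoff inverts that decode. Below is a minimal user-space sketch of the same encode/decode round trip — the register is modeled as a plain integer and the field parameters (ext_mask, div_offset, max_div) are purely illustrative, not the values of any real MSM clock.

/*
 * Minimal user-space model of the cdiv encode/decode shown above.
 * Field parameters are illustrative only.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

struct cdiv_model {
	uint32_t ns_reg;	/* stands in for the memory-mapped NS register */
	uint32_t ext_mask;	/* bit that selects the external clock input */
	uint32_t div_offset;	/* bit position of the divider field */
	uint32_t max_div;	/* power-of-two field range, e.g. 16 */
};

/* set_rate: non-zero rate -> field = rate - 1, zero -> external input */
static void cdiv_set(struct cdiv_model *m, uint32_t rate)
{
	uint32_t val = m->ns_reg;

	val &= ~(m->ext_mask | (m->max_div - 1) << m->div_offset);
	if (rate)
		val |= (rate - 1) << m->div_offset;
	else
		val |= m->ext_mask;
	m->ns_reg = val;
}

/* handoff: external bit means divider 0, otherwise field value + 1 */
static uint32_t cdiv_get(const struct cdiv_model *m)
{
	if (m->ns_reg & m->ext_mask)
		return 0;
	return ((m->ns_reg >> m->div_offset) & (m->max_div - 1)) + 1;
}

int main(void)
{
	struct cdiv_model m = { .ext_mask = 1u << 14, .div_offset = 3, .max_div = 16 };

	for (uint32_t rate = 0; rate <= m.max_div; rate++) {
		cdiv_set(&m, rate);
		assert(cdiv_get(&m) == rate);
	}
	printf("encode/decode round-trips for dividers 0..%u\n", (unsigned)m.max_div);
	return 0;
}
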
diff --git a/arch/arm/mach-msm/clock-local.h b/arch/arm/mach-msm/clock-local.h
index ffc7057..034e09c 100644
--- a/arch/arm/mach-msm/clock-local.h
+++ b/arch/arm/mach-msm/clock-local.h
@@ -156,9 +156,9 @@
 extern struct clk_ops clk_ops_reset;
 
 int branch_reset(struct branch *b, enum clk_reset_action action);
-void __branch_clk_enable_reg(const struct branch *clk, const char *name);
-u32 __branch_clk_disable_reg(const struct branch *clk, const char *name);
-enum handoff branch_handoff(struct branch *clk, struct clk *c);
+void __branch_enable_reg(const struct branch *b, const char *name);
+u32 __branch_disable_reg(const struct branch *b, const char *name);
+enum handoff branch_handoff(struct branch *b, struct clk *c);
 
 /*
  * Generic clock-definition struct and macros
@@ -183,9 +183,9 @@
 	struct clk	c;
 };
 
-static inline struct rcg_clk *to_rcg_clk(struct clk *clk)
+static inline struct rcg_clk *to_rcg_clk(struct clk *c)
 {
-	return container_of(clk, struct rcg_clk, c);
+	return container_of(c, struct rcg_clk, c);
 }
 
 extern struct clk_ops clk_ops_rcg;
@@ -214,9 +214,9 @@
 	struct clk c;
 };
 
-static inline struct cdiv_clk *to_cdiv_clk(struct clk *clk)
+static inline struct cdiv_clk *to_cdiv_clk(struct clk *c)
 {
-	return container_of(clk, struct cdiv_clk, c);
+	return container_of(c, struct cdiv_clk, c);
 }
 
 extern struct clk_ops clk_ops_cdiv;
@@ -234,7 +234,7 @@
  * @enabled: true if clock is on, false otherwise
  * @b: branch
  * @parent: clock source
- * @c: clk
+ * @c: clock
  *
  * An on/off switch with a rate derived from the parent.
  */
@@ -245,9 +245,9 @@
 	struct clk c;
 };
 
-static inline struct branch_clk *to_branch_clk(struct clk *clk)
+static inline struct branch_clk *to_branch_clk(struct clk *c)
 {
-	return container_of(clk, struct branch_clk, c);
+	return container_of(c, struct branch_clk, c);
 }
 
 /**
@@ -255,7 +255,7 @@
  * @sample_ticks: sample period in reference clock ticks
  * @multiplier: measurement scale-up factor
  * @divider: measurement scale-down factor
- * @c: clk
+ * @c: clock
 */
 struct measure_clk {
 	u64 sample_ticks;
@@ -266,9 +266,9 @@
 
 extern struct clk_ops clk_ops_empty;
 
-static inline struct measure_clk *to_measure_clk(struct clk *clk)
+static inline struct measure_clk *to_measure_clk(struct clk *c)
 {
-	return container_of(clk, struct measure_clk, c);
+	return container_of(c, struct measure_clk, c);
 }
 
 /*
@@ -280,11 +280,11 @@
 /*
  * Generic set-rate implementations
  */
-void set_rate_mnd(struct rcg_clk *clk, struct clk_freq_tbl *nf);
-void set_rate_nop(struct rcg_clk *clk, struct clk_freq_tbl *nf);
-void set_rate_mnd_8(struct rcg_clk *clk, struct clk_freq_tbl *nf);
-void set_rate_mnd_banked(struct rcg_clk *clk, struct clk_freq_tbl *nf);
-void set_rate_div_banked(struct rcg_clk *clk, struct clk_freq_tbl *nf);
+void set_rate_mnd(struct rcg_clk *rcg, struct clk_freq_tbl *nf);
+void set_rate_nop(struct rcg_clk *rcg, struct clk_freq_tbl *nf);
+void set_rate_mnd_8(struct rcg_clk *rcg, struct clk_freq_tbl *nf);
+void set_rate_mnd_banked(struct rcg_clk *rcg, struct clk_freq_tbl *nf);
+void set_rate_div_banked(struct rcg_clk *rcg, struct clk_freq_tbl *nf);
 
 #endif /* __ARCH_ARM_MACH_MSM_CLOCK_LOCAL_H */
 
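The to_rcg_clk()/to_cdiv_clk()/to_branch_clk()/to_measure_clk() helpers above all follow the same container_of idiom: a clk_ops callback receives the embedded struct clk and recovers the wrapper type from it. The following self-contained sketch illustrates that idiom with stand-in struct definitions rather than the kernel ones.

/*
 * Illustration of the container_of pattern behind to_branch_clk() and
 * friends. Struct layouts here are stand-ins, not kernel definitions.
 */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct clk {
	const char *dbg_name;
};

struct branch_clk {
	int enabled;
	struct clk c;	/* embedded generic clock */
};

static struct branch_clk *to_branch_clk(struct clk *c)
{
	return container_of(c, struct branch_clk, c);
}

int main(void)
{
	struct branch_clk br = { .enabled = 1, .c = { .dbg_name = "demo_clk" } };
	struct clk *generic = &br.c;	/* what a clk_ops callback receives */

	printf("%s enabled=%d\n", generic->dbg_name, to_branch_clk(generic)->enabled);
	return 0;
}
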
diff --git a/arch/arm/mach-msm/clock-local2.c b/arch/arm/mach-msm/clock-local2.c
index 9fe9591..23b4723 100644
--- a/arch/arm/mach-msm/clock-local2.c
+++ b/arch/arm/mach-msm/clock-local2.c
@@ -589,7 +589,6 @@
 struct clk_ops clk_ops_branch = {
 	.enable = branch_clk_enable,
 	.disable = branch_clk_disable,
-	.auto_off = branch_clk_disable,
 	.set_rate = branch_clk_set_rate,
 	.get_rate = branch_clk_get_rate,
 	.list_rate = branch_clk_list_rate,
@@ -602,7 +601,6 @@
 struct clk_ops clk_ops_vote = {
 	.enable = local_vote_clk_enable,
 	.disable = local_vote_clk_disable,
-	.auto_off = local_vote_clk_disable,
 	.reset = local_vote_clk_reset,
 	.handoff = local_vote_clk_handoff,
 };
diff --git a/arch/arm/mach-msm/clock-local2.h b/arch/arm/mach-msm/clock-local2.h
index 547e633..1de79d7 100644
--- a/arch/arm/mach-msm/clock-local2.h
+++ b/arch/arm/mach-msm/clock-local2.h
@@ -174,5 +174,5 @@
 extern struct clk_ops clk_ops_branch;
 extern struct clk_ops clk_ops_vote;
 
-#endif /* __ARCH_ARM_MACH_MSM_COPPER_CLOCK_LOCAL_H */
+#endif /* __ARCH_ARM_MACH_MSM_CLOCK_LOCAL_2_H */
 
diff --git a/arch/arm/mach-msm/clock-pcom-lookup.c b/arch/arm/mach-msm/clock-pcom-lookup.c
index 83940cf..2a2cc01 100644
--- a/arch/arm/mach-msm/clock-pcom-lookup.c
+++ b/arch/arm/mach-msm/clock-pcom-lookup.c
@@ -22,19 +22,19 @@
 #define PLLn_MODE(n)	(MSM_CLK_CTL_BASE + 0x300 + 28 * (n))
 #define PLL4_MODE	(MSM_CLK_CTL_BASE + 0x374)
 
-static DEFINE_CLK_PCOM(adm_clk,		ADM_CLK,	CLKFLAG_SKIP_AUTO_OFF);
-static DEFINE_CLK_PCOM(adsp_clk,	ADSP_CLK,	CLKFLAG_SKIP_AUTO_OFF);
-static DEFINE_CLK_PCOM(ahb_m_clk,	AHB_M_CLK,	CLKFLAG_SKIP_AUTO_OFF);
-static DEFINE_CLK_PCOM(ahb_s_clk,	AHB_S_CLK,	CLKFLAG_SKIP_AUTO_OFF);
-static DEFINE_CLK_PCOM(cam_m_clk,	CAM_M_CLK,	CLKFLAG_SKIP_AUTO_OFF);
+static DEFINE_CLK_PCOM(adm_clk,		ADM_CLK,	0);
+static DEFINE_CLK_PCOM(adsp_clk,	ADSP_CLK,	0);
+static DEFINE_CLK_PCOM(ahb_m_clk,	AHB_M_CLK,	0);
+static DEFINE_CLK_PCOM(ahb_s_clk,	AHB_S_CLK,	0);
+static DEFINE_CLK_PCOM(cam_m_clk,	CAM_M_CLK,	0);
 static DEFINE_CLK_PCOM(axi_rotator_clk,	AXI_ROTATOR_CLK, 0);
-static DEFINE_CLK_PCOM(ce_clk,		CE_CLK,		CLKFLAG_SKIP_AUTO_OFF);
-static DEFINE_CLK_PCOM(csi0_clk,	CSI0_CLK,	CLKFLAG_SKIP_AUTO_OFF);
-static DEFINE_CLK_PCOM(csi0_p_clk,	CSI0_P_CLK,	CLKFLAG_SKIP_AUTO_OFF);
-static DEFINE_CLK_PCOM(csi0_vfe_clk,	CSI0_VFE_CLK,	CLKFLAG_SKIP_AUTO_OFF);
-static DEFINE_CLK_PCOM(csi1_clk,	CSI1_CLK,	CLKFLAG_SKIP_AUTO_OFF);
-static DEFINE_CLK_PCOM(csi1_p_clk,	CSI1_P_CLK,	CLKFLAG_SKIP_AUTO_OFF);
-static DEFINE_CLK_PCOM(csi1_vfe_clk,	CSI1_VFE_CLK,	CLKFLAG_SKIP_AUTO_OFF);
+static DEFINE_CLK_PCOM(ce_clk,		CE_CLK,		0);
+static DEFINE_CLK_PCOM(csi0_clk,	CSI0_CLK,	0);
+static DEFINE_CLK_PCOM(csi0_p_clk,	CSI0_P_CLK,	0);
+static DEFINE_CLK_PCOM(csi0_vfe_clk,	CSI0_VFE_CLK,	0);
+static DEFINE_CLK_PCOM(csi1_clk,	CSI1_CLK,	0);
+static DEFINE_CLK_PCOM(csi1_p_clk,	CSI1_P_CLK,	0);
+static DEFINE_CLK_PCOM(csi1_vfe_clk,	CSI1_VFE_CLK,	0);
 
 static struct pll_shared_clk pll0_clk = {
 	.id = PLL_0,
@@ -113,38 +113,36 @@
 };
 
 static DEFINE_CLK_PCOM(dsi_ref_clk,	DSI_REF_CLK,	0);
-static DEFINE_CLK_PCOM(ebi1_clk,	EBI1_CLK,
-		CLKFLAG_SKIP_AUTO_OFF | CLKFLAG_MIN);
-static DEFINE_CLK_PCOM(ebi2_clk,	EBI2_CLK,	CLKFLAG_SKIP_AUTO_OFF);
-static DEFINE_CLK_PCOM(ecodec_clk,	ECODEC_CLK,	CLKFLAG_SKIP_AUTO_OFF);
+static DEFINE_CLK_PCOM(ebi1_clk,	EBI1_CLK,	CLKFLAG_MIN);
+static DEFINE_CLK_PCOM(ebi2_clk,	EBI2_CLK,	0);
+static DEFINE_CLK_PCOM(ecodec_clk,	ECODEC_CLK,	0);
 static DEFINE_CLK_PCOM(emdh_clk,	EMDH_CLK,   CLKFLAG_MIN | CLKFLAG_MAX);
-static DEFINE_CLK_PCOM(gp_clk,		GP_CLK,		CLKFLAG_SKIP_AUTO_OFF);
-static DEFINE_CLK_PCOM(grp_2d_clk,	GRP_2D_CLK,	CLKFLAG_SKIP_AUTO_OFF);
-static DEFINE_CLK_PCOM(grp_2d_p_clk,	GRP_2D_P_CLK,	CLKFLAG_SKIP_AUTO_OFF);
+static DEFINE_CLK_PCOM(gp_clk,		GP_CLK,		0);
+static DEFINE_CLK_PCOM(grp_2d_clk,	GRP_2D_CLK,	0);
+static DEFINE_CLK_PCOM(grp_2d_p_clk,	GRP_2D_P_CLK,	0);
 static DEFINE_CLK_PCOM(grp_3d_clk,	GRP_3D_CLK,	0);
-static DEFINE_CLK_PCOM(grp_3d_p_clk,	GRP_3D_P_CLK,	CLKFLAG_SKIP_AUTO_OFF);
+static DEFINE_CLK_PCOM(grp_3d_p_clk,	GRP_3D_P_CLK,	0);
 static DEFINE_CLK_PCOM(gsbi1_qup_clk,	GSBI1_QUP_CLK,	0);
 static DEFINE_CLK_PCOM(gsbi1_qup_p_clk,	GSBI1_QUP_P_CLK, 0);
 static DEFINE_CLK_PCOM(gsbi2_qup_clk,	GSBI2_QUP_CLK,	0);
 static DEFINE_CLK_PCOM(gsbi2_qup_p_clk,	GSBI2_QUP_P_CLK, 0);
-static DEFINE_CLK_PCOM(gsbi_clk,	GSBI_CLK,	CLKFLAG_SKIP_AUTO_OFF);
-static DEFINE_CLK_PCOM(gsbi_p_clk,	GSBI_P_CLK,	CLKFLAG_SKIP_AUTO_OFF);
-static DEFINE_CLK_PCOM(hdmi_clk,	HDMI_CLK,	CLKFLAG_SKIP_AUTO_OFF);
-static DEFINE_CLK_PCOM(i2c_clk,		I2C_CLK,	CLKFLAG_SKIP_AUTO_OFF);
-static DEFINE_CLK_PCOM(icodec_rx_clk,	ICODEC_RX_CLK,	CLKFLAG_SKIP_AUTO_OFF);
-static DEFINE_CLK_PCOM(icodec_tx_clk,	ICODEC_TX_CLK,	CLKFLAG_SKIP_AUTO_OFF);
+static DEFINE_CLK_PCOM(gsbi_clk,	GSBI_CLK,	0);
+static DEFINE_CLK_PCOM(gsbi_p_clk,	GSBI_P_CLK,	0);
+static DEFINE_CLK_PCOM(hdmi_clk,	HDMI_CLK,	0);
+static DEFINE_CLK_PCOM(i2c_clk,		I2C_CLK,	0);
+static DEFINE_CLK_PCOM(icodec_rx_clk,	ICODEC_RX_CLK,	0);
+static DEFINE_CLK_PCOM(icodec_tx_clk,	ICODEC_TX_CLK,	0);
 static DEFINE_CLK_PCOM(imem_clk,	IMEM_CLK,	0);
-static DEFINE_CLK_PCOM(mdc_clk,		MDC_CLK,	CLKFLAG_SKIP_AUTO_OFF);
+static DEFINE_CLK_PCOM(mdc_clk,		MDC_CLK,	0);
 static DEFINE_CLK_PCOM(mdp_clk,		MDP_CLK,	CLKFLAG_MIN);
 static DEFINE_CLK_PCOM(mdp_lcdc_pad_pclk_clk, MDP_LCDC_PAD_PCLK_CLK,
-		CLKFLAG_SKIP_AUTO_OFF);
+		0);
 static DEFINE_CLK_PCOM(mdp_lcdc_pclk_clk, MDP_LCDC_PCLK_CLK,
-		CLKFLAG_SKIP_AUTO_OFF);
+		0);
 static DEFINE_CLK_PCOM(mdp_vsync_clk,	MDP_VSYNC_CLK,	0);
 static DEFINE_CLK_PCOM(mdp_dsi_p_clk,	MDP_DSI_P_CLK,	0);
-static DEFINE_CLK_PCOM(pbus_clk,	PBUS_CLK,
-		CLKFLAG_SKIP_AUTO_OFF | CLKFLAG_MIN);
-static DEFINE_CLK_PCOM(pcm_clk,		PCM_CLK,	CLKFLAG_SKIP_AUTO_OFF);
+static DEFINE_CLK_PCOM(pbus_clk,	PBUS_CLK,	CLKFLAG_MIN);
+static DEFINE_CLK_PCOM(pcm_clk,		PCM_CLK,	0);
 static DEFINE_CLK_PCOM(pmdh_clk,	PMDH_CLK,   CLKFLAG_MIN | CLKFLAG_MAX);
 static DEFINE_CLK_PCOM(sdac_clk,	SDAC_CLK,	0);
 static DEFINE_CLK_PCOM(sdc1_clk,	SDC1_CLK,	0);
@@ -155,12 +153,12 @@
 static DEFINE_CLK_PCOM(sdc3_p_clk,	SDC3_P_CLK,	0);
 static DEFINE_CLK_PCOM(sdc4_clk,	SDC4_CLK,	0);
 static DEFINE_CLK_PCOM(sdc4_p_clk,	SDC4_P_CLK,	0);
-static DEFINE_CLK_PCOM(spi_clk,		SPI_CLK,	CLKFLAG_SKIP_AUTO_OFF);
-static DEFINE_CLK_PCOM(tsif_clk,	TSIF_CLK,	CLKFLAG_SKIP_AUTO_OFF);
-static DEFINE_CLK_PCOM(tsif_p_clk,	TSIF_P_CLK,	CLKFLAG_SKIP_AUTO_OFF);
-static DEFINE_CLK_PCOM(tsif_ref_clk,	TSIF_REF_CLK,	CLKFLAG_SKIP_AUTO_OFF);
-static DEFINE_CLK_PCOM(tv_dac_clk,	TV_DAC_CLK,	CLKFLAG_SKIP_AUTO_OFF);
-static DEFINE_CLK_PCOM(tv_enc_clk,	TV_ENC_CLK,	CLKFLAG_SKIP_AUTO_OFF);
+static DEFINE_CLK_PCOM(spi_clk,		SPI_CLK,	0);
+static DEFINE_CLK_PCOM(tsif_clk,	TSIF_CLK,	0);
+static DEFINE_CLK_PCOM(tsif_p_clk,	TSIF_P_CLK,	0);
+static DEFINE_CLK_PCOM(tsif_ref_clk,	TSIF_REF_CLK,	0);
+static DEFINE_CLK_PCOM(tv_dac_clk,	TV_DAC_CLK,	0);
+static DEFINE_CLK_PCOM(tv_enc_clk,	TV_ENC_CLK,	0);
 static DEFINE_CLK_PCOM(uart1_clk,	UART1_CLK,	0);
 static DEFINE_CLK_PCOM(uart1dm_clk,	UART1DM_CLK,	0);
 static DEFINE_CLK_PCOM(uart2_clk,	UART2_CLK,	0);
@@ -173,8 +171,8 @@
 static DEFINE_CLK_PCOM(usb_hs_clk,	USB_HS_CLK,	0);
 static DEFINE_CLK_PCOM(usb_hs_core_clk,	USB_HS_CORE_CLK, 0);
 static DEFINE_CLK_PCOM(usb_hs_p_clk,	USB_HS_P_CLK,	0);
-static DEFINE_CLK_PCOM(usb_otg_clk,	USB_OTG_CLK,	CLKFLAG_SKIP_AUTO_OFF);
-static DEFINE_CLK_PCOM(usb_phy_clk,	USB_PHY_CLK,	CLKFLAG_SKIP_AUTO_OFF);
+static DEFINE_CLK_PCOM(usb_otg_clk,	USB_OTG_CLK,	0);
+static DEFINE_CLK_PCOM(usb_phy_clk,	USB_PHY_CLK,	0);
 static DEFINE_CLK_PCOM(vdc_clk,		VDC_CLK,	CLKFLAG_MIN);
 static DEFINE_CLK_PCOM(vfe_axi_clk,	VFE_AXI_CLK,	0);
 static DEFINE_CLK_PCOM(vfe_clk,		VFE_CLK,	0);
diff --git a/arch/arm/mach-msm/clock-pcom.c b/arch/arm/mach-msm/clock-pcom.c
index 02c8765..428423a 100644
--- a/arch/arm/mach-msm/clock-pcom.c
+++ b/arch/arm/mach-msm/clock-pcom.c
@@ -190,7 +190,6 @@
 struct clk_ops clk_ops_pcom = {
 	.enable = pc_clk_enable,
 	.disable = pc_clk_disable,
-	.auto_off = pc_clk_disable,
 	.reset = pc_reset,
 	.set_rate = pc_clk_set_rate,
 	.set_max_rate = pc_clk_set_max_rate,
@@ -205,7 +204,6 @@
 struct clk_ops clk_ops_pcom_ext_config = {
 	.enable = pc_clk_enable,
 	.disable = pc_clk_disable,
-	.auto_off = pc_clk_disable,
 	.reset = pc_reset,
 	.set_rate = pc_clk_set_ext_config,
 	.set_max_rate = pc_clk_set_max_rate,
diff --git a/arch/arm/mach-msm/clock-pll.c b/arch/arm/mach-msm/clock-pll.c
index d839911..2938135 100644
--- a/arch/arm/mach-msm/clock-pll.c
+++ b/arch/arm/mach-msm/clock-pll.c
@@ -55,16 +55,16 @@
 
 #define ENABLE_WAIT_MAX_LOOPS 200
 
-int pll_vote_clk_enable(struct clk *clk)
+int pll_vote_clk_enable(struct clk *c)
 {
 	u32 ena, count;
 	unsigned long flags;
-	struct pll_vote_clk *pll = to_pll_vote_clk(clk);
+	struct pll_vote_clk *pllv = to_pll_vote_clk(c);
 
 	spin_lock_irqsave(&pll_reg_lock, flags);
-	ena = readl_relaxed(PLL_EN_REG(pll));
-	ena |= pll->en_mask;
-	writel_relaxed(ena, PLL_EN_REG(pll));
+	ena = readl_relaxed(PLL_EN_REG(pllv));
+	ena |= pllv->en_mask;
+	writel_relaxed(ena, PLL_EN_REG(pllv));
 	spin_unlock_irqrestore(&pll_reg_lock, flags);
 
 	/*
@@ -75,45 +75,44 @@
 
 	/* Wait for pll to enable. */
 	for (count = ENABLE_WAIT_MAX_LOOPS; count > 0; count--) {
-		if (readl_relaxed(PLL_STATUS_REG(pll)) & pll->status_mask)
+		if (readl_relaxed(PLL_STATUS_REG(pllv)) & pllv->status_mask)
 			return 0;
 		udelay(1);
 	}
 
-	WARN("PLL %s didn't enable after voting for it!\n", clk->dbg_name);
+	WARN("PLL %s didn't enable after voting for it!\n", c->dbg_name);
 
 	return -ETIMEDOUT;
 }
 
-void pll_vote_clk_disable(struct clk *clk)
+void pll_vote_clk_disable(struct clk *c)
 {
 	u32 ena;
 	unsigned long flags;
-	struct pll_vote_clk *pll = to_pll_vote_clk(clk);
+	struct pll_vote_clk *pllv = to_pll_vote_clk(c);
 
 	spin_lock_irqsave(&pll_reg_lock, flags);
-	ena = readl_relaxed(PLL_EN_REG(pll));
-	ena &= ~(pll->en_mask);
-	writel_relaxed(ena, PLL_EN_REG(pll));
+	ena = readl_relaxed(PLL_EN_REG(pllv));
+	ena &= ~(pllv->en_mask);
+	writel_relaxed(ena, PLL_EN_REG(pllv));
 	spin_unlock_irqrestore(&pll_reg_lock, flags);
 }
 
-struct clk *pll_vote_clk_get_parent(struct clk *clk)
+struct clk *pll_vote_clk_get_parent(struct clk *c)
 {
-	struct pll_vote_clk *pll = to_pll_vote_clk(clk);
-	return pll->parent;
+	return to_pll_vote_clk(c)->parent;
 }
 
-int pll_vote_clk_is_enabled(struct clk *clk)
+int pll_vote_clk_is_enabled(struct clk *c)
 {
-	struct pll_vote_clk *pll = to_pll_vote_clk(clk);
-	return !!(readl_relaxed(PLL_STATUS_REG(pll)) & pll->status_mask);
+	struct pll_vote_clk *pllv = to_pll_vote_clk(c);
+	return !!(readl_relaxed(PLL_STATUS_REG(pllv)) & pllv->status_mask);
 }
 
-static enum handoff pll_vote_clk_handoff(struct clk *clk)
+static enum handoff pll_vote_clk_handoff(struct clk *c)
 {
-	struct pll_vote_clk *pll = to_pll_vote_clk(clk);
-	if (readl_relaxed(PLL_EN_REG(pll)) & pll->en_mask)
+	struct pll_vote_clk *pllv = to_pll_vote_clk(c);
+	if (readl_relaxed(PLL_EN_REG(pllv)) & pllv->en_mask)
 		return HANDOFF_ENABLED_CLK;
 
 	return HANDOFF_DISABLED_CLK;
@@ -122,7 +121,6 @@
 struct clk_ops clk_ops_pll_vote = {
 	.enable = pll_vote_clk_enable,
 	.disable = pll_vote_clk_disable,
-	.auto_off = pll_vote_clk_disable,
 	.is_enabled = pll_vote_clk_is_enabled,
 	.get_parent = pll_vote_clk_get_parent,
 	.handoff = pll_vote_clk_handoff,
@@ -158,10 +156,10 @@
 	mb();
 }
 
-static int local_pll_clk_enable(struct clk *clk)
+static int local_pll_clk_enable(struct clk *c)
 {
 	unsigned long flags;
-	struct pll_clk *pll = to_pll_clk(clk);
+	struct pll_clk *pll = to_pll_clk(c);
 
 	spin_lock_irqsave(&pll_reg_lock, flags);
 	__pll_clk_enable_reg(PLL_MODE_REG(pll));
@@ -177,10 +175,10 @@
 	writel_relaxed(mode, mode_reg);
 }
 
-static void local_pll_clk_disable(struct clk *clk)
+static void local_pll_clk_disable(struct clk *c)
 {
 	unsigned long flags;
-	struct pll_clk *pll = to_pll_clk(clk);
+	struct pll_clk *pll = to_pll_clk(c);
 
 	/*
 	 * Disable the PLL output, disable test mode, enable
@@ -191,9 +189,9 @@
 	spin_unlock_irqrestore(&pll_reg_lock, flags);
 }
 
-static enum handoff local_pll_clk_handoff(struct clk *clk)
+static enum handoff local_pll_clk_handoff(struct clk *c)
 {
-	struct pll_clk *pll = to_pll_clk(clk);
+	struct pll_clk *pll = to_pll_clk(c);
 	u32 mode = readl_relaxed(PLL_MODE_REG(pll));
 	u32 mask = PLL_BYPASSNL | PLL_RESET_N | PLL_OUTCTRL;
 
@@ -203,17 +201,16 @@
 	return HANDOFF_DISABLED_CLK;
 }
 
-static struct clk *local_pll_clk_get_parent(struct clk *clk)
+static struct clk *local_pll_clk_get_parent(struct clk *c)
 {
-	struct pll_clk *pll = to_pll_clk(clk);
-	return pll->parent;
+	return to_pll_clk(c)->parent;
 }
 
-int sr_pll_clk_enable(struct clk *clk)
+int sr_pll_clk_enable(struct clk *c)
 {
 	u32 mode;
 	unsigned long flags;
-	struct pll_clk *pll = to_pll_clk(clk);
+	struct pll_clk *pll = to_pll_clk(c);
 
 	spin_lock_irqsave(&pll_reg_lock, flags);
 	mode = readl_relaxed(PLL_MODE_REG(pll));
@@ -250,10 +247,10 @@
 
 #define PLL_LOCKED_BIT BIT(16)
 
-int copper_pll_clk_enable(struct clk *clk)
+int msm8974_pll_clk_enable(struct clk *c)
 {
 	unsigned long flags;
-	struct pll_clk *pll = to_pll_clk(clk);
+	struct pll_clk *pll = to_pll_clk(c);
 	u32 count, mode;
 	int ret = 0;
 
@@ -282,7 +279,7 @@
 	}
 
 	if (!(readl_relaxed(PLL_STATUS_REG(pll)) & PLL_LOCKED_BIT)) {
-		WARN("PLL %s didn't lock after enabling it!\n", clk->dbg_name);
+		WARN("PLL %s didn't lock after enabling it!\n", c->dbg_name);
 		ret = -ETIMEDOUT;
 		goto out;
 	}
@@ -302,7 +299,6 @@
 struct clk_ops clk_ops_local_pll = {
 	.enable = local_pll_clk_enable,
 	.disable = local_pll_clk_disable,
-	.auto_off = local_pll_clk_disable,
 	.handoff = local_pll_clk_handoff,
 	.get_parent = local_pll_clk_get_parent,
 };
@@ -379,9 +375,9 @@
 
 }
 
-static int pll_clk_enable(struct clk *clk)
+static int pll_clk_enable(struct clk *c)
 {
-	struct pll_shared_clk *pll = to_pll_shared_clk(clk);
+	struct pll_shared_clk *pll = to_pll_shared_clk(c);
 	unsigned int pll_id = pll->id;
 
 	remote_spin_lock(&pll_lock);
@@ -396,9 +392,9 @@
 	return 0;
 }
 
-static void pll_clk_disable(struct clk *clk)
+static void pll_clk_disable(struct clk *c)
 {
-	struct pll_shared_clk *pll = to_pll_shared_clk(clk);
+	struct pll_shared_clk *pll = to_pll_shared_clk(c);
 	unsigned int pll_id = pll->id;
 
 	remote_spin_lock(&pll_lock);
@@ -413,16 +409,14 @@
 	remote_spin_unlock(&pll_lock);
 }
 
-static int pll_clk_is_enabled(struct clk *clk)
+static int pll_clk_is_enabled(struct clk *c)
 {
-	struct pll_shared_clk *pll = to_pll_shared_clk(clk);
-
-	return readl_relaxed(PLL_MODE_REG(pll)) & BIT(0);
+	return readl_relaxed(PLL_MODE_REG(to_pll_shared_clk(c))) & BIT(0);
 }
 
-static enum handoff pll_clk_handoff(struct clk *clk)
+static enum handoff pll_clk_handoff(struct clk *c)
 {
-	struct pll_shared_clk *pll = to_pll_shared_clk(clk);
+	struct pll_shared_clk *pll = to_pll_shared_clk(c);
 	unsigned int pll_lval;
 	struct pll_rate *l;
 
@@ -438,12 +432,12 @@
 	/* Convert PLL L values to PLL Output rate */
 	for (l = pll_l_rate; l->rate != 0; l++) {
 		if (l->lvalue == pll_lval) {
-			clk->rate = l->rate;
+			c->rate = l->rate;
 			break;
 		}
 	}
 
-	if (!clk->rate) {
+	if (!c->rate) {
 		pr_crit("Unknown PLL's L value!\n");
 		BUG();
 	}
diff --git a/arch/arm/mach-msm/clock-pll.h b/arch/arm/mach-msm/clock-pll.h
index a8c642f..90f8a95 100644
--- a/arch/arm/mach-msm/clock-pll.h
+++ b/arch/arm/mach-msm/clock-pll.h
@@ -34,7 +34,7 @@
  * @id: PLL ID
  * @mode_reg: enable register
  * @parent: clock source
- * @c: clk
+ * @c: clock
  */
 struct pll_shared_clk {
 	unsigned int id;
@@ -45,9 +45,9 @@
 
 extern struct clk_ops clk_ops_pll;
 
-static inline struct pll_shared_clk *to_pll_shared_clk(struct clk *clk)
+static inline struct pll_shared_clk *to_pll_shared_clk(struct clk *c)
 {
-	return container_of(clk, struct pll_shared_clk, c);
+	return container_of(c, struct pll_shared_clk, c);
 }
 
 /**
@@ -64,7 +64,7 @@
  * @status_mask: ANDed with @status_reg to determine if PLL is active.
  * @status_reg: status register
  * @parent: clock source
- * @c: clk
+ * @c: clock
  */
 struct pll_vote_clk {
 	u32 *soft_vote;
@@ -81,9 +81,9 @@
 
 extern struct clk_ops clk_ops_pll_vote;
 
-static inline struct pll_vote_clk *to_pll_vote_clk(struct clk *clk)
+static inline struct pll_vote_clk *to_pll_vote_clk(struct clk *c)
 {
-	return container_of(clk, struct pll_vote_clk, c);
+	return container_of(c, struct pll_vote_clk, c);
 }
 
 /**
@@ -105,21 +105,21 @@
 
 extern struct clk_ops clk_ops_local_pll;
 
-static inline struct pll_clk *to_pll_clk(struct clk *clk)
+static inline struct pll_clk *to_pll_clk(struct clk *c)
 {
-	return container_of(clk, struct pll_clk, c);
+	return container_of(c, struct pll_clk, c);
 }
 
-int sr_pll_clk_enable(struct clk *clk);
-int copper_pll_clk_enable(struct clk *clk);
+int sr_pll_clk_enable(struct clk *c);
+int msm8974_pll_clk_enable(struct clk *c);
 
 /*
  * PLL vote clock APIs
  */
-int pll_vote_clk_enable(struct clk *clk);
-void pll_vote_clk_disable(struct clk *clk);
-struct clk *pll_vote_clk_get_parent(struct clk *clk);
-int pll_vote_clk_is_enabled(struct clk *clk);
+int pll_vote_clk_enable(struct clk *c);
+void pll_vote_clk_disable(struct clk *c);
+struct clk *pll_vote_clk_get_parent(struct clk *c);
+int pll_vote_clk_is_enabled(struct clk *c);
 
 struct pll_config {
 	u32 l;
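
Both pll_vote_clk_enable() and msm8974_pll_clk_enable() in the diff above rely on the same bounded-poll pattern: read a status register up to ENABLE_WAIT_MAX_LOOPS times with a short delay, and return -ETIMEDOUT if the lock/enable bit never appears. A minimal stand-alone sketch of that pattern follows; fake_status_read() stands in for the hardware register, and the bit position mirrors PLL_LOCKED_BIT but is otherwise illustrative.

/* Bounded status-poll pattern, modeled without hardware access. */
#include <errno.h>
#include <stdio.h>

#define ENABLE_WAIT_MAX_LOOPS 200
#define STATUS_READY_BIT (1u << 16)

/* Fake "register" that reports ready after a few reads. */
static unsigned fake_status_read(void)
{
	static int reads;
	return (++reads > 5) ? STATUS_READY_BIT : 0;
}

static int wait_for_ready(void)
{
	for (int count = ENABLE_WAIT_MAX_LOOPS; count > 0; count--) {
		if (fake_status_read() & STATUS_READY_BIT)
			return 0;
		/* the driver inserts udelay(1) between polls */
	}
	return -ETIMEDOUT;
}

int main(void)
{
	printf("wait_for_ready() = %d\n", wait_for_ready());
	return 0;
}
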
diff --git a/arch/arm/mach-msm/clock-rpm.c b/arch/arm/mach-msm/clock-rpm.c
index 2ec40ce..8096c10 100644
--- a/arch/arm/mach-msm/clock-rpm.c
+++ b/arch/arm/mach-msm/clock-rpm.c
@@ -51,16 +51,18 @@
 	int rc;
 	struct msm_rpm_iv_pair iv = { .id = r->rpm_status_id, };
 	rc = msm_rpm_get_status(&iv, 1);
-	return (rc < 0) ? rc : iv.value * 1000;
+	return (rc < 0) ? rc : iv.value * r->factor;
 }
 
-#define RPM_SMD_KEY_CLOCK_SET_RATE	0x007A484B
+#define RPM_SMD_KEY_RATE	0x007A484B
+#define RPM_SMD_KEY_ENABLE	0x62616E45
 
 static int clk_rpmrs_set_rate_smd(struct rpm_clk *r, uint32_t value,
 				uint32_t context, int noirq)
 {
+	u32 rpm_key = r->branch ? RPM_SMD_KEY_ENABLE : RPM_SMD_KEY_RATE;
 	struct msm_rpm_kvp kvp = {
-		.key = RPM_SMD_KEY_CLOCK_SET_RATE,
+		.key = rpm_key,
 		.data = (void *)&value,
 		.length = sizeof(value),
 	};
@@ -190,7 +192,7 @@
 	unsigned long this_khz, this_sleep_khz;
 	int rc = 0;
 
-	this_khz = DIV_ROUND_UP(rate, 1000);
+	this_khz = DIV_ROUND_UP(rate, r->factor);
 
 	spin_lock_irqsave(&rpm_clock_lock, flags);
 
@@ -275,7 +277,7 @@
 	if (!r->branch) {
 		r->last_set_khz = iv.value;
 		r->last_set_sleep_khz = iv.value;
-		clk->rate = iv.value * 1000;
+		clk->rate = iv.value * r->factor;
 	}
 
 	return HANDOFF_ENABLED_CLK;
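
The per-clock factor introduced above generalizes the old hard-coded 1000: kHz-based RPM clocks keep factor = 1000, while the QDSS macros added to clock-rpm.h below set factor = 1 because that clock's RPM value is already in Hz. A small sketch of the two conversion directions, using made-up example values:

/* Rate conversions with a per-clock factor (illustrative values only). */
#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned long rate_hz = 19200000;
	unsigned factor_khz = 1000, factor_hz = 1;

	/* set_rate path: Hz requested by the client -> RPM units */
	printf("kHz clock sends %lu to the RPM\n", DIV_ROUND_UP(rate_hz, factor_khz));
	printf("Hz clock sends %lu to the RPM\n", DIV_ROUND_UP(rate_hz, factor_hz));

	/* handoff path: RPM status value -> Hz */
	printf("status 19200 * factor 1000 = %lu Hz\n", 19200UL * factor_khz);
	return 0;
}
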
diff --git a/arch/arm/mach-msm/clock-rpm.h b/arch/arm/mach-msm/clock-rpm.h
index b2358bc..ce878ce 100644
--- a/arch/arm/mach-msm/clock-rpm.h
+++ b/arch/arm/mach-msm/clock-rpm.h
@@ -32,6 +32,7 @@
 	unsigned last_set_sleep_khz;
 	bool enabled;
 	bool branch; /* true: RPM only accepts 1 for ON and 0 for OFF */
+	unsigned factor;
 	struct clk_rpmrs_data *rpmrs_data;
 
 	struct rpm_clk *peer;
@@ -53,10 +54,10 @@
 		.rpm_clk_id = (r_id), \
 		.rpm_status_id = (stat_id), \
 		.peer = &active, \
+		.factor = 1000, \
 		.rpmrs_data = (rpmrsdata),\
 		.c = { \
 			.ops = &clk_ops_rpm, \
-			.flags = CLKFLAG_SKIP_AUTO_OFF, \
 			.dbg_name = #name, \
 			CLK_INIT(name.c), \
 			.depends = dep, \
@@ -68,10 +69,10 @@
 		.rpm_status_id = (stat_id), \
 		.peer = &name, \
 		.active_only = true, \
+		.factor = 1000, \
 		.rpmrs_data = (rpmrsdata),\
 		.c = { \
 			.ops = &clk_ops_rpm, \
-			.flags = CLKFLAG_SKIP_AUTO_OFF, \
 			.dbg_name = #active, \
 			CLK_INIT(active.c), \
 			.depends = dep, \
@@ -88,11 +89,11 @@
 		.peer = &active, \
 		.last_set_khz = ((r) / 1000), \
 		.last_set_sleep_khz = ((r) / 1000), \
+		.factor = 1000, \
 		.branch = true, \
 		.rpmrs_data = (rpmrsdata),\
 		.c = { \
 			.ops = &clk_ops_rpm_branch, \
-			.flags = CLKFLAG_SKIP_AUTO_OFF, \
 			.dbg_name = #name, \
 			.rate = (r), \
 			CLK_INIT(name.c), \
@@ -106,11 +107,11 @@
 		.peer = &name, \
 		.last_set_khz = ((r) / 1000), \
 		.active_only = true, \
+		.factor = 1000, \
 		.branch = true, \
 		.rpmrs_data = (rpmrsdata),\
 		.c = { \
 			.ops = &clk_ops_rpm_branch, \
-			.flags = CLKFLAG_SKIP_AUTO_OFF, \
 			.dbg_name = #active, \
 			.rate = (r), \
 			CLK_INIT(active.c), \
@@ -118,10 +119,46 @@
 		}, \
 	};
 
+#define __DEFINE_CLK_RPM_QDSS(name, active, type, r_id, stat_id, rpmrsdata) \
+	static struct rpm_clk active; \
+	static struct rpm_clk name = { \
+		.rpm_res_type = (type), \
+		.rpm_clk_id = (r_id), \
+		.rpm_status_id = (stat_id), \
+		.peer = &active, \
+		.factor = 1, \
+		.rpmrs_data = (rpmrsdata),\
+		.c = { \
+			.ops = &clk_ops_rpm, \
+			.dbg_name = #name, \
+			CLK_INIT(name.c), \
+			.warned = true, \
+		}, \
+	}; \
+	static struct rpm_clk active = { \
+		.rpm_res_type = (type), \
+		.rpm_clk_id = (r_id), \
+		.rpm_status_id = (stat_id), \
+		.peer = &name, \
+		.active_only = true, \
+		.factor = 1, \
+		.rpmrs_data = (rpmrsdata),\
+		.c = { \
+			.ops = &clk_ops_rpm, \
+			.dbg_name = #active, \
+			CLK_INIT(active.c), \
+			.warned = true, \
+		}, \
+	};
+
 #define DEFINE_CLK_RPM(name, active, r_id, dep) \
 	__DEFINE_CLK_RPM(name, active, 0, MSM_RPM_ID_##r_id##_CLK, \
 		MSM_RPM_STATUS_ID_##r_id##_CLK, dep, &clk_rpmrs_data)
 
+#define DEFINE_CLK_RPM_QDSS(name, active) \
+	__DEFINE_CLK_RPM_QDSS(name, active, 0, MSM_RPM_ID_QDSS_CLK, \
+		MSM_RPM_STATUS_ID_QDSS_CLK, &clk_rpmrs_data)
+
 #define DEFINE_CLK_RPM_BRANCH(name, active, r_id, r) \
 	__DEFINE_CLK_RPM_BRANCH(name, active, 0, MSM_RPM_ID_##r_id##_CLK, \
 			MSM_RPM_STATUS_ID_##r_id##_CLK, r, &clk_rpmrs_data)
diff --git a/arch/arm/mach-msm/clock-voter.h b/arch/arm/mach-msm/clock-voter.h
index c9aebba..407aac6 100644
--- a/arch/arm/mach-msm/clock-voter.h
+++ b/arch/arm/mach-msm/clock-voter.h
@@ -34,7 +34,6 @@
 		.c = { \
 			.dbg_name = #clk_name, \
 			.ops = &clk_ops_voter, \
-			.flags = CLKFLAG_SKIP_AUTO_OFF, \
 			.rate = _default_rate, \
 			CLK_INIT(clk_name.c), \
 		}, \
diff --git a/arch/arm/mach-msm/clock.c b/arch/arm/mach-msm/clock.c
index 8bf98fa..da8c3a9 100644
--- a/arch/arm/mach-msm/clock.c
+++ b/arch/arm/mach-msm/clock.c
@@ -506,34 +506,16 @@
 		clk_init_data->post_init();
 }
 
-/*
- * The bootloader and/or AMSS may have left various clocks enabled.
- * Disable any clocks that have not been explicitly enabled by a
- * clk_enable() call and don't have the CLKFLAG_SKIP_AUTO_OFF flag.
- */
 static int __init clock_late_init(void)
 {
-	unsigned n, count = 0;
 	struct handoff_clk *h, *h_temp;
-	unsigned long flags;
-	int ret = 0;
+	int n, ret = 0;
 
 	clock_debug_init(clk_init_data);
-	for (n = 0; n < clk_init_data->size; n++) {
-		struct clk *clk = clk_init_data->table[n].clk;
+	for (n = 0; n < clk_init_data->size; n++)
+		clock_debug_add(clk_init_data->table[n].clk);
 
-		clock_debug_add(clk);
-		spin_lock_irqsave(&clk->lock, flags);
-		if (!(clk->flags & CLKFLAG_SKIP_AUTO_OFF)) {
-			if (!clk->count && clk->ops->auto_off) {
-				count++;
-				clk->ops->auto_off(clk);
-			}
-		}
-		spin_unlock_irqrestore(&clk->lock, flags);
-	}
-	pr_info("clock_late_init() disabled %d unused clocks\n", count);
-
+	pr_info("%s: Removing enables held for handed-off clocks\n", __func__);
 	list_for_each_entry_safe(h, h_temp, &handoff_list, list) {
 		clk_disable_unprepare(h->clk);
 		list_del(&h->list);
diff --git a/arch/arm/mach-msm/clock.h b/arch/arm/mach-msm/clock.h
index 03d5790..56d3c6f 100644
--- a/arch/arm/mach-msm/clock.h
+++ b/arch/arm/mach-msm/clock.h
@@ -33,7 +33,6 @@
 #define CLKFLAG_RETAIN			0x00000040
 #define CLKFLAG_NORETAIN		0x00000080
 #define CLKFLAG_SKIP_HANDOFF		0x00000100
-#define CLKFLAG_SKIP_AUTO_OFF		0x00000200
 #define CLKFLAG_MIN			0x00000400
 #define CLKFLAG_MAX			0x00000800
 
@@ -90,7 +89,6 @@
 	int (*enable)(struct clk *clk);
 	void (*disable)(struct clk *clk);
 	void (*unprepare)(struct clk *clk);
-	void (*auto_off)(struct clk *clk);
 	void (*enable_hwcg)(struct clk *clk);
 	void (*disable_hwcg)(struct clk *clk);
 	int (*in_hwcg_mode)(struct clk *clk);
@@ -172,7 +170,7 @@
 extern struct clock_init_data qds8x50_clock_init_data;
 extern struct clock_init_data msm8625_dummy_clock_init_data;
 extern struct clock_init_data msm8930_clock_init_data;
-extern struct clock_init_data msmcopper_clock_init_data;
+extern struct clock_init_data msm8974_clock_init_data;
 
 void msm_clock_init(struct clock_init_data *data);
 int vote_vdd_level(struct clk_vdd_class *vdd_class, int level);
diff --git a/arch/arm/mach-msm/devices-8064.c b/arch/arm/mach-msm/devices-8064.c
index f6ce848..614785e 100644
--- a/arch/arm/mach-msm/devices-8064.c
+++ b/arch/arm/mach-msm/devices-8064.c
@@ -17,6 +17,7 @@
 #include <linux/msm_rotator.h>
 #include <linux/clkdev.h>
 #include <linux/dma-mapping.h>
+#include <linux/coresight.h>
 #include <mach/irqs-8064.h>
 #include <mach/board.h>
 #include <mach/msm_iomap.h>
@@ -32,7 +33,6 @@
 #include <mach/msm_smd.h>
 #include <mach/msm_dcvs.h>
 #include <mach/msm_rtb.h>
-#include <mach/qdss.h>
 #include <linux/ion.h>
 #include "clock.h"
 #include "devices.h"
@@ -1192,6 +1192,7 @@
 	.disable_dmx = 0,
 	.disable_fullhd = 0,
 	.cont_mode_dpb_count = 18,
+	.fw_addr = 0x9fe00000,
 };
 
 struct platform_device apq8064_msm_device_vidc = {
@@ -1804,8 +1805,8 @@
 	FS_8X60(FS_MDP,    "vdd",       "mdp.0",        &mdp_fs_data),
 	FS_8X60(FS_ROT,    "vdd",	"msm_rotator.0", &rot_fs_data),
 	FS_8X60(FS_IJPEG,  "vdd",	"msm_gemini.0",	&ijpeg_fs_data),
-	FS_8X60(FS_VFE,    "fs_vfe",	NULL,	&vfe_fs_data),
-	FS_8X60(FS_VPE,    "fs_vpe",	NULL,	&vpe_fs_data),
+	FS_8X60(FS_VFE,    "vdd",	"msm_vfe.0",	&vfe_fs_data),
+	FS_8X60(FS_VPE,    "vdd",	"msm_vpe.0",	&vpe_fs_data),
 	FS_8X60(FS_GFX3D,  "vdd",	"kgsl-3d0.0",	&gfx3d_fs_data),
 	FS_8X60(FS_VED,    "vdd",	"msm_vidc.0",	&ved_fs_data),
 	FS_8X60(FS_VCAP,   "vdd",	"msm_vcap.0",	&vcap_fs_data),
@@ -1917,6 +1918,7 @@
 		MSM_RPM_MAP(8064, HDMI_SWITCH, HDMI_SWITCH, 1),
 		MSM_RPM_MAP(8064, DDR_DMM_0, DDR_DMM, 2),
 		MSM_RPM_MAP(8064, QDSS_CLK, QDSS_CLK, 1),
+		MSM_RPM_MAP(8064, VDDMIN_GPIO, VDDMIN_GPIO, 1),
 	},
 	.target_status = {
 		MSM_RPM_STATUS_ID_MAP(8064, VERSION_MAJOR),
@@ -2050,6 +2052,7 @@
 		MSM_RPM_STATUS_ID_MAP(8064, PM8821_S2_1),
 		MSM_RPM_STATUS_ID_MAP(8064, PM8821_L1_0),
 		MSM_RPM_STATUS_ID_MAP(8064, PM8821_L1_1),
+		MSM_RPM_STATUS_ID_MAP(8064, VDDMIN_GPIO),
 	},
 	.target_ctrl_id = {
 		MSM_RPM_CTRL_MAP(8064, VERSION_MAJOR),
@@ -2271,6 +2274,7 @@
 #define AP2MDM_STATUS			48
 #define AP2MDM_SOFT_RESET		27
 #define AP2MDM_WAKEUP			35
+#define MDM2AP_PBLRDY			46
 
 static struct resource mdm_resources[] = {
 	{
@@ -2309,6 +2313,12 @@
 		.name	= "AP2MDM_WAKEUP",
 		.flags	= IORESOURCE_IO,
 	},
+	{
+		.start	= MDM2AP_PBLRDY,
+		.end	= MDM2AP_PBLRDY,
+		.name	= "MDM2AP_PBLRDY",
+		.flags	= IORESOURCE_IO,
+	},
 };
 
 struct platform_device mdm_8064_device = {
@@ -2586,15 +2596,15 @@
 		.name = "jpegd_dst",
 		.domain = CAMERA_DOMAIN,
 	},
-	/* Rotator src*/
+	/* Rotator */
 	{
 		.name = "rot_src",
-		.domain = ROTATOR_SRC_DOMAIN,
+		.domain = ROTATOR_DOMAIN,
 	},
-	/* Rotator dst */
+	/* Rotator */
 	{
 		.name = "rot_dst",
-		.domain = ROTATOR_DST_DOMAIN,
+		.domain = ROTATOR_DOMAIN,
 	},
 	/* Video */
 	{
@@ -2650,36 +2660,18 @@
 		},
 };
 
-static struct mem_pool apq8064_display_read_pools[] =  {
+static struct mem_pool apq8064_display_pools[] =  {
 	[GEN_POOL] =
-	/* One address space for display reads */
+	/* One address space for display */
 		{
 			.paddr	= SZ_128K,
 			.size	= SZ_2G - SZ_128K,
 		},
 };
 
-static struct mem_pool apq8064_display_write_pools[] =  {
+static struct mem_pool apq8064_rotator_pools[] =  {
 	[GEN_POOL] =
-	/* One address space for display writes */
-		{
-			.paddr	= SZ_128K,
-			.size	= SZ_2G - SZ_128K,
-		},
-};
-
-static struct mem_pool apq8064_rotator_src_pools[] =  {
-	[GEN_POOL] =
-	/* One address space for rotator src */
-		{
-			.paddr	= SZ_128K,
-			.size	= SZ_2G - SZ_128K,
-		},
-};
-
-static struct mem_pool apq8064_rotator_dst_pools[] =  {
-	[GEN_POOL] =
-	/* One address space for rotator dst */
+	/* One address space for rotator */
 		{
 			.paddr	= SZ_128K,
 			.size	= SZ_2G - SZ_128K,
@@ -2695,21 +2687,13 @@
 			.iova_pools = apq8064_camera_pools,
 			.npools = ARRAY_SIZE(apq8064_camera_pools),
 		},
-		[DISPLAY_READ_DOMAIN] = {
-			.iova_pools = apq8064_display_read_pools,
-			.npools = ARRAY_SIZE(apq8064_display_read_pools),
+		[DISPLAY_DOMAIN] = {
+			.iova_pools = apq8064_display_pools,
+			.npools = ARRAY_SIZE(apq8064_display_pools),
 		},
-		[DISPLAY_WRITE_DOMAIN] = {
-			.iova_pools = apq8064_display_write_pools,
-			.npools = ARRAY_SIZE(apq8064_display_write_pools),
-		},
-		[ROTATOR_SRC_DOMAIN] = {
-			.iova_pools = apq8064_rotator_src_pools,
-			.npools = ARRAY_SIZE(apq8064_rotator_src_pools),
-		},
-		[ROTATOR_DST_DOMAIN] = {
-			.iova_pools = apq8064_rotator_dst_pools,
-			.npools = ARRAY_SIZE(apq8064_rotator_dst_pools),
+		[ROTATOR_DOMAIN] = {
+			.iova_pools = apq8064_rotator_pools,
+			.npools = ARRAY_SIZE(apq8064_rotator_pools),
 		},
 };
 
diff --git a/arch/arm/mach-msm/devices-8930.c b/arch/arm/mach-msm/devices-8930.c
index 03685da..3212364 100644
--- a/arch/arm/mach-msm/devices-8930.c
+++ b/arch/arm/mach-msm/devices-8930.c
@@ -25,6 +25,7 @@
 #include <mach/socinfo.h>
 #include <mach/iommu_domains.h>
 #include <mach/msm_rtb.h>
+#include <mach/msm_cache_dump.h>
 
 #include "devices.h"
 #include "rpm_log.h"
@@ -434,8 +435,8 @@
 	FS_8X60(FS_MDP,    "vdd",	"mdp.0",	&mdp_fs_data),
 	FS_8X60(FS_ROT,    "vdd",	"msm_rotator.0", &rot_fs_data),
 	FS_8X60(FS_IJPEG,  "vdd",	"msm_gemini.0", &ijpeg_fs_data),
-	FS_8X60(FS_VFE,    "fs_vfe",	NULL,	&vfe_fs_data),
-	FS_8X60(FS_VPE,    "fs_vpe",	NULL,	&vpe_fs_data),
+	FS_8X60(FS_VFE,    "vdd",	"msm_vfe.0",	&vfe_fs_data),
+	FS_8X60(FS_VPE,    "vdd",	"msm_vpe.0",	&vpe_fs_data),
 	FS_8X60(FS_GFX3D,  "vdd",	"kgsl-3d0.0",	&gfx3d_fs_data),
 	FS_8X60(FS_VED,    "vdd",	"msm_vidc.0",	&ved_fs_data),
 };
@@ -694,6 +695,7 @@
 #endif
 	.disable_dmx = 1,
 	.disable_fullhd = 0,
+	.fw_addr = 0x9fe00000,
 };
 
 struct platform_device apq8930_msm_device_vidc = {
@@ -765,12 +767,12 @@
 	/* Rotator */
 	{
 		.name = "rot_src",
-		.domain = ROTATOR_SRC_DOMAIN,
+		.domain = ROTATOR_DOMAIN,
 	},
 	/* Rotator */
 	{
 		.name = "rot_dst",
-		.domain = ROTATOR_DST_DOMAIN,
+		.domain = ROTATOR_DOMAIN,
 	},
 	/* Video */
 	{
@@ -826,36 +828,18 @@
 		},
 };
 
-static struct mem_pool msm8930_display_read_pools[] =  {
+static struct mem_pool msm8930_display_pools[] =  {
 	[GEN_POOL] =
-	/* One address space for display reads */
+	/* One address space for display */
 		{
 			.paddr	= SZ_128K,
 			.size	= SZ_2G - SZ_128K,
 		},
 };
 
-static struct mem_pool msm8930_display_write_pools[] =  {
+static struct mem_pool msm8930_rotator_pools[] =  {
 	[GEN_POOL] =
-	/* One address space for display writes */
-		{
-			.paddr	= SZ_128K,
-			.size	= SZ_2G - SZ_128K,
-		},
-};
-
-static struct mem_pool msm8930_rotator_src_pools[] =  {
-	[GEN_POOL] =
-	/* One address space for rotator src */
-		{
-			.paddr	= SZ_128K,
-			.size	= SZ_2G - SZ_128K,
-		},
-};
-
-static struct mem_pool msm8930_rotator_dst_pools[] =  {
-	[GEN_POOL] =
-	/* One address space for rotator dst */
+	/* One address space for rotator */
 		{
 			.paddr	= SZ_128K,
 			.size	= SZ_2G - SZ_128K,
@@ -871,21 +855,13 @@
 			.iova_pools = msm8930_camera_pools,
 			.npools = ARRAY_SIZE(msm8930_camera_pools),
 		},
-		[DISPLAY_READ_DOMAIN] = {
-			.iova_pools = msm8930_display_read_pools,
-			.npools = ARRAY_SIZE(msm8930_display_read_pools),
+		[DISPLAY_DOMAIN] = {
+			.iova_pools = msm8930_display_pools,
+			.npools = ARRAY_SIZE(msm8930_display_pools),
 		},
-		[DISPLAY_WRITE_DOMAIN] = {
-			.iova_pools = msm8930_display_write_pools,
-			.npools = ARRAY_SIZE(msm8930_display_write_pools),
-		},
-		[ROTATOR_SRC_DOMAIN] = {
-			.iova_pools = msm8930_rotator_src_pools,
-			.npools = ARRAY_SIZE(msm8930_rotator_src_pools),
-		},
-		[ROTATOR_DST_DOMAIN] = {
-			.iova_pools = msm8930_rotator_dst_pools,
-			.npools = ARRAY_SIZE(msm8930_rotator_dst_pools),
+		[ROTATOR_DOMAIN] = {
+			.iova_pools = msm8930_rotator_pools,
+			.npools = ARRAY_SIZE(msm8930_rotator_pools),
 		},
 };
 
@@ -927,3 +903,23 @@
 		.platform_data = &msm8930_rtb_pdata,
 	},
 };
+
+#define MSM8930_L1_SIZE  SZ_1M
+/*
+ * The actual L2 size is smaller but we need a larger buffer
+ * size to store other dump information
+ */
+#define MSM8930_L2_SIZE  SZ_4M
+
+struct msm_cache_dump_platform_data msm8930_cache_dump_pdata = {
+	.l2_size = MSM8930_L2_SIZE,
+	.l1_size = MSM8930_L1_SIZE,
+};
+
+struct platform_device msm8930_cache_dump_device = {
+	.name           = "msm_cache_dump",
+	.id             = -1,
+	.dev            = {
+		.platform_data = &msm8930_cache_dump_pdata,
+	},
+};
diff --git a/arch/arm/mach-msm/devices-8960.c b/arch/arm/mach-msm/devices-8960.c
index 0d417bd..369e826 100644
--- a/arch/arm/mach-msm/devices-8960.c
+++ b/arch/arm/mach-msm/devices-8960.c
@@ -17,6 +17,7 @@
 #include <linux/msm_rotator.h>
 #include <linux/ion.h>
 #include <linux/gpio.h>
+#include <linux/coresight.h>
 #include <asm/clkdev.h>
 #include <linux/msm_kgsl.h>
 #include <linux/android_pmem.h>
@@ -37,7 +38,6 @@
 #include <sound/msm-dai-q6.h>
 #include <sound/apr_audio.h>
 #include <mach/msm_tsif.h>
-#include <mach/qdss.h>
 #include <mach/msm_serial_hs_lite.h>
 #include "clock.h"
 #include "devices.h"
@@ -729,6 +729,7 @@
 	.disable_dmx = 0,
 	.disable_fullhd = 0,
 	.cont_mode_dpb_count = 18,
+	.fw_addr = 0x9fe00000,
 };
 
 struct platform_device msm_device_vidc = {
@@ -2155,8 +2156,8 @@
 	FS_8X60(FS_MDP,    "vdd",	"mdp.0",	&mdp_fs_data),
 	FS_8X60(FS_ROT,    "vdd",	"msm_rotator.0", &rot_fs_data),
 	FS_8X60(FS_IJPEG,  "vdd",	"msm_gemini.0",	&ijpeg_fs_data),
-	FS_8X60(FS_VFE,    "fs_vfe",	NULL,	&vfe_fs_data),
-	FS_8X60(FS_VPE,    "fs_vpe",	NULL,	&vpe_fs_data),
+	FS_8X60(FS_VFE,    "vdd",	"msm_vfe.0",	&vfe_fs_data),
+	FS_8X60(FS_VPE,    "vdd",	"msm_vpe.0",	&vpe_fs_data),
 	FS_8X60(FS_GFX3D,  "vdd",	"kgsl-3d0.0",	&gfx3d_fs_data),
 	FS_8X60(FS_GFX2D0, "vdd",	"kgsl-2d0.0",	&gfx2d0_fs_data),
 	FS_8X60(FS_GFX2D1, "vdd",	"kgsl-2d1.1",	&gfx2d1_fs_data),
@@ -3639,15 +3640,15 @@
 		.name = "jpegd_dst",
 		.domain = CAMERA_DOMAIN,
 	},
-	/* Rotator src*/
+	/* Rotator */
 	{
 		.name = "rot_src",
-		.domain = ROTATOR_SRC_DOMAIN,
+		.domain = ROTATOR_DOMAIN,
 	},
-	/* Rotator dst */
+	/* Rotator */
 	{
 		.name = "rot_dst",
-		.domain = ROTATOR_DST_DOMAIN,
+		.domain = ROTATOR_DOMAIN,
 	},
 	/* Video */
 	{
@@ -3703,36 +3704,18 @@
 		},
 };
 
-static struct mem_pool msm8960_display_read_pools[] =  {
+static struct mem_pool msm8960_display_pools[] =  {
 	[GEN_POOL] =
-	/* One address space for display reads */
+	/* One address space for display */
 		{
 			.paddr	= SZ_128K,
 			.size	= SZ_2G - SZ_128K,
 		},
 };
 
-static struct mem_pool msm8960_display_write_pools[] =  {
+static struct mem_pool msm8960_rotator_pools[] =  {
 	[GEN_POOL] =
-	/* One address space for display writes */
-		{
-			.paddr	= SZ_128K,
-			.size	= SZ_2G - SZ_128K,
-		},
-};
-
-static struct mem_pool msm8960_rotator_src_pools[] =  {
-	[GEN_POOL] =
-	/* One address space for rotator src */
-		{
-			.paddr	= SZ_128K,
-			.size	= SZ_2G - SZ_128K,
-		},
-};
-
-static struct mem_pool msm8960_rotator_dst_pools[] =  {
-	[GEN_POOL] =
-	/* One address space for rotator dst */
+	/* One address space for rotator */
 		{
 			.paddr	= SZ_128K,
 			.size	= SZ_2G - SZ_128K,
@@ -3748,21 +3731,13 @@
 			.iova_pools = msm8960_camera_pools,
 			.npools = ARRAY_SIZE(msm8960_camera_pools),
 		},
-		[DISPLAY_READ_DOMAIN] = {
-			.iova_pools = msm8960_display_read_pools,
-			.npools = ARRAY_SIZE(msm8960_display_read_pools),
+		[DISPLAY_DOMAIN] = {
+			.iova_pools = msm8960_display_pools,
+			.npools = ARRAY_SIZE(msm8960_display_pools),
 		},
-		[DISPLAY_WRITE_DOMAIN] = {
-			.iova_pools = msm8960_display_write_pools,
-			.npools = ARRAY_SIZE(msm8960_display_write_pools),
-		},
-		[ROTATOR_SRC_DOMAIN] = {
-			.iova_pools = msm8960_rotator_src_pools,
-			.npools = ARRAY_SIZE(msm8960_rotator_src_pools),
-		},
-		[ROTATOR_DST_DOMAIN] = {
-			.iova_pools = msm8960_rotator_dst_pools,
-			.npools = ARRAY_SIZE(msm8960_rotator_dst_pools),
+		[ROTATOR_DOMAIN] = {
+			.iova_pools = msm8960_rotator_pools,
+			.npools = ARRAY_SIZE(msm8960_rotator_pools),
 		},
 };
 
diff --git a/arch/arm/mach-msm/devices-9615.c b/arch/arm/mach-msm/devices-9615.c
index 06d8653..9c2b26a 100644
--- a/arch/arm/mach-msm/devices-9615.c
+++ b/arch/arm/mach-msm/devices-9615.c
@@ -1348,7 +1348,9 @@
 	return 0;
 }
 
-struct android_usb_platform_data msm_android_usb_pdata;
+struct android_usb_platform_data msm_android_usb_pdata = {
+	.usb_core_id = 0,
+};
 
 struct platform_device msm_android_usb_device = {
 	.name	= "android_usb",
@@ -1358,6 +1360,19 @@
 	},
 };
 
+struct android_usb_platform_data msm_android_usb_hsic_pdata  = {
+	.usb_core_id = 1,
+};
+
+struct platform_device msm_android_usb_hsic_device = {
+	.name	= "android_usb_hsic",
+	.id	= -1,
+	.dev	= {
+		.platform_data = &msm_android_usb_hsic_pdata,
+	},
+};
+
+
 void __init msm9615_device_init(void)
 {
 	msm_spm_init(msm_spm_data, ARRAY_SIZE(msm_spm_data));
@@ -1365,6 +1380,8 @@
 	BUG_ON(msm_rpmrs_levels_init(&msm_rpmrs_data));
 	msm_android_usb_pdata.swfi_latency =
 		msm_rpmrs_levels[0].latency_us;
+	msm_android_usb_hsic_pdata.swfi_latency =
+		msm_rpmrs_levels[0].latency_us;
 
 }
 
diff --git a/arch/arm/mach-msm/devices-fsm9xxx.c b/arch/arm/mach-msm/devices-fsm9xxx.c
index 777b6d6..5f4d940 100644
--- a/arch/arm/mach-msm/devices-fsm9xxx.c
+++ b/arch/arm/mach-msm/devices-fsm9xxx.c
@@ -71,6 +71,26 @@
 	.resource	= resources_uart2,
 };
 
+static struct resource resources_uart3[] = {
+	{
+		.start	= INT_UART3,
+		.end	= INT_UART3,
+		.flags	= IORESOURCE_IRQ,
+	},
+	{
+		.start	= MSM_UART3_PHYS,
+		.end	= MSM_UART3_PHYS + MSM_UART3_SIZE - 1,
+		.flags	= IORESOURCE_MEM,
+	},
+};
+
+struct platform_device msm_device_uart3 = {
+	.name	= "msm_uim",
+	.id	= 2,
+	.num_resources	= ARRAY_SIZE(resources_uart3),
+	.resource	= resources_uart3,
+};
+
 /*
  * SSBIs
  */
diff --git a/arch/arm/mach-msm/devices-msm7x27a.c b/arch/arm/mach-msm/devices-msm7x27a.c
index b3454cd..c159926 100644
--- a/arch/arm/mach-msm/devices-msm7x27a.c
+++ b/arch/arm/mach-msm/devices-msm7x27a.c
@@ -558,8 +558,8 @@
 	},
 	{
 		.name	= "sdcc_dma_chnl",
-		.start	= DMOV_SDC3_CHAN,
-		.end	= DMOV_SDC3_CHAN,
+		.start	= DMOV_NAND_CHAN,
+		.end	= DMOV_NAND_CHAN,
 		.flags	= IORESOURCE_DMA,
 	},
 	{
@@ -1476,7 +1476,7 @@
 	},
 };
 
-struct platform_device msm8625_mipi_dsi_device = {
+static struct platform_device msm8625_mipi_dsi_device = {
 	.name   = "mipi_dsi",
 	.id     = 1,
 	.num_resources  = ARRAY_SIZE(msm8625_mipi_dsi_resources),
@@ -1504,6 +1504,8 @@
 	.resource       = msm8625_mdp_resources,
 };
 
+struct platform_device mipi_dsi_device;
+
 void __init msm_fb_register_device(char *name, void *data)
 {
 	if (!strncmp(name, "mdp", 3)) {
@@ -1512,10 +1514,13 @@
 		else
 			msm_register_device(&msm_mdp_device, data);
 	} else if (!strncmp(name, "mipi_dsi", 8)) {
-		if (cpu_is_msm8625())
+		if (cpu_is_msm8625()) {
 			msm_register_device(&msm8625_mipi_dsi_device, data);
-		else
+			mipi_dsi_device = msm8625_mipi_dsi_device;
+		} else {
 			msm_register_device(&msm_mipi_dsi_device, data);
+			mipi_dsi_device = msm_mipi_dsi_device;
+		}
 	} else if (!strncmp(name, "lcdc", 4)) {
 			msm_register_device(&msm_lcdc_device, data);
 	} else {
@@ -1715,7 +1720,7 @@
 
 void __init msm8625_init_irq(void)
 {
-	msm_gic_irq_extn_init(MSM_QGIC_DIST_BASE, MSM_QGIC_CPU_BASE);
+	msm_gic_irq_extn_init();
 	gic_init(0, GIC_PPI_START, MSM_QGIC_DIST_BASE,
 			(void *)MSM_QGIC_CPU_BASE);
 }
diff --git a/arch/arm/mach-msm/devices-msm7x30.c b/arch/arm/mach-msm/devices-msm7x30.c
index ff747e2..4b02f7a 100644
--- a/arch/arm/mach-msm/devices-msm7x30.c
+++ b/arch/arm/mach-msm/devices-msm7x30.c
@@ -820,8 +820,8 @@
 	},
 	{
 		.name	= "sdcc_dma_chnl",
-		.start	= DMOV_SDC2_CHAN,
-		.end	= DMOV_SDC2_CHAN,
+		.start	= DMOV_NAND_CHAN,
+		.end	= DMOV_NAND_CHAN,
 		.flags	= IORESOURCE_DMA,
 	},
 	{
@@ -1321,7 +1321,7 @@
 static struct kgsl_device_platform_data kgsl_2d0_pdata = {
 	.pwrlevel = {
 		{
-			.gpu_freq = 0,
+			.gpu_freq = 192000000,
 			.bus_freq = 192000000,
 		},
 	},
diff --git a/arch/arm/mach-msm/devices-msm8x60.c b/arch/arm/mach-msm/devices-msm8x60.c
index d8bf054..43c5f88 100644
--- a/arch/arm/mach-msm/devices-msm8x60.c
+++ b/arch/arm/mach-msm/devices-msm8x60.c
@@ -56,6 +56,7 @@
 #include "rpm_stats.h"
 #include <mach/mpm.h>
 #include "msm_watchdog.h"
+#include <mach/iommu_domains.h>
 
 /* Address of GSBI blocks */
 #define MSM_GSBI1_PHYS	0x16000000
@@ -231,6 +232,11 @@
 	.dev.platform_data = "dsps",
 };
 
+struct platform_device msm_pil_vidc = {
+	.name = "pil_vidc",
+	.id = -1,
+};
+
 static struct resource msm_uart1_dm_resources[] = {
 	{
 		.start = MSM_UART1DM_PHYS,
@@ -1584,6 +1590,7 @@
 #define PPSS_SMEM_BASE          0x40000000
 #define PPSS_SMEM_SIZE          0x4000
 #define PPSS_REG_PHYS_BASE	0x12080000
+#define PPSS_PAUSE_REG          0x1804
 
 #define MHZ (1000*1000)
 
@@ -1656,6 +1663,7 @@
 	.ddr_size = PPSS_DSPS_DDR_SIZE,
 	.smem_start = PPSS_SMEM_BASE,
 	.smem_size  = PPSS_SMEM_SIZE,
+	.ppss_pause_reg = PPSS_PAUSE_REG,
 	.signature = DSPS_SIGNATURE,
 };
 
@@ -2284,15 +2292,18 @@
 #ifdef CONFIG_MSM_MULTIMEDIA_USE_ION
 	.memtype = ION_CP_MM_HEAP_ID,
 	.enable_ion = 1,
-	.cp_enabled = 0,
+	.cp_enabled = 1,
+	.secure_wb_heap = 1,
 #else
 	.memtype = MEMTYPE_SMI_KERNEL,
 	.enable_ion = 0,
+	.secure_wb_heap = 0,
 #endif
 	.disable_dmx = 0,
 	.disable_fullhd = 0,
 	.cont_mode_dpb_count = 8,
 	.disable_turbo = 1,
+	.fw_addr = 0x38000000,
 };
 
 struct platform_device msm_device_vidc = {
@@ -2686,8 +2697,8 @@
 	FS_8X60(FS_MDP,    "vdd",	"mdp.0",	&mdp_fs_data),
 	FS_8X60(FS_ROT,    "vdd",	"msm_rotator.0", &rot_fs_data),
 	FS_8X60(FS_VED,    "vdd",	"msm_vidc.0",	&ved_fs_data),
-	FS_8X60(FS_VFE,    "fs_vfe",	NULL,	&vfe_fs_data),
-	FS_8X60(FS_VPE,    "fs_vpe",	NULL,	&vpe_fs_data),
+	FS_8X60(FS_VFE,    "vdd",	"msm_vfe.0",	&vfe_fs_data),
+	FS_8X60(FS_VPE,    "vdd",	"msm_vpe.0",	&vpe_fs_data),
 	FS_8X60(FS_GFX3D,  "vdd",	"kgsl-3d0.0",	&gfx3d_fs_data),
 	FS_8X60(FS_GFX2D0, "vdd",	"kgsl-2d0.0",	&gfx2d0_fs_data),
 	FS_8X60(FS_GFX2D1, "vdd",	"kgsl-2d1.1",	&gfx2d1_fs_data),
@@ -2958,3 +2969,161 @@
 	.name = "msm_rpm",
 	.id = -1,
 };
+
+struct msm_iommu_domain_name msm8660_iommu_ctx_names[] = {
+	/* Camera */
+	{
+		.name = "vpe_src",
+		.domain = CAMERA_DOMAIN,
+	},
+	/* Camera */
+	{
+		.name = "vpe_dst",
+		.domain = CAMERA_DOMAIN,
+	},
+	/* Camera */
+	{
+		.name = "vfe_imgwr",
+		.domain = CAMERA_DOMAIN,
+	},
+	/* Camera */
+	{
+		.name = "vfe_misc",
+		.domain = CAMERA_DOMAIN,
+	},
+	/* Camera */
+	{
+		.name = "ijpeg_src",
+		.domain = CAMERA_DOMAIN,
+	},
+	/* Camera */
+	{
+		.name = "ijpeg_dst",
+		.domain = CAMERA_DOMAIN,
+	},
+	/* Camera */
+	{
+		.name = "jpegd_src",
+		.domain = CAMERA_DOMAIN,
+	},
+	/* Camera */
+	{
+		.name = "jpegd_dst",
+		.domain = CAMERA_DOMAIN,
+	},
+	/* Rotator */
+	{
+		.name = "rot_src",
+		.domain = ROTATOR_DOMAIN,
+	},
+	/* Rotator */
+	{
+		.name = "rot_dst",
+		.domain = ROTATOR_DOMAIN,
+	},
+	/* Video */
+	{
+		.name = "vcodec_a_mm1",
+		.domain = VIDEO_DOMAIN,
+	},
+	/* Video */
+	{
+		.name = "vcodec_b_mm2",
+		.domain = VIDEO_DOMAIN,
+	},
+	/* Video */
+	{
+		.name = "vcodec_a_stream",
+		.domain = VIDEO_DOMAIN,
+	},
+};
+
+static struct mem_pool msm8660_video_pools[] =  {
+	/*
+	 * Video hardware has the following requirements:
+	 * 1. All video addresses used by the video hardware must be at a higher
+	 *    address than video firmware address.
+	 * 2. Video hardware can only access a range of 256MB from the base of
+	 *    the video firmware.
+	 */
+	[VIDEO_FIRMWARE_POOL] =
+	/* Low addresses, intended for video firmware */
+		{
+			.paddr	= SZ_128K,
+			.size	= SZ_16M - SZ_128K,
+		},
+	[VIDEO_MAIN_POOL] =
+	/* Main video pool */
+		{
+			.paddr	= SZ_16M,
+			.size	= SZ_256M - SZ_16M,
+		},
+	[GEN_POOL] =
+	/* Remaining address space up to 2G */
+		{
+			.paddr	= SZ_256M,
+			.size	= SZ_2G - SZ_256M,
+		},
+};
+
+static struct mem_pool msm8660_camera_pools[] =  {
+	[GEN_POOL] =
+	/* One address space for camera */
+		{
+			.paddr	= SZ_128K,
+			.size	= SZ_2G - SZ_128K,
+		},
+};
+
+static struct mem_pool msm8660_display_pools[] =  {
+	[GEN_POOL] =
+	/* One address space for display */
+		{
+			.paddr	= SZ_128K,
+			.size	= SZ_2G - SZ_128K,
+		},
+};
+
+static struct mem_pool msm8660_rotator_pools[] =  {
+	[GEN_POOL] =
+	/* One address space for rotator */
+		{
+			.paddr	= SZ_128K,
+			.size	= SZ_2G - SZ_128K,
+		},
+};
+
+static struct msm_iommu_domain msm8660_iommu_domains[] = {
+		[VIDEO_DOMAIN] = {
+			.iova_pools = msm8660_video_pools,
+			.npools = ARRAY_SIZE(msm8660_video_pools),
+		},
+		[CAMERA_DOMAIN] = {
+			.iova_pools = msm8660_camera_pools,
+			.npools = ARRAY_SIZE(msm8660_camera_pools),
+		},
+		[DISPLAY_DOMAIN] = {
+			.iova_pools = msm8660_display_pools,
+			.npools = ARRAY_SIZE(msm8660_display_pools),
+		},
+		[ROTATOR_DOMAIN] = {
+			.iova_pools = msm8660_rotator_pools,
+			.npools = ARRAY_SIZE(msm8660_rotator_pools),
+		},
+};
+
+struct iommu_domains_pdata msm8660_iommu_domain_pdata = {
+	.domains = msm8660_iommu_domains,
+	.ndomains = ARRAY_SIZE(msm8660_iommu_domains),
+	.domain_names = msm8660_iommu_ctx_names,
+	.nnames = ARRAY_SIZE(msm8660_iommu_ctx_names),
+	.domain_alloc_flags = 0,
+};
+
+struct platform_device msm8660_iommu_domain_device = {
+	.name = "iommu_domains",
+	.id = -1,
+	.dev = {
+		.platform_data = &msm8660_iommu_domain_pdata,
+	}
+};
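
The msm8660 video pool comment above captures the hardware constraint behind the pool layout: the firmware pool sits at the bottom of the IOVA space (128K..16M), and every other video address must fall within 256MB of the firmware base. The following sketch expresses that bounds check, with the pool addresses taken from the table above and the helper name purely illustrative.

/* Bounds check implied by the msm8660 video pool layout (illustrative). */
#include <stdbool.h>
#include <stdio.h>

#define SZ_128K (128u * 1024)
#define SZ_16M  (16u * 1024 * 1024)
#define SZ_256M (256u * 1024 * 1024)

static bool video_iova_ok(unsigned long fw_base, unsigned long iova)
{
	/* 1. must be at or above the firmware base
	 * 2. must lie within 256MB of that base */
	return iova >= fw_base && iova < fw_base + SZ_256M;
}

int main(void)
{
	unsigned long fw_base = SZ_128K;		/* VIDEO_FIRMWARE_POOL start */
	unsigned long main_pool_start = SZ_16M;		/* VIDEO_MAIN_POOL start */

	printf("main pool start ok: %d\n", video_iova_ok(fw_base, main_pool_start));
	printf("just past 256M window ok: %d\n", video_iova_ok(fw_base, fw_base + SZ_256M));
	return 0;
}
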
diff --git a/arch/arm/mach-msm/devices.h b/arch/arm/mach-msm/devices.h
index ea47727..f2fd791 100644
--- a/arch/arm/mach-msm/devices.h
+++ b/arch/arm/mach-msm/devices.h
@@ -113,6 +113,7 @@
 
 extern struct platform_device msm_device_otg;
 extern struct platform_device msm_android_usb_device;
+extern struct platform_device msm_android_usb_hsic_device;
 extern struct platform_device msm_device_hsic_peripheral;
 extern struct platform_device msm8960_device_otg;
 extern struct platform_device msm8960_device_gadget_peripheral;
@@ -302,7 +303,7 @@
 extern struct platform_device msm_kgsl_2d1;
 
 extern struct platform_device msm_mipi_dsi1_device;
-extern struct platform_device msm8625_mipi_dsi_device;
+extern struct platform_device mipi_dsi_device;
 extern struct platform_device msm_lvds_device;
 extern struct platform_device msm_ebi2_lcdc_device;
 
@@ -373,8 +374,8 @@
 
 extern struct platform_device mdm_8064_device;
 extern struct platform_device msm_dsps_device_8064;
-extern struct platform_device *msm_copper_stub_regulator_devices[];
-extern int msm_copper_stub_regulator_devices_len;
+extern struct platform_device *msm_8974_stub_regulator_devices[];
+extern int msm_8974_stub_regulator_devices_len;
 
 extern struct platform_device msm8960_cpu_idle_device;
 extern struct platform_device msm8930_cpu_idle_device;
@@ -396,6 +397,7 @@
 extern struct platform_device msm_device_vpe;
 extern struct platform_device mpq8064_device_qup_i2c_gsbi5;
 
+extern struct platform_device msm8660_iommu_domain_device;
 extern struct platform_device msm8960_iommu_domain_device;
 extern struct platform_device msm8930_iommu_domain_device;
 extern struct platform_device apq8064_iommu_domain_device;
@@ -406,8 +408,7 @@
 
 extern struct platform_device msm8960_cache_dump_device;
 extern struct platform_device apq8064_cache_dump_device;
-
-extern struct platform_device copper_device_tz_log;
+extern struct platform_device msm8930_cache_dump_device;
 
 extern struct platform_device mdm_sglte_device;
 
diff --git a/arch/arm/mach-msm/dma.c b/arch/arm/mach-msm/dma.c
index d3b2274..4e488d7 100644
--- a/arch/arm/mach-msm/dma.c
+++ b/arch/arm/mach-msm/dma.c
@@ -59,18 +59,21 @@
 	int channel_active;
 	int sd;
 	size_t sd_size;
+	struct list_head staged_commands[MSM_DMOV_CHANNEL_COUNT];
 	struct list_head ready_commands[MSM_DMOV_CHANNEL_COUNT];
 	struct list_head active_commands[MSM_DMOV_CHANNEL_COUNT];
-	spinlock_t lock;
+	struct mutex lock;
+	spinlock_t list_lock;
 	unsigned int irq;
 	struct clk *clk;
 	struct clk *pclk;
 	struct clk *ebiclk;
 	unsigned int clk_ctl;
-	struct timer_list timer;
+	struct delayed_work work;
+	struct workqueue_struct *cmd_wq;
 };
 
-static void msm_dmov_clock_timer(unsigned long);
+static void msm_dmov_clock_work(struct work_struct *);
 static int msm_dmov_clk_toggle(int, int);
 
 #ifdef CONFIG_ARCH_MSM8X60
@@ -163,15 +166,19 @@
 	{
 		.crci_conf = adm0_crci_conf,
 		.chan_conf = adm0_chan_conf,
-		.lock = __SPIN_LOCK_UNLOCKED(dmov_lock),
+		.lock = __MUTEX_INITIALIZER(dmov_conf[0].lock),
+		.list_lock = __SPIN_LOCK_UNLOCKED(dmov_list_lock),
 		.clk_ctl = CLK_DIS,
-		.timer = TIMER_INITIALIZER(msm_dmov_clock_timer, 0, 0),
+		.work = __DELAYED_WORK_INITIALIZER(dmov_conf[0].work,
+				msm_dmov_clock_work),
 	}, {
 		.crci_conf = adm1_crci_conf,
 		.chan_conf = adm1_chan_conf,
-		.lock = __SPIN_LOCK_UNLOCKED(dmov_lock),
+		.lock = __MUTEX_INITIALIZER(dmov_conf[1].lock),
+		.list_lock = __SPIN_LOCK_UNLOCKED(dmov_list_lock),
 		.clk_ctl = CLK_DIS,
-		.timer = TIMER_INITIALIZER(msm_dmov_clock_timer, 0, 1),
+		.work = __DELAYED_WORK_INITIALIZER(dmov_conf[1].work,
+				msm_dmov_clock_work),
 	}
 };
 #else
@@ -179,9 +186,11 @@
 	{
 		.crci_conf = NULL,
 		.chan_conf = NULL,
-		.lock = __SPIN_LOCK_UNLOCKED(dmov_lock),
+		.lock = __MUTEX_INITIALIZER(dmov_conf[0].lock),
+		.list_lock = __SPIN_LOCK_UNLOCKED(dmov_list_lock),
 		.clk_ctl = CLK_DIS,
-		.timer = TIMER_INITIALIZER(msm_dmov_clock_timer, 0, 0),
+		.work = __DELAYED_WORK_INITIALIZER(dmov_conf[0].work,
+				msm_dmov_clock_work),
 	}
 };
 #endif
@@ -257,60 +266,123 @@
 	return ret;
 }
 
-static void msm_dmov_clock_timer(unsigned long adm)
+static void msm_dmov_clock_work(struct work_struct *work)
 {
-	unsigned long irq_flags;
-	spin_lock_irqsave(&dmov_conf[adm].lock, irq_flags);
-	if (dmov_conf[adm].clk_ctl == CLK_TO_BE_DIS) {
-		BUG_ON(dmov_conf[adm].channel_active);
+	struct msm_dmov_conf *conf =
+		container_of(to_delayed_work(work), struct msm_dmov_conf, work);
+	int adm = DMOV_IRQ_TO_ADM(conf->irq);
+	mutex_lock(&conf->lock);
+	if (conf->clk_ctl == CLK_TO_BE_DIS) {
+		BUG_ON(conf->channel_active);
 		msm_dmov_clk_toggle(adm, 0);
-		dmov_conf[adm].clk_ctl = CLK_DIS;
+		conf->clk_ctl = CLK_DIS;
 	}
-	spin_unlock_irqrestore(&dmov_conf[adm].lock, irq_flags);
+	mutex_unlock(&conf->lock);
 }
 
-void msm_dmov_enqueue_cmd_ext(unsigned id, struct msm_dmov_cmd *cmd)
+enum {
+	NOFLUSH = 0,
+	GRACEFUL,
+	NONGRACEFUL,
+};
+
+/* Caller must hold the list lock */
+static struct msm_dmov_cmd *start_ready_cmd(unsigned ch, int adm)
 {
-	unsigned long irq_flags;
-	unsigned int status;
+	struct msm_dmov_cmd *cmd;
+
+	if (list_empty(&dmov_conf[adm].ready_commands[ch]))
+		return NULL;
+
+	cmd = list_entry(dmov_conf[adm].ready_commands[ch].next, typeof(*cmd),
+			 list);
+	list_del(&cmd->list);
+	if (cmd->exec_func)
+		cmd->exec_func(cmd);
+	list_add_tail(&cmd->list, &dmov_conf[adm].active_commands[ch]);
+	if (!dmov_conf[adm].channel_active)
+		enable_irq(dmov_conf[adm].irq);
+	dmov_conf[adm].channel_active |= BIT(ch);
+	PRINT_IO("msm dmov enqueue command, %x, ch %d\n", cmd->cmdptr, ch);
+	writel_relaxed(cmd->cmdptr, DMOV_REG(DMOV_CMD_PTR(ch), adm));
+
+	return cmd;
+}
+
+static void msm_dmov_enqueue_cmd_ext_work(struct work_struct *work)
+{
+	struct msm_dmov_cmd *cmd =
+		container_of(work, struct msm_dmov_cmd, work);
+	unsigned id = cmd->id;
+	unsigned status;
+	unsigned long flags;
 	int adm = DMOV_ID_TO_ADM(id);
 	int ch = DMOV_ID_TO_CHAN(id);
 
-	spin_lock_irqsave(&dmov_conf[adm].lock, irq_flags);
+	mutex_lock(&dmov_conf[adm].lock);
 	if (dmov_conf[adm].clk_ctl == CLK_DIS) {
 		status = msm_dmov_clk_toggle(adm, 1);
 		if (status != 0)
 			goto error;
-	} else if (dmov_conf[adm].clk_ctl == CLK_TO_BE_DIS)
-		del_timer(&dmov_conf[adm].timer);
+	}
 	dmov_conf[adm].clk_ctl = CLK_EN;
 
+	spin_lock_irqsave(&dmov_conf[adm].list_lock, flags);
+
+	cmd = list_entry(dmov_conf[adm].staged_commands[ch].next, typeof(*cmd),
+			 list);
+	list_del(&cmd->list);
+	list_add_tail(&cmd->list, &dmov_conf[adm].ready_commands[ch]);
 	status = readl_relaxed(DMOV_REG(DMOV_STATUS(ch), adm));
 	if (status & DMOV_STATUS_CMD_PTR_RDY) {
 		PRINT_IO("msm_dmov_enqueue_cmd(%d), start command, status %x\n",
 			id, status);
-		if (cmd->exec_func)
-			cmd->exec_func(cmd);
-		list_add_tail(&cmd->list, &dmov_conf[adm].active_commands[ch]);
-		if (!dmov_conf[adm].channel_active)
-			enable_irq(dmov_conf[adm].irq);
-		dmov_conf[adm].channel_active |= 1U << ch;
-		PRINT_IO("Writing %x exactly to register", cmd->cmdptr);
-		writel_relaxed(cmd->cmdptr, DMOV_REG(DMOV_CMD_PTR(ch), adm));
-	} else {
-		if (!dmov_conf[adm].channel_active) {
-			dmov_conf[adm].clk_ctl = CLK_TO_BE_DIS;
-			mod_timer(&dmov_conf[adm].timer, jiffies + HZ);
+		cmd = start_ready_cmd(ch, adm);
+		/*
+		 * We added something to the ready list, and still hold the
+		 * list lock. Thus, no need to check for cmd == NULL.
+		 */
+		if (cmd->toflush) {
+			int flush = (cmd->toflush == GRACEFUL) ? 1 << 31 : 0;
+			writel_relaxed(flush, DMOV_REG(DMOV_FLUSH0(ch), adm));
 		}
-		if (list_empty(&dmov_conf[adm].active_commands[ch]))
+	} else {
+		cmd->toflush = 0;
+		if (list_empty(&dmov_conf[adm].active_commands[ch]) &&
+		    !list_empty(&dmov_conf[adm].ready_commands[ch]))
 			PRINT_ERROR("msm_dmov_enqueue_cmd_ext(%d), stalled, "
 				"status %x\n", id, status);
 		PRINT_IO("msm_dmov_enqueue_cmd(%d), enqueue command, status "
 		    "%x\n", id, status);
-		list_add_tail(&cmd->list, &dmov_conf[adm].ready_commands[ch]);
 	}
+	if (!dmov_conf[adm].channel_active) {
+		dmov_conf[adm].clk_ctl = CLK_TO_BE_DIS;
+		schedule_delayed_work(&dmov_conf[adm].work, HZ);
+	}
+	spin_unlock_irqrestore(&dmov_conf[adm].list_lock, flags);
 error:
-	spin_unlock_irqrestore(&dmov_conf[adm].lock, irq_flags);
+	mutex_unlock(&dmov_conf[adm].lock);
+}
+
+static void __msm_dmov_enqueue_cmd_ext(unsigned id, struct msm_dmov_cmd *cmd)
+{
+	int adm = DMOV_ID_TO_ADM(id);
+	int ch = DMOV_ID_TO_CHAN(id);
+	unsigned long flags;
+	cmd->id = id;
+	cmd->toflush = 0;
+
+	spin_lock_irqsave(&dmov_conf[adm].list_lock, flags);
+	list_add_tail(&cmd->list, &dmov_conf[adm].staged_commands[ch]);
+	spin_unlock_irqrestore(&dmov_conf[adm].list_lock, flags);
+
+	queue_work(dmov_conf[adm].cmd_wq, &cmd->work);
+}
+
+void msm_dmov_enqueue_cmd_ext(unsigned id, struct msm_dmov_cmd *cmd)
+{
+	INIT_WORK(&cmd->work, msm_dmov_enqueue_cmd_ext_work);
+	__msm_dmov_enqueue_cmd_ext(id, cmd);
 }
 EXPORT_SYMBOL(msm_dmov_enqueue_cmd_ext);
 
@@ -318,8 +390,8 @@
 {
 	/* Disable callback function (for backwards compatibility) */
 	cmd->exec_func = NULL;
-
-	msm_dmov_enqueue_cmd_ext(id, cmd);
+	INIT_WORK(&cmd->work, msm_dmov_enqueue_cmd_ext_work);
+	__msm_dmov_enqueue_cmd_ext(id, cmd);
 }
 EXPORT_SYMBOL(msm_dmov_enqueue_cmd);
 
@@ -329,14 +401,18 @@
 	int ch = DMOV_ID_TO_CHAN(id);
 	int adm = DMOV_ID_TO_ADM(id);
 	int flush = graceful ? DMOV_FLUSH_TYPE : 0;
-	spin_lock_irqsave(&dmov_conf[adm].lock, irq_flags);
+	struct msm_dmov_cmd *cmd;
+
+	spin_lock_irqsave(&dmov_conf[adm].list_lock, irq_flags);
 	/* XXX not checking if flush cmd sent already */
 	if (!list_empty(&dmov_conf[adm].active_commands[ch])) {
 		PRINT_IO("msm_dmov_flush(%d), send flush cmd\n", id);
 		writel_relaxed(flush, DMOV_REG(DMOV_FLUSH0(ch), adm));
 	}
+	list_for_each_entry(cmd, &dmov_conf[adm].staged_commands[ch], list)
+		cmd->toflush = graceful ? GRACEFUL : NONGRACEFUL;
 	/* spin_unlock_irqrestore has the necessary barrier */
-	spin_unlock_irqrestore(&dmov_conf[adm].lock, irq_flags);
+	spin_unlock_irqrestore(&dmov_conf[adm].list_lock, irq_flags);
 }
 EXPORT_SYMBOL(msm_dmov_flush);
 
@@ -372,9 +448,10 @@
 	cmd.dmov_cmd.exec_func = NULL;
 	cmd.id = id;
 	cmd.result = 0;
+	INIT_WORK_ONSTACK(&cmd.dmov_cmd.work, msm_dmov_enqueue_cmd_ext_work);
 	init_completion(&cmd.complete);
 
-	msm_dmov_enqueue_cmd(id, &cmd.dmov_cmd);
+	__msm_dmov_enqueue_cmd_ext(id, &cmd.dmov_cmd);
 	wait_for_completion_io(&cmd.complete);
 
 	if (cmd.result != 0x80000002) {
@@ -398,7 +475,7 @@
 	errdata->flush[5] = readl_relaxed(DMOV_REG(DMOV_FLUSH5(ch), adm));
 }
 
-static irqreturn_t msm_datamover_irq_handler(int irq, void *dev_id)
+static irqreturn_t msm_dmov_isr(int irq, void *dev_id)
 {
 	unsigned int int_status;
 	unsigned int mask;
@@ -411,11 +488,12 @@
 	struct msm_dmov_cmd *cmd;
 	int adm = DMOV_IRQ_TO_ADM(irq);
 
-	spin_lock_irqsave(&dmov_conf[adm].lock, irq_flags);
+	mutex_lock(&dmov_conf[adm].lock);
 	/* read and clear isr */
 	int_status = readl_relaxed(DMOV_REG(DMOV_ISR, adm));
 	PRINT_FLOW("msm_datamover_irq_handler: DMOV_ISR %x\n", int_status);
 
+	spin_lock_irqsave(&dmov_conf[adm].list_lock, irq_flags);
 	while (int_status) {
 		mask = int_status & -int_status;
 		ch = fls(mask) - 1;
@@ -483,51 +561,37 @@
 			ch_status = readl_relaxed(DMOV_REG(DMOV_STATUS(ch),
 						  adm));
 			PRINT_FLOW("msm_datamover_irq_handler id %d, status %x\n", id, ch_status);
-			if ((ch_status & DMOV_STATUS_CMD_PTR_RDY) &&
-			    !list_empty(&dmov_conf[adm].ready_commands[ch])) {
-				cmd = list_entry(dmov_conf[adm].
-					ready_commands[ch].next, typeof(*cmd),
-					list);
-				list_del(&cmd->list);
-				if (cmd->exec_func)
-					cmd->exec_func(cmd);
-				list_add_tail(&cmd->list,
-					&dmov_conf[adm].active_commands[ch]);
-				PRINT_FLOW("msm_datamover_irq_handler id %d,"
-						 "start command\n", id);
-				writel_relaxed(cmd->cmdptr,
-					       DMOV_REG(DMOV_CMD_PTR(ch), adm));
-			}
+			if (ch_status & DMOV_STATUS_CMD_PTR_RDY)
+				start_ready_cmd(ch, adm);
 		} while (ch_status & DMOV_STATUS_RSLT_VALID);
 		if (list_empty(&dmov_conf[adm].active_commands[ch]) &&
-				list_empty(&dmov_conf[adm].ready_commands[ch]))
+		    list_empty(&dmov_conf[adm].ready_commands[ch]))
 			dmov_conf[adm].channel_active &= ~(1U << ch);
 		PRINT_FLOW("msm_datamover_irq_handler id %d, status %x\n", id, ch_status);
 	}
+	spin_unlock_irqrestore(&dmov_conf[adm].list_lock, irq_flags);
 
 	if (!dmov_conf[adm].channel_active && valid) {
 		disable_irq_nosync(dmov_conf[adm].irq);
 		dmov_conf[adm].clk_ctl = CLK_TO_BE_DIS;
-		mod_timer(&dmov_conf[adm].timer, jiffies + HZ);
+		schedule_delayed_work(&dmov_conf[adm].work, HZ);
 	}
 
-	spin_unlock_irqrestore(&dmov_conf[adm].lock, irq_flags);
+	mutex_unlock(&dmov_conf[adm].lock);
 	return valid ? IRQ_HANDLED : IRQ_NONE;
 }
 
 static int msm_dmov_suspend_late(struct device *dev)
 {
-	unsigned long irq_flags;
 	struct platform_device *pdev = to_platform_device(dev);
 	int adm = (pdev->id >= 0) ? pdev->id : 0;
-	spin_lock_irqsave(&dmov_conf[adm].lock, irq_flags);
+	mutex_lock(&dmov_conf[adm].lock);
 	if (dmov_conf[adm].clk_ctl == CLK_TO_BE_DIS) {
 		BUG_ON(dmov_conf[adm].channel_active);
-		del_timer(&dmov_conf[adm].timer);
 		msm_dmov_clk_toggle(adm, 0);
 		dmov_conf[adm].clk_ctl = CLK_DIS;
 	}
-	spin_unlock_irqrestore(&dmov_conf[adm].lock, irq_flags);
+	mutex_unlock(&dmov_conf[adm].lock);
 	return 0;
 }
 
@@ -642,12 +706,19 @@
 	if (!dmov_conf[adm].base)
 		return -ENOMEM;
 
-	ret = request_irq(dmov_conf[adm].irq, msm_datamover_irq_handler,
-		0, "msmdatamover", NULL);
+	dmov_conf[adm].cmd_wq = alloc_ordered_workqueue("dmov%d_wq", 0, adm);
+	if (!dmov_conf[adm].cmd_wq) {
+		PRINT_ERROR("Couldn't allocate ADM%d workqueue.\n", adm);
+		ret = -ENOMEM;
+		goto out_map;
+	}
+
+	ret = request_threaded_irq(dmov_conf[adm].irq, NULL, msm_dmov_isr,
+				   IRQF_ONESHOT, "msmdatamover", NULL);
 	if (ret) {
 		PRINT_ERROR("Requesting ADM%d irq %d failed\n", adm,
 			dmov_conf[adm].irq);
-		goto out_map;
+		goto out_wq;
 	}
 	disable_irq(dmov_conf[adm].irq);
 	ret = msm_dmov_init_clocks(pdev);
@@ -663,6 +734,7 @@
 
 	config_datamover(adm);
 	for (i = 0; i < MSM_DMOV_CHANNEL_COUNT; i++) {
+		INIT_LIST_HEAD(&dmov_conf[adm].staged_commands[i]);
 		INIT_LIST_HEAD(&dmov_conf[adm].ready_commands[i]);
 		INIT_LIST_HEAD(&dmov_conf[adm].active_commands[i]);
 
@@ -675,6 +747,8 @@
 	return ret;
 out_irq:
 	free_irq(dmov_conf[adm].irq, NULL);
+out_wq:
+	destroy_workqueue(dmov_conf[adm].cmd_wq);
 out_map:
 	iounmap(dmov_conf[adm].base);
 	return ret;
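
The dma.c rework above replaces the per-ADM spinlock/timer pair with a mutex, a list spinlock and a per-ADM ordered workqueue: commands now flow staged -> ready -> active, and the ADM clock is dropped from delayed work rather than a timer. A minimal caller-side sketch, assuming the usual complete_func callback in struct msm_dmov_cmd; the channel id and the pre-encoded command pointer are placeholders supplied by the caller, not values from this patch:

	#include <linux/completion.h>
	#include <mach/dma.h>

	static void my_dmov_done(struct msm_dmov_cmd *cmd, unsigned int result,
				 struct msm_dmov_errdata *err)
	{
		/* Runs from the DMOV threaded ISR once the command completes. */
		complete((struct completion *)cmd->user);
	}

	static void my_dmov_run(unsigned int id, u32 encoded_cmdptr)
	{
		struct msm_dmov_cmd cmd = {
			.cmdptr = encoded_cmdptr,	/* caller-built command pointer */
			.complete_func = my_dmov_done,
		};
		DECLARE_COMPLETION_ONSTACK(done);

		cmd.user = &done;
		/* Stages the command; the per-ADM workqueue issues it to hardware. */
		msm_dmov_enqueue_cmd(id, &cmd);
		wait_for_completion(&done);
	}
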
diff --git a/arch/arm/mach-msm/footswitch-pcom.c b/arch/arm/mach-msm/footswitch-pcom.c
index 07d7118..8903859 100644
--- a/arch/arm/mach-msm/footswitch-pcom.c
+++ b/arch/arm/mach-msm/footswitch-pcom.c
@@ -42,7 +42,6 @@
  * @init_data: Regulator platform data
  * @pcom_id: Proc-comm ID of the footswitch
  * @is_enabled: Flag set when footswitch is enabled
- * @is_manual: Flag set when footswitch is in manual proc-comm mode
  * @has_ahb_clk: Flag set if footswitched core has an ahb_clk
  * @has_src_clk: Flag set if footswitched core has a src_clk
  * @src_clk: Controls the core clock's rate
@@ -57,7 +56,6 @@
 	struct regulator_init_data		init_data;
 	unsigned				pcom_id;
 	bool					is_enabled;
-	bool					is_manual;
 	struct clk				*src_clk;
 	struct clk				*core_clk;
 	struct clk				*ahb_clk;
@@ -256,12 +254,19 @@
 	if (pdev->id >= MAX_FS)
 		return -ENODEV;
 
-	fs = &footswitches[pdev->id];
-	if (!fs->is_manual) {
-		pr_err("%s is not in manual mode\n", fs->desc.name);
-		return -EINVAL;
-	}
 	init_data = pdev->dev.platform_data;
+	fs = &footswitches[pdev->id];
+
+	/*
+	 * Enable footswitch in manual mode (i.e. not controlled along
+	 * with pcom clocks).
+	 */
+	rc = set_rail_state(fs->pcom_id, PCOM_CLKCTL_RPC_RAIL_ENABLE);
+	if (rc)
+		return rc;
+	rc = set_rail_mode(fs->pcom_id, PCOM_RAIL_MODE_MANUAL);
+	if (rc)
+		return rc;
 
 	rc = get_clocks(&pdev->dev, fs);
 	if (rc)
@@ -305,21 +310,6 @@
 
 static int __init footswitch_init(void)
 {
-	struct footswitch *fs;
-	int ret;
-
-	/*
-	 * Enable all footswitches in manual mode (ie. not controlled along
-	 * with pcom clocks).
-	 */
-	for (fs = footswitches; fs < footswitches + ARRAY_SIZE(footswitches);
-	     fs++) {
-		set_rail_state(fs->pcom_id, PCOM_CLKCTL_RPC_RAIL_ENABLE);
-		ret = set_rail_mode(fs->pcom_id, PCOM_RAIL_MODE_MANUAL);
-		if (!ret)
-			fs->is_manual = 1;
-	}
-
 	return platform_driver_register(&footswitch_driver);
 }
 subsys_initcall(footswitch_init);
diff --git a/arch/arm/mach-msm/gdsc.c b/arch/arm/mach-msm/gdsc.c
index df3a92d..4665aec 100644
--- a/arch/arm/mach-msm/gdsc.c
+++ b/arch/arm/mach-msm/gdsc.c
@@ -63,10 +63,18 @@
 
 	ret = readl_tight_poll_timeout(sc->gdscr, regval, regval & PWR_ON_MASK,
 				       TIMEOUT_US);
-	if (ret)
+	if (ret) {
 		dev_err(&rdev->dev, "%s enable timed out\n", sc->rdesc.name);
+		return ret;
+	}
 
-	return ret;
+	/*
+	 * If clocks to this power domain were already on, they will take an
+	 * additional 4 clock cycles to re-enable after the rail is enabled.
+	 */
+	udelay(1);
+
+	return 0;
 }
 
 static int gdsc_disable(struct regulator_dev *rdev)
@@ -189,4 +197,4 @@
 module_exit(gdsc_exit);
 
 MODULE_LICENSE("GPL v2");
-MODULE_DESCRIPTION("Copper GDSC power rail regulator driver");
+MODULE_DESCRIPTION("MSM8974 GDSC power rail regulator driver");
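
For scale, the new udelay(1) after the power-on poll above is generous: assuming the slowest clock in the domain is derived from the 19.2 MHz XO (an assumption, not stated in the driver), the documented 4-cycle re-enable window works out to

	4 cycles / 19.2 MHz ≈ 0.21 us, well under the 1 us delay.
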
diff --git a/arch/arm/mach-msm/gss-8064.c b/arch/arm/mach-msm/gss-8064.c
index 126f8e0..b9b877a 100644
--- a/arch/arm/mach-msm/gss-8064.c
+++ b/arch/arm/mach-msm/gss-8064.c
@@ -69,40 +69,12 @@
 	wmb();
 }
 
-static void gss_fatal_fn(struct work_struct *work)
+static void restart_gss(void)
 {
-	uint32_t panic_smsm_states = SMSM_RESET | SMSM_SYSTEM_DOWNLOAD;
-	uint32_t reset_smsm_states = SMSM_SYSTEM_REBOOT_USR |
-					SMSM_SYSTEM_PWRDWN_USR;
-	uint32_t gss_state;
-
-	pr_err("Watchdog bite received from GSS!\n");
-
-	gss_state = smsm_get_state(SMSM_MODEM_STATE);
-
-	if (gss_state & panic_smsm_states) {
-
-		pr_err("GSS SMSM state changed to SMSM_RESET.\n"
-			"Probable err_fatal on the GSS. "
-			"Calling subsystem restart...\n");
-		log_gss_sfr();
-		subsystem_restart("gss");
-
-	} else if (gss_state & reset_smsm_states) {
-
-		pr_err("%s: User-invoked system reset/powerdown. "
-			"Resetting the SoC now.\n",
-			__func__);
-		kernel_restart(NULL);
-	} else {
-		/* TODO: Bus unlock code/sequence goes _here_ */
-		log_gss_sfr();
-		subsystem_restart("gss");
-	}
+	log_gss_sfr();
+	subsystem_restart("gss");
 }
 
-static DECLARE_WORK(gss_fatal_work, gss_fatal_fn);
-
 static void smsm_state_cb(void *data, uint32_t old_state, uint32_t new_state)
 {
 	/* Ignore if we're the one that set SMSM_RESET */
@@ -113,8 +85,7 @@
 		pr_err("GSS SMSM state changed to SMSM_RESET.\n"
 			"Probable err_fatal on the GSS. "
 			"Calling subsystem restart...\n");
-		log_gss_sfr();
-		subsystem_restart("gss");
+		restart_gss();
 	}
 }
 
@@ -180,8 +151,8 @@
 
 static irqreturn_t gss_wdog_bite_irq(int irq, void *dev_id)
 {
-	schedule_work(&gss_fatal_work);
-	disable_irq_nosync(GSS_A5_WDOG_EXPIRED);
+	pr_err("Watchdog bite received from GSS!\n");
+	restart_gss();
 
 	return IRQ_HANDLED;
 }
@@ -272,7 +243,7 @@
 		goto out;
 	}
 
-	gss_data.smem_ramdump_dev = create_ramdump_device("smem");
+	gss_data.smem_ramdump_dev = create_ramdump_device("smem-gss");
 
 	if (!gss_data.smem_ramdump_dev) {
 		pr_err("%s: Unable to create smem ramdump device. (%d)\n",
diff --git a/arch/arm/mach-msm/hotplug.c b/arch/arm/mach-msm/hotplug.c
index 46e835f..f8324ce 100644
--- a/arch/arm/mach-msm/hotplug.c
+++ b/arch/arm/mach-msm/hotplug.c
@@ -16,7 +16,7 @@
 #include <asm/smp_plat.h>
 #include <asm/vfp.h>
 
-#include <mach/qdss.h>
+#include <mach/jtag.h>
 #include <mach/msm_rtb.h>
 
 #include "pm.h"
diff --git a/arch/arm/mach-msm/include/mach/board.h b/arch/arm/mach-msm/include/mach/board.h
index 460eefc..1aa3814 100644
--- a/arch/arm/mach-msm/include/mach/board.h
+++ b/arch/arm/mach-msm/include/mach/board.h
@@ -519,11 +519,13 @@
 	int disable_dmx;
 	int disable_fullhd;
 	u32 cp_enabled;
+	u32 secure_wb_heap;
 #ifdef CONFIG_MSM_BUS_SCALING
 	struct msm_bus_scale_pdata *vidc_bus_client_pdata;
 #endif
 	int cont_mode_dpb_count;
 	int disable_turbo;
+	unsigned long fw_addr;
 };
 
 struct vcap_platform_data {
@@ -541,11 +543,11 @@
 /* common init routines for use by arch/arm/mach-msm/board-*.c */
 
 #ifdef CONFIG_OF_DEVICE
-void msm_copper_init(struct of_dev_auxdata **);
+void msm_8974_init(struct of_dev_auxdata **);
 #endif
 void msm_add_devices(void);
-void msm_copper_add_devices(void);
-void msm_copper_add_drivers(void);
+void msm_8974_add_devices(void);
+void msm_8974_add_drivers(void);
 void msm_map_common_io(void);
 void msm_map_qsd8x50_io(void);
 void msm_map_msm8x60_io(void);
@@ -554,15 +556,15 @@
 void msm_map_apq8064_io(void);
 void msm_map_msm7x30_io(void);
 void msm_map_fsm9xxx_io(void);
-void msm_map_copper_io(void);
+void msm_map_8974_io(void);
 void msm_map_msm8625_io(void);
 void msm_map_msm9625_io(void);
 void msm_init_irq(void);
-void msm_copper_init_irq(void);
+void msm_8974_init_irq(void);
 void vic_handle_irq(struct pt_regs *regs);
-void msm_copper_reserve(void);
-void msm_copper_very_early(void);
-void msm_copper_init_gpiomux(void);
+void msm_8974_reserve(void);
+void msm_8974_very_early(void);
+void msm_8974_init_gpiomux(void);
 
 struct mmc_platform_data;
 int msm_add_sdcc(unsigned int controller,
diff --git a/arch/arm/mach-msm/include/mach/camera.h b/arch/arm/mach-msm/include/mach/camera.h
index 6d2c25a..4d66d88 100644
--- a/arch/arm/mach-msm/include/mach/camera.h
+++ b/arch/arm/mach-msm/include/mach/camera.h
@@ -95,6 +95,7 @@
 	VFE_MSG_OUTPUT_PRIMARY,
 	VFE_MSG_OUTPUT_SECONDARY,
 	VFE_MSG_OUTPUT_TERTIARY1,
+	VFE_MSG_OUTPUT_TERTIARY2,
 };
 
 enum vpe_resp_msg {
diff --git a/arch/arm/mach-msm/include/mach/dma.h b/arch/arm/mach-msm/include/mach/dma.h
index 70519ff..ba621e6 100644
--- a/arch/arm/mach-msm/include/mach/dma.h
+++ b/arch/arm/mach-msm/include/mach/dma.h
@@ -35,7 +35,10 @@
 			      unsigned int result,
 			      struct msm_dmov_errdata *err);
 	void (*exec_func)(struct msm_dmov_cmd *cmd);
+	struct work_struct work;
+	unsigned id;    /* For internal use */
 	void *user;	/* Pointer for caller's reference */
+	u8 toflush;
 };
 
 struct msm_dmov_pdata {
diff --git a/arch/arm/mach-msm/include/mach/iommu_domains.h b/arch/arm/mach-msm/include/mach/iommu_domains.h
index 1d538f2..1a3a022 100644
--- a/arch/arm/mach-msm/include/mach/iommu_domains.h
+++ b/arch/arm/mach-msm/include/mach/iommu_domains.h
@@ -18,10 +18,8 @@
 enum {
 	VIDEO_DOMAIN,
 	CAMERA_DOMAIN,
-	DISPLAY_READ_DOMAIN,
-	DISPLAY_WRITE_DOMAIN,
-	ROTATOR_SRC_DOMAIN,
-	ROTATOR_DST_DOMAIN,
+	DISPLAY_DOMAIN,
+	ROTATOR_DOMAIN,
 	MAX_DOMAINS
 };
 
diff --git a/arch/arm/mach-msm/include/mach/irqs-copper.h b/arch/arm/mach-msm/include/mach/irqs-8974.h
similarity index 94%
rename from arch/arm/mach-msm/include/mach/irqs-copper.h
rename to arch/arm/mach-msm/include/mach/irqs-8974.h
index 8cd8620..d10b537 100644
--- a/arch/arm/mach-msm/include/mach/irqs-copper.h
+++ b/arch/arm/mach-msm/include/mach/irqs-8974.h
@@ -10,8 +10,8 @@
  * GNU General Public License for more details.
  */
 
-#ifndef __ASM_ARCH_MSM_IRQS_COPPER_H
-#define __ASM_ARCH_MSM_IRQS_COPPER_H
+#ifndef __ASM_ARCH_MSM_IRQS_8974_H
+#define __ASM_ARCH_MSM_IRQS_8974_H
 
 /* MSM ACPU Interrupt Numbers */
 
diff --git a/arch/arm/mach-msm/include/mach/irqs.h b/arch/arm/mach-msm/include/mach/irqs.h
index d630799..143159e 100644
--- a/arch/arm/mach-msm/include/mach/irqs.h
+++ b/arch/arm/mach-msm/include/mach/irqs.h
@@ -58,8 +58,8 @@
 
 #else
 
-#if defined(CONFIG_ARCH_MSMCOPPER)
-#include "irqs-copper.h"
+#if defined(CONFIG_ARCH_MSM8974)
+#include "irqs-8974.h"
 #elif defined(CONFIG_ARCH_MSM9615)
 #include "irqs-9615.h"
 #elif defined(CONFIG_ARCH_MSM9625)
diff --git a/arch/arm/boot/dts/msmcopper-sim.dts b/arch/arm/mach-msm/include/mach/jtag.h
similarity index 65%
copy from arch/arm/boot/dts/msmcopper-sim.dts
copy to arch/arm/mach-msm/include/mach/jtag.h
index ab6b8ba..3850eff 100644
--- a/arch/arm/boot/dts/msmcopper-sim.dts
+++ b/arch/arm/mach-msm/include/mach/jtag.h
@@ -10,11 +10,15 @@
  * GNU General Public License for more details.
  */
 
-/dts-v1/;
+#ifndef __MACH_JTAG_H
+#define __MACH_JTAG_H
 
-/include/ "msmcopper.dtsi"
+#ifdef CONFIG_MSM_JTAG
+extern void msm_jtag_save_state(void);
+extern void msm_jtag_restore_state(void);
+#else
+static inline void msm_jtag_save_state(void) {}
+static inline void msm_jtag_restore_state(void) {}
+#endif
 
-/ {
-	model = "Qualcomm MSM Copper Simulator";
-	compatible = "qcom,msmcopper-sim", "qcom,msmcopper";
-};
+#endif
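
The new mach/jtag.h carries only the JTAG save/restore hooks (hotplug.c above switches to it from mach/qdss.h). A hedged sketch of the intended call pattern around a CPU power collapse; the surrounding function and the low-power entry step are hypothetical:

	#include <mach/jtag.h>

	static void my_cpu_power_collapse(void)
	{
		msm_jtag_save_state();	/* preserve debug/JTAG register state */
		/* ... enter the low-power state; debug logic may lose context ... */
		msm_jtag_restore_state();
	}
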
diff --git a/arch/arm/mach-msm/include/mach/mdm2.h b/arch/arm/mach-msm/include/mach/mdm2.h
index 997b3be..637a3cc 100644
--- a/arch/arm/mach-msm/include/mach/mdm2.h
+++ b/arch/arm/mach-msm/include/mach/mdm2.h
@@ -13,13 +13,24 @@
 #ifndef _ARCH_ARM_MACH_MSM_MDM2_H
 #define _ARCH_ARM_MACH_MSM_MDM2_H
 
+struct mdm_vddmin_resource {
+	int rpm_id;
+	int ap2mdm_vddmin_gpio;
+	unsigned int modes;
+	unsigned int drive_strength;
+	int mdm2ap_vddmin_gpio;
+};
+
 struct mdm_platform_data {
 	char *mdm_version;
 	int ramdump_delay_ms;
 	int soft_reset_inverted;
 	int early_power_on;
 	int sfr_query;
+	int no_powerdown_after_ramdumps;
+	struct mdm_vddmin_resource *vddmin_resource;
 	struct platform_device *peripheral_platform_device;
+	const unsigned int ramdump_timeout_ms;
 };
 
 #endif
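
A hedged board-file sketch of how the new mdm2 platform-data fields might be wired up. Every value below (GPIO numbers, modes, drive strength, timeouts) and the peripheral device pointer are placeholders for illustration, not taken from any real board file; only MSM_RPM_ID_VDDMIN_GPIO comes from this patch (mach/rpm.h):

	static struct mdm_vddmin_resource my_vddmin_rscs = {
		.rpm_id = MSM_RPM_ID_VDDMIN_GPIO,
		.ap2mdm_vddmin_gpio = 30,	/* placeholder */
		.modes = 0x03,			/* placeholder */
		.drive_strength = 8,		/* placeholder */
		.mdm2ap_vddmin_gpio = 80,	/* placeholder */
	};

	static struct mdm_platform_data my_mdm_pdata = {
		.mdm_version = "3.0",
		.ramdump_delay_ms = 2000,
		.sfr_query = 1,
		.vddmin_resource = &my_vddmin_rscs,
		.peripheral_platform_device = &my_hsic_host_device,	/* placeholder */
		.ramdump_timeout_ms = 120000,
	};
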
diff --git a/arch/arm/mach-msm/include/mach/msm_bus_board.h b/arch/arm/mach-msm/include/mach/msm_bus_board.h
index 0c556b5..d95e4a4 100644
--- a/arch/arm/mach-msm/include/mach/msm_bus_board.h
+++ b/arch/arm/mach-msm/include/mach/msm_bus_board.h
@@ -83,6 +83,14 @@
 extern struct msm_bus_fabric_registration msm_bus_8930_sys_fpb_pdata;
 extern struct msm_bus_fabric_registration msm_bus_8930_cpss_fpb_pdata;
 
+extern struct msm_bus_fabric_registration msm_bus_8974_sys_noc_pdata;
+extern struct msm_bus_fabric_registration msm_bus_8974_mmss_noc_pdata;
+extern struct msm_bus_fabric_registration msm_bus_8974_bimc_pdata;
+extern struct msm_bus_fabric_registration msm_bus_8974_ocmem_noc_pdata;
+extern struct msm_bus_fabric_registration msm_bus_8974_periph_noc_pdata;
+extern struct msm_bus_fabric_registration msm_bus_8974_config_noc_pdata;
+extern struct msm_bus_fabric_registration msm_bus_8974_ocmem_vnoc_pdata;
+
 void msm_bus_rpm_set_mt_mask(void);
 int msm_bus_board_rpm_get_il_ids(uint16_t *id);
 int msm_bus_board_get_iid(int id);
@@ -148,6 +156,20 @@
 		MSM_BUS_CLK_UNHALT<<MSM_BUS_MASTER_SHIFT((master),\
 		MSM_BUS_CLK_HALT_FIELDSIZE))\
 
+#define RPM_BUS_SLAVE_REQ	0x766c7362
+#define RPM_BUS_MASTER_REQ	0x73616d62
+
+enum msm_bus_rpm_slave_field_type {
+	RPM_SLAVE_FIELD_BW = 0x00007762,
+};
+
+enum msm_bus_rpm_mas_field_type {
+	RPM_MASTER_FIELD_BW =		0x00007762,
+	RPM_MASTER_FIELD_BW_T0 =	0x30747762,
+	RPM_MASTER_FIELD_BW_T1 =	0x31747762,
+	RPM_MASTER_FIELD_BW_T2 =	0x32747762,
+};
+
 /* Topology related enums */
 enum msm_bus_fabric_type {
 	MSM_BUS_FAB_DEFAULT = 0,
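
The new request and field identifiers appear to be four-character ASCII tags stored little-endian (an observation from the values, not documented here): 0x766c7362 reads as "bslv", 0x73616d62 as "bmas", 0x00007762 as "bw", and 0x30747762..0x32747762 as "bwt0".."bwt2". A tiny user-space check of the decoding on a little-endian machine:

	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		unsigned int tags[] = { 0x766c7362, 0x73616d62, 0x00007762, 0x30747762 };
		char buf[5] = { 0 };
		int i;

		for (i = 0; i < 4; i++) {
			memcpy(buf, &tags[i], 4);	/* bytes land in memory order */
			printf("0x%08x -> \"%s\"\n", tags[i], buf);
		}
		return 0;
	}
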
diff --git a/arch/arm/mach-msm/include/mach/msm_dsps.h b/arch/arm/mach-msm/include/mach/msm_dsps.h
index 32a4f15..0f9dba6 100644
--- a/arch/arm/mach-msm/include/mach/msm_dsps.h
+++ b/arch/arm/mach-msm/include/mach/msm_dsps.h
@@ -85,6 +85,7 @@
  * @ddr_size - size of the DDR region in bytes
  * @smem_start - start of the smem region as physical address
  * @smem_size - size of the smem region in bytes
+ * @ppss_pause_reg - Offset to the PPSS_PAUSE register
  * @signature - signature for validity check.
  */
 struct msm_dsps_platform_data {
@@ -107,6 +108,7 @@
 	int ddr_size;
 	int smem_start;
 	int smem_size;
+	int ppss_pause_reg;
 	u32 signature;
 };
 
diff --git a/arch/arm/mach-msm/include/mach/msm_iomap-copper.h b/arch/arm/mach-msm/include/mach/msm_iomap-8974.h
similarity index 66%
rename from arch/arm/mach-msm/include/mach/msm_iomap-copper.h
rename to arch/arm/mach-msm/include/mach/msm_iomap-8974.h
index 441f82a..fb61d54 100644
--- a/arch/arm/mach-msm/include/mach/msm_iomap-copper.h
+++ b/arch/arm/mach-msm/include/mach/msm_iomap-8974.h
@@ -11,8 +11,8 @@
  * GNU General Public License for more details.
  */
 
-#ifndef __ASM_ARCH_MSM_IOMAP_COPPER_H
-#define __ASM_ARCH_MSM_IOMAP_COPPER_H
+#ifndef __ASM_ARCH_MSM_IOMAP_8974_H
+#define __ASM_ARCH_MSM_IOMAP_8974_H
 
 /* Physical base address and size of peripherals.
  * Ordered by the virtual base addresses they will be mapped at.
@@ -23,21 +23,21 @@
  *
  */
 
-#define COPPER_MSM_SHARED_RAM_PHYS	0x0FA00000
+#define MSM8974_MSM_SHARED_RAM_PHYS	0x0FA00000
 
-#define COPPER_QGIC_DIST_PHYS	0xF9000000
-#define COPPER_QGIC_DIST_SIZE	SZ_4K
+#define MSM8974_QGIC_DIST_PHYS	0xF9000000
+#define MSM8974_QGIC_DIST_SIZE	SZ_4K
 
-#define COPPER_QGIC_CPU_PHYS	0xF9002000
-#define COPPER_QGIC_CPU_SIZE	SZ_4K
+#define MSM8974_QGIC_CPU_PHYS	0xF9002000
+#define MSM8974_QGIC_CPU_SIZE	SZ_4K
 
-#define COPPER_APCS_GCC_PHYS	0xF9011000
-#define COPPER_APCS_GCC_SIZE	SZ_4K
+#define MSM8974_APCS_GCC_PHYS	0xF9011000
+#define MSM8974_APCS_GCC_SIZE	SZ_4K
 
-#define COPPER_TLMM_PHYS	0xFD510000
-#define COPPER_TLMM_SIZE	SZ_16K
+#define MSM8974_TLMM_PHYS	0xFD510000
+#define MSM8974_TLMM_SIZE	SZ_16K
 
-#ifdef CONFIG_DEBUG_MSMCOPPER_UART
+#ifdef CONFIG_DEBUG_MSM8974_UART
 #define MSM_DEBUG_UART_BASE	IOMEM(0xFA71E000)
 #define MSM_DEBUG_UART_PHYS	0xF991E000
 #endif
diff --git a/arch/arm/mach-msm/include/mach/msm_iomap-fsm9xxx.h b/arch/arm/mach-msm/include/mach/msm_iomap-fsm9xxx.h
index c30c9e4..a99f1f7 100644
--- a/arch/arm/mach-msm/include/mach/msm_iomap-fsm9xxx.h
+++ b/arch/arm/mach-msm/include/mach/msm_iomap-fsm9xxx.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
+/* Copyright (c) 2010-2012, Code Aurora Forum. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -78,7 +78,10 @@
 #define MSM_UART1_PHYS        0x94000000
 #define MSM_UART1_SIZE        SZ_4K
 
-#define MSM_UART2_PHYS        0x94100000
+#define MSM_UART2_PHYS        0x94010000
 #define MSM_UART2_SIZE        SZ_4K
 
+#define MSM_UART3_PHYS        0x94100000
+#define MSM_UART3_SIZE        SZ_4K
+
 #endif
diff --git a/arch/arm/mach-msm/include/mach/msm_iomap.h b/arch/arm/mach-msm/include/mach/msm_iomap.h
index 75cc43a..7085db7 100644
--- a/arch/arm/mach-msm/include/mach/msm_iomap.h
+++ b/arch/arm/mach-msm/include/mach/msm_iomap.h
@@ -50,7 +50,7 @@
 
 #if defined(CONFIG_ARCH_MSM8960) || defined(CONFIG_ARCH_APQ8064) || \
 	defined(CONFIG_ARCH_MSM8930) || defined(CONFIG_ARCH_MSM9615) || \
-	defined(CONFIG_ARCH_MSMCOPPER) || defined(CONFIG_ARCH_MSM7X27) || \
+	defined(CONFIG_ARCH_MSM8974) || defined(CONFIG_ARCH_MSM7X27) || \
 	defined(CONFIG_ARCH_MSM7X25) || defined(CONFIG_ARCH_MSM7X01A) || \
 	defined(CONFIG_ARCH_MSM8625) || defined(CONFIG_ARCH_MSM7X30) || \
 	defined(CONFIG_ARCH_MSM9625)
@@ -115,7 +115,7 @@
 #include "msm_iomap-8930.h"
 #include "msm_iomap-8064.h"
 #include "msm_iomap-9615.h"
-#include "msm_iomap-copper.h"
+#include "msm_iomap-8974.h"
 #include "msm_iomap-9625.h"
 
 #else
diff --git a/arch/arm/mach-msm/include/mach/msm_spi.h b/arch/arm/mach-msm/include/mach/msm_spi.h
index 51081b6..11d3014 100644
--- a/arch/arm/mach-msm/include/mach/msm_spi.h
+++ b/arch/arm/mach-msm/include/mach/msm_spi.h
@@ -21,4 +21,5 @@
 	int (*dma_config)(void);
 	const char *rsl_id;
 	uint32_t pm_lat;
+	uint32_t infinite_mode;
 };
diff --git a/arch/arm/mach-msm/include/mach/qdsp6v2/audio_acdb.h b/arch/arm/mach-msm/include/mach/qdsp6v2/audio_acdb.h
index 65cf647..3d33350 100644
--- a/arch/arm/mach-msm/include/mach/qdsp6v2/audio_acdb.h
+++ b/arch/arm/mach-msm/include/mach/qdsp6v2/audio_acdb.h
@@ -14,7 +14,7 @@
 #define _AUDIO_ACDB_H
 
 #include <linux/msm_audio_acdb.h>
-#ifdef CONFIG_ARCH_MSMCOPPER
+#ifdef CONFIG_ARCH_MSM8974
 #include <sound/q6adm-v2.h>
 #else
 #include <sound/q6adm.h>
diff --git a/arch/arm/mach-msm/include/mach/qdss.h b/arch/arm/mach-msm/include/mach/qdss.h
deleted file mode 100644
index 05d8577..0000000
--- a/arch/arm/mach-msm/include/mach/qdss.h
+++ /dev/null
@@ -1,54 +0,0 @@
-/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- */
-
-#ifndef __MACH_QDSS_H
-#define __MACH_QDSS_H
-
-struct qdss_source {
-	struct list_head link;
-	const char *name;
-	uint32_t fport_mask;
-};
-
-struct msm_qdss_platform_data {
-	struct qdss_source *src_table;
-	size_t size;
-	uint8_t afamily;
-};
-
-#ifdef CONFIG_MSM_QDSS
-extern struct qdss_source *qdss_get(const char *name);
-extern void qdss_put(struct qdss_source *src);
-extern int qdss_enable(struct qdss_source *src);
-extern void qdss_disable(struct qdss_source *src);
-extern void qdss_disable_sink(void);
-extern int qdss_clk_enable(void);
-extern void qdss_clk_disable(void);
-#else
-static inline struct qdss_source *qdss_get(const char *name) { return NULL; }
-static inline void qdss_put(struct qdss_source *src) {}
-static inline int qdss_enable(struct qdss_source *src) { return -ENOSYS; }
-static inline void qdss_disable(struct qdss_source *src) {}
-static inline void qdss_disable_sink(void) {}
-static inline int qdss_clk_enable(void) { return -ENOSYS; }
-static inline void qdss_clk_disable(void) {}
-#endif
-
-#ifdef CONFIG_MSM_JTAG
-extern void msm_jtag_save_state(void);
-extern void msm_jtag_restore_state(void);
-#else
-static inline void msm_jtag_save_state(void) {}
-static inline void msm_jtag_restore_state(void) {}
-#endif
-
-#endif
diff --git a/arch/arm/mach-msm/include/mach/qpnp-int.h b/arch/arm/mach-msm/include/mach/qpnp-int.h
index a79d2fc..21d95e6 100644
--- a/arch/arm/mach-msm/include/mach/qpnp-int.h
+++ b/arch/arm/mach-msm/include/mach/qpnp-int.h
@@ -52,7 +52,8 @@
  * Used by the PMIC Arbiter driver or equivalent to register
  * callbacks for interrupt events.
  */
-int qpnpint_register_controller(unsigned int busno,
+int qpnpint_register_controller(struct device_node *node,
+				struct spmi_controller *ctrl,
 				struct qpnp_local_int *li_cb);
 
 /**
@@ -68,8 +69,11 @@
 {
 	return -ENXIO;
 }
-static inline int qpnpint_register_controller(unsigned int busno,
-				struct qpnp_local_int *li_cb)
+
+static inline int qpnpint_register_controller(struct device_node *node,
+					      struct spmi_controller *ctrl,
+					      struct qpnp_local_int *li_cb)
+
 {
 	return -ENXIO;
 }
diff --git a/arch/arm/mach-msm/include/mach/qpnp.h b/arch/arm/mach-msm/include/mach/qpnp.h
deleted file mode 100644
index 1d2e440..0000000
--- a/arch/arm/mach-msm/include/mach/qpnp.h
+++ /dev/null
@@ -1,19 +0,0 @@
- /* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- */
-
-#include <linux/spmi.h>
-
-struct resource *qpnp_get_resource(struct spmi_device *dev,
-				   unsigned int node_idx, unsigned int type,
-				   unsigned int res_num);
-int qpnp_get_irq(struct spmi_device *dev, unsigned int node_idx,
-					  unsigned int res_num);
diff --git a/arch/arm/mach-msm/include/mach/rpm-8064.h b/arch/arm/mach-msm/include/mach/rpm-8064.h
index c4c6b0a..39ec7ff 100644
--- a/arch/arm/mach-msm/include/mach/rpm-8064.h
+++ b/arch/arm/mach-msm/include/mach/rpm-8064.h
@@ -120,7 +120,9 @@
 	MSM_RPM_8064_SEL_HDMI_SWITCH					= 83,
 	MSM_RPM_8064_SEL_DDR_DMM					= 84,
 
-	MSM_RPM_8064_SEL_LAST = MSM_RPM_8064_SEL_DDR_DMM,
+	MSM_RPM_8064_SEL_VDDMIN_GPIO				= 89,
+
+	MSM_RPM_8064_SEL_LAST = MSM_RPM_8064_SEL_VDDMIN_GPIO,
 };
 
 /* RPM resource (4 byte) word ID enum */
@@ -287,8 +289,9 @@
 	MSM_RPM_8064_ID_DDR_DMM_0					= 212,
 	MSM_RPM_8064_ID_DDR_DMM_1					= 213,
 	MSM_RPM_8064_ID_QDSS_CLK					= 214,
+	MSM_RPM_8064_ID_VDDMIN_GPIO					= 215,
 
-	MSM_RPM_8064_ID_LAST = MSM_RPM_8064_ID_QDSS_CLK,
+	MSM_RPM_8064_ID_LAST = MSM_RPM_8064_ID_VDDMIN_GPIO,
 };
 
 
@@ -425,8 +428,9 @@
 	MSM_RPM_8064_STATUS_ID_DDR_DMM_1				= 128,
 	MSM_RPM_8064_STATUS_ID_EBI1_CH0_RANGE				= 129,
 	MSM_RPM_8064_STATUS_ID_EBI1_CH1_RANGE				= 130,
+	MSM_RPM_8064_STATUS_ID_VDDMIN_GPIO				= 131,
 
-	MSM_RPM_8064_STATUS_ID_LAST = MSM_RPM_8064_STATUS_ID_EBI1_CH1_RANGE,
+	MSM_RPM_8064_STATUS_ID_LAST = MSM_RPM_8064_STATUS_ID_VDDMIN_GPIO,
 };
 
 #endif /* __ARCH_ARM_MACH_MSM_RPM_8064_H */
diff --git a/arch/arm/mach-msm/include/mach/rpm-regulator-copper.h b/arch/arm/mach-msm/include/mach/rpm-regulator-8974.h
similarity index 82%
rename from arch/arm/mach-msm/include/mach/rpm-regulator-copper.h
rename to arch/arm/mach-msm/include/mach/rpm-regulator-8974.h
index 2006ad3..07ae600 100644
--- a/arch/arm/mach-msm/include/mach/rpm-regulator-copper.h
+++ b/arch/arm/mach-msm/include/mach/rpm-regulator-8974.h
@@ -11,13 +11,13 @@
  * GNU General Public License for more details.
  */
 
-#ifndef __ARCH_ARM_MACH_MSM_INCLUDE_MACH_RPM_REGULATOR_COPPER_H
-#define __ARCH_ARM_MACH_MSM_INCLUDE_MACH_RPM_REGULATOR_COPPER_H
+#ifndef __ARCH_ARM_MACH_MSM_INCLUDE_MACH_RPM_REGULATOR_8974_H
+#define __ARCH_ARM_MACH_MSM_INCLUDE_MACH_RPM_REGULATOR_8974_H
 
 /**
  * enum rpm_vreg_id - RPM regulator ID numbers (both real and pin control)
  */
-enum rpm_vreg_id_copper {
+enum rpm_vreg_id_8974 {
 	RPM_VREG_ID_PM8941_S1,
 	RPM_VREG_ID_PM8941_S2,
 	RPM_VREG_ID_PM8941_L12,
diff --git a/arch/arm/mach-msm/include/mach/rpm-regulator-smd.h b/arch/arm/mach-msm/include/mach/rpm-regulator-smd.h
index 2eb59f5..319c2d8 100644
--- a/arch/arm/mach-msm/include/mach/rpm-regulator-smd.h
+++ b/arch/arm/mach-msm/include/mach/rpm-regulator-smd.h
@@ -17,6 +17,26 @@
 
 struct rpm_regulator;
 
+/**
+ * enum rpm_regulator_voltage_corner - possible voltage corner values
+ *
+ * These should be used in regulator_set_voltage() and
+ * rpm_regulator_set_voltage() calls for corner type regulators as if they had
+ * units of uV.
+ *
+ * Note, the meaning of corner values is set by the RPM.  It is possible that
+ * future platforms will utilize different corner values.  The values specified
+ * in this enum correspond to MSM8974 for PMIC PM8841 SMPS 2 (VDD_Dig).
+ */
+enum rpm_regulator_voltage_corner {
+	RPM_REGULATOR_CORNER_RETENTION = 1,
+	RPM_REGULATOR_CORNER_SVS_KRAIT,
+	RPM_REGULATOR_CORNER_SVS_SOC,
+	RPM_REGULATOR_CORNER_NORMAL,
+	RPM_REGULATOR_CORNER_TURBO,
+	RPM_REGULATOR_CORNER_SUPER_TURBO,
+};
+
 #if defined(CONFIG_MSM_RPM_REGULATOR_SMD) || defined(CONFIG_MSM_RPM_REGULATOR)
 
 struct rpm_regulator *rpm_regulator_get(struct device *dev, const char *supply);
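
Because corner values stand in for microvolts, a consumer requests a corner through the normal regulator API. A hedged sketch; the supply name is a placeholder and must map to a corner-type RPM regulator for the call to mean anything:

	#include <linux/err.h>
	#include <linux/regulator/consumer.h>
	#include <mach/rpm-regulator-smd.h>

	static int my_vote_nominal_corner(struct device *dev)
	{
		struct regulator *vdd_dig;
		int rc;

		vdd_dig = regulator_get(dev, "vdd_dig");	/* placeholder supply */
		if (IS_ERR(vdd_dig))
			return PTR_ERR(vdd_dig);

		/* Corners are passed where microvolt bounds normally go. */
		rc = regulator_set_voltage(vdd_dig, RPM_REGULATOR_CORNER_NORMAL,
					   RPM_REGULATOR_CORNER_SUPER_TURBO);
		if (!rc)
			rc = regulator_enable(vdd_dig);
		return rc;
	}
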
diff --git a/arch/arm/mach-msm/include/mach/rpm-regulator.h b/arch/arm/mach-msm/include/mach/rpm-regulator.h
index a010257..1f4ef2d 100644
--- a/arch/arm/mach-msm/include/mach/rpm-regulator.h
+++ b/arch/arm/mach-msm/include/mach/rpm-regulator.h
@@ -20,7 +20,7 @@
 #include <mach/rpm-regulator-8660.h>
 #include <mach/rpm-regulator-8960.h>
 #include <mach/rpm-regulator-9615.h>
-#include <mach/rpm-regulator-copper.h>
+#include <mach/rpm-regulator-8974.h>
 #include <mach/rpm-regulator-8930.h>
 
 /**
diff --git a/arch/arm/mach-msm/include/mach/rpm.h b/arch/arm/mach-msm/include/mach/rpm.h
index de4c9d9..f6b9a6e 100644
--- a/arch/arm/mach-msm/include/mach/rpm.h
+++ b/arch/arm/mach-msm/include/mach/rpm.h
@@ -460,6 +460,7 @@
 	MSM_RPM_ID_PM8821_S2_1,
 	MSM_RPM_ID_PM8821_L1_0,
 	MSM_RPM_ID_PM8821_L1_1,
+	MSM_RPM_ID_VDDMIN_GPIO,
 
 	MSM_RPM_ID_LAST,
 };
@@ -825,6 +826,7 @@
 	MSM_RPM_STATUS_ID_PM8821_S2_1,
 	MSM_RPM_STATUS_ID_PM8821_L1_0,
 	MSM_RPM_STATUS_ID_PM8821_L1_1,
+	MSM_RPM_STATUS_ID_VDDMIN_GPIO,
 
 	MSM_RPM_STATUS_ID_LAST,
 };
diff --git a/arch/arm/mach-msm/include/mach/socinfo.h b/arch/arm/mach-msm/include/mach/socinfo.h
index c0ad65b..6b4ce2a 100644
--- a/arch/arm/mach-msm/include/mach/socinfo.h
+++ b/arch/arm/mach-msm/include/mach/socinfo.h
@@ -32,23 +32,23 @@
 #define SOCINFO_VERSION_MINOR(ver) (ver & 0x0000ffff)
 
 #ifdef CONFIG_OF
-#define early_machine_is_copper()	\
-	of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,msmcopper")
-#define machine_is_copper()		\
-	of_machine_is_compatible("qcom,msmcopper")
-#define machine_is_copper_sim()		\
-	of_machine_is_compatible("qcom,msmcopper-sim")
-#define machine_is_copper_rumi()	\
-	of_machine_is_compatible("qcom,msmcopper-rumi")
+#define early_machine_is_msm8974()	\
+	of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,msm8974")
+#define machine_is_msm8974()		\
+	of_machine_is_compatible("qcom,msm8974")
+#define machine_is_msm8974_sim()		\
+	of_machine_is_compatible("qcom,msm8974-sim")
+#define machine_is_msm8974_rumi()	\
+	of_machine_is_compatible("qcom,msm8974-rumi")
 #define early_machine_is_msm9625()	\
 	of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,msm9625")
 #define machine_is_msm9625()		\
 	of_machine_is_compatible("qcom,msm9625")
 #else
-#define early_machine_is_copper()	0
-#define machine_is_copper()		0
-#define machine_is_copper_sim()	0
-#define machine_is_copper_rumi()	0
+#define early_machine_is_msm8974()	0
+#define machine_is_msm8974()		0
+#define machine_is_msm8974_sim()	0
+#define machine_is_msm8974_rumi()	0
 #define early_machine_is_msm9625()	0
 #define machine_is_msm9625()		0
 #endif
@@ -75,7 +75,7 @@
 	MSM_CPU_8930,
 	MSM_CPU_7X27AA,
 	MSM_CPU_9615,
-	MSM_CPU_COPPER,
+	MSM_CPU_8974,
 	MSM_CPU_8627,
 	MSM_CPU_8625,
 	MSM_CPU_9625
@@ -256,8 +256,7 @@
 static inline int cpu_is_msm8930(void)
 {
 #ifdef CONFIG_ARCH_MSM8930
-	return (read_msm_cpu_type() == MSM_CPU_8930) ||
-	       (read_msm_cpu_type() == MSM_CPU_8627);
+	return read_msm_cpu_type() == MSM_CPU_8930;
 #else
 	return 0;
 #endif
diff --git a/arch/arm/mach-msm/io.c b/arch/arm/mach-msm/io.c
index e35d99b..a2f2c31 100644
--- a/arch/arm/mach-msm/io.c
+++ b/arch/arm/mach-msm/io.c
@@ -294,28 +294,28 @@
 }
 #endif /* CONFIG_ARCH_APQ8064 */
 
-#ifdef CONFIG_ARCH_MSMCOPPER
-static struct map_desc msm_copper_io_desc[] __initdata = {
-	MSM_CHIP_DEVICE(QGIC_DIST, COPPER),
-	MSM_CHIP_DEVICE(QGIC_CPU, COPPER),
-	MSM_CHIP_DEVICE(APCS_GCC, COPPER),
-	MSM_CHIP_DEVICE(TLMM, COPPER),
+#ifdef CONFIG_ARCH_MSM8974
+static struct map_desc msm_8974_io_desc[] __initdata = {
+	MSM_CHIP_DEVICE(QGIC_DIST, MSM8974),
+	MSM_CHIP_DEVICE(QGIC_CPU, MSM8974),
+	MSM_CHIP_DEVICE(APCS_GCC, MSM8974),
+	MSM_CHIP_DEVICE(TLMM, MSM8974),
 	{
 		.virtual =  (unsigned long) MSM_SHARED_RAM_BASE,
 		.length =   MSM_SHARED_RAM_SIZE,
 		.type =     MT_DEVICE,
 	},
-#ifdef CONFIG_DEBUG_MSMCOPPER_UART
+#ifdef CONFIG_DEBUG_MSM8974_UART
 	MSM_DEVICE(DEBUG_UART),
 #endif
 };
 
-void __init msm_map_copper_io(void)
+void __init msm_map_8974_io(void)
 {
-	msm_shared_ram_phys = COPPER_MSM_SHARED_RAM_PHYS;
-	msm_map_io(msm_copper_io_desc, ARRAY_SIZE(msm_copper_io_desc));
+	msm_shared_ram_phys = MSM8974_MSM_SHARED_RAM_PHYS;
+	msm_map_io(msm_8974_io_desc, ARRAY_SIZE(msm_8974_io_desc));
 }
-#endif /* CONFIG_ARCH_MSMCOPPER */
+#endif /* CONFIG_ARCH_MSM8974 */
 
 #ifdef CONFIG_ARCH_MSM7X30
 static struct map_desc msm7x30_io_desc[] __initdata = {
diff --git a/arch/arm/mach-msm/iommu_domains.c b/arch/arm/mach-msm/iommu_domains.c
index a7e06ba..e92b5c5 100644
--- a/arch/arm/mach-msm/iommu_domains.c
+++ b/arch/arm/mach-msm/iommu_domains.c
@@ -47,31 +47,57 @@
 				unsigned long page_size,
 				int cached)
 {
-	int i, ret_value = 0;
-	unsigned long order = get_order(page_size);
-	unsigned long aligned_size = ALIGN(size, page_size);
-	unsigned long nrpages = aligned_size >> (PAGE_SHIFT + order);
+	int ret = 0;
+	int i = 0;
 	unsigned long phy_addr = ALIGN(virt_to_phys(iommu_dummy), page_size);
 	unsigned long temp_iova = start_iova;
+	if (page_size == SZ_4K) {
+		struct scatterlist *sglist;
+		unsigned int nrpages = PFN_ALIGN(size) >> PAGE_SHIFT;
+		struct page *dummy_page = phys_to_page(phy_addr);
 
-	for (i = 0; i < nrpages; i++) {
-		int ret = iommu_map(domain, temp_iova, phy_addr, page_size,
-					cached);
-		if (ret) {
-			pr_err("%s: could not map %lx in domain %p, error: %d\n",
-				__func__, start_iova, domain, ret);
-			ret_value = -EAGAIN;
+		sglist = vmalloc(sizeof(*sglist) * nrpages);
+		if (!sglist) {
+			ret = -ENOMEM;
 			goto out;
 		}
-		temp_iova += page_size;
+
+		sg_init_table(sglist, nrpages);
+
+		for (i = 0; i < nrpages; i++)
+			sg_set_page(&sglist[i], dummy_page, PAGE_SIZE, 0);
+
+		ret = iommu_map_range(domain, temp_iova, sglist, size, cached);
+		if (ret) {
+			pr_err("%s: could not map extra %lx in domain %p\n",
+				__func__, start_iova, domain);
+		}
+
+		vfree(sglist);
+	} else {
+		unsigned long order = get_order(page_size);
+		unsigned long aligned_size = ALIGN(size, page_size);
+		unsigned long nrpages = aligned_size >> (PAGE_SHIFT + order);
+
+		for (i = 0; i < nrpages; i++) {
+			ret = iommu_map(domain, temp_iova, phy_addr, page_size,
+						cached);
+			if (ret) {
+				pr_err("%s: could not map %lx in domain %p, error: %d\n",
+					__func__, start_iova, domain, ret);
+				ret = -EAGAIN;
+				goto out;
+			}
+			temp_iova += page_size;
+		}
 	}
-	return ret_value;
+	return ret;
 out:
 	for (; i > 0; --i) {
 		temp_iova -= page_size;
 		iommu_unmap(domain, start_iova, page_size);
 	}
-	return ret_value;
+	return ret;
 }
 
 void msm_iommu_unmap_extra(struct iommu_domain *domain,
diff --git a/arch/arm/mach-msm/jtag.c b/arch/arm/mach-msm/jtag.c
index 8dae9c6..bf5857c 100644
--- a/arch/arm/mach-msm/jtag.c
+++ b/arch/arm/mach-msm/jtag.c
@@ -19,11 +19,16 @@
 #include <linux/export.h>
 #include <linux/printk.h>
 #include <linux/ratelimit.h>
+#include <linux/coresight.h>
 #include <mach/scm.h>
+#include <mach/jtag.h>
 
-#include "qdss-priv.h"
 #include "cp14.h"
 
+#define BM(lsb, msb)		((BIT(msb) - BIT(lsb)) + BIT(msb))
+#define BMVAL(val, lsb, msb)	((val & BM(lsb, msb)) >> lsb)
+#define BVAL(val, n)		((val & BIT(n)) >> n)
+
 /* no of dbg regs + 1 (for storing the reg count) */
 #define MAX_DBG_REGS		(90)
 #define MAX_DBG_STATE_SIZE	(MAX_DBG_REGS * num_possible_cpus())
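
The BM/BMVAL/BVAL helpers added here presumably match what qdss-priv.h provided before its removal. A quick worked example with arbitrary values:

	BM(4, 7)          = (BIT(7) - BIT(4)) + BIT(7) = 0x70 + 0x80 = 0xF0   (bits 7..4 set)
	BMVAL(0xAB, 4, 7) = (0xAB & 0xF0) >> 4         = 0x0A
	BVAL(0xAB, 3)     = (0xAB & BIT(3)) >> 3       = 0x01
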
diff --git a/arch/arm/mach-msm/mdm2.c b/arch/arm/mach-msm/mdm2.c
index bd7bd9e..6e7086e 100644
--- a/arch/arm/mach-msm/mdm2.c
+++ b/arch/arm/mach-msm/mdm2.c
@@ -42,9 +42,7 @@
 #include "clock.h"
 #include "mdm_private.h"
 
-#define MDM_MODEM_TIMEOUT	6000
-#define MDM_HOLD_TIME		4000
-#define MDM_MODEM_DELTA		100
+#define MDM_PBLRDY_CNT		20
 
 static int mdm_debug_on;
 static int power_on_count;
@@ -79,20 +77,64 @@
 	mutex_unlock(&hsic_status_lock);
 }
 
+/* This function can be called from atomic context. */
+static void mdm_toggle_soft_reset(struct mdm_modem_drv *mdm_drv)
+{
+	int soft_reset_direction_assert = 0,
+	    soft_reset_direction_de_assert = 1;
+
+	if (mdm_drv->pdata->soft_reset_inverted) {
+		soft_reset_direction_assert = 1;
+		soft_reset_direction_de_assert = 0;
+	}
+	gpio_direction_output(mdm_drv->ap2mdm_soft_reset_gpio,
+			soft_reset_direction_assert);
+	/* Use mdelay because this function can be called from atomic
+	 * context.
+	 */
+	mdelay(10);
+	gpio_direction_output(mdm_drv->ap2mdm_soft_reset_gpio,
+			soft_reset_direction_de_assert);
+}
+
+/* This function can be called from atomic context. */
+static void mdm_atomic_soft_reset(struct mdm_modem_drv *mdm_drv)
+{
+	mdm_toggle_soft_reset(mdm_drv);
+}
+
 static void mdm_power_down_common(struct mdm_modem_drv *mdm_drv)
 {
+	int i;
 	int soft_reset_direction =
 		mdm_drv->pdata->soft_reset_inverted ? 1 : 0;
 
-	gpio_direction_output(mdm_drv->ap2mdm_soft_reset_gpio,
-				soft_reset_direction);
+	/* Wait for the modem to complete its power down actions. */
+	for (i = 20; i > 0; i--) {
+		if (gpio_get_value(mdm_drv->mdm2ap_status_gpio) == 0)
+			break;
+		msleep(100);
+	}
+	if (i == 0) {
+		pr_err("%s: MDM2AP_STATUS never went low. Doing a hard reset\n",
+			   __func__);
+		gpio_direction_output(mdm_drv->ap2mdm_soft_reset_gpio,
+					soft_reset_direction);
+		/*
+		 * Currently, there is a debounce timer on the charm PMIC. It is
+		 * necessary to hold the PMIC RESET low for ~3.5 seconds
+		 * for the reset to fully take place. Sleep here to ensure the
+		 * reset has occurred before the function exits.
+		 */
+		msleep(4000);
+	}
 	mdm_peripheral_disconnect(mdm_drv);
 }
 
 static void mdm_do_first_power_on(struct mdm_modem_drv *mdm_drv)
 {
-	int soft_reset_direction =
-		mdm_drv->pdata->soft_reset_inverted ? 0 : 1;
+	int i;
+	int pblrdy;
 
 	if (power_on_count != 1) {
 		pr_err("%s: Calling fn when power_on_count != 1\n",
@@ -103,6 +145,13 @@
 	pr_err("%s: Powering on modem for the first time\n", __func__);
 	mdm_peripheral_disconnect(mdm_drv);
 
+	/* If this is the first power-up after a panic, the modem may still
+	 * be in a power-on state, in which case we need to toggle the gpio
+	 * instead of just de-asserting it. No harm done if the modem was
+	 * powered down.
+	 */
+	mdm_toggle_soft_reset(mdm_drv);
+
 	/* If the device has a kpd pwr gpio then toggle it. */
 	if (mdm_drv->ap2mdm_kpdpwr_n_gpio > 0) {
 		/* Pull AP2MDM_KPDPWR gpio high and wait for PS_HOLD to settle,
@@ -114,31 +163,45 @@
 		gpio_direction_output(mdm_drv->ap2mdm_kpdpwr_n_gpio, 0);
 	}
 
-	/* De-assert the soft reset line. */
-	pr_debug("%s: De-asserting soft reset gpio\n", __func__);
-	gpio_direction_output(mdm_drv->ap2mdm_soft_reset_gpio,
-						  soft_reset_direction);
+	if (!mdm_drv->mdm2ap_pblrdy)
+		goto start_mdm_peripheral;
 
+	for (i = 0; i < MDM_PBLRDY_CNT; i++) {
+		pblrdy = gpio_get_value(mdm_drv->mdm2ap_pblrdy);
+		if (pblrdy)
+			break;
+		usleep_range(5000, 5000);
+	}
+
+	pr_debug("%s: i:%d\n", __func__, i);
+
+start_mdm_peripheral:
 	mdm_peripheral_connect(mdm_drv);
 	msleep(200);
 }
 
 static void mdm_do_soft_power_on(struct mdm_modem_drv *mdm_drv)
 {
-	int soft_reset_direction =
-		mdm_drv->pdata->soft_reset_inverted ? 0 : 1;
+	int i;
+	int pblrdy;
 
-	/* De-assert the soft reset line. */
 	pr_err("%s: soft resetting mdm modem\n", __func__);
-
 	mdm_peripheral_disconnect(mdm_drv);
+	mdm_toggle_soft_reset(mdm_drv);
 
-	gpio_direction_output(mdm_drv->ap2mdm_soft_reset_gpio,
-		soft_reset_direction == 1 ? 0 : 1);
-	usleep_range(5000, 10000);
-	gpio_direction_output(mdm_drv->ap2mdm_soft_reset_gpio,
-		soft_reset_direction == 1 ? 1 : 0);
+	if (!mdm_drv->mdm2ap_pblrdy)
+		goto start_mdm_peripheral;
 
+	for (i = 0; i < MDM_PBLRDY_CNT; i++) {
+		pblrdy = gpio_get_value(mdm_drv->mdm2ap_pblrdy);
+		if (pblrdy)
+			break;
+		usleep_range(5000, 5000);
+	}
+
+	pr_debug("%s: i:%d\n", __func__, i);
+
+start_mdm_peripheral:
 	mdm_peripheral_connect(mdm_drv);
 	msleep(200);
 }
@@ -189,6 +252,7 @@
 static struct mdm_ops mdm_cb = {
 	.power_on_mdm_cb = mdm_power_on_common,
 	.reset_mdm_cb = mdm_power_on_common,
+	.atomic_reset_mdm_cb = mdm_atomic_soft_reset,
 	.power_down_mdm_cb = mdm_power_down_common,
 	.debug_state_changed_cb = debug_state_changed,
 	.status_cb = mdm_status_changed,
diff --git a/arch/arm/mach-msm/mdm_common.c b/arch/arm/mach-msm/mdm_common.c
index 74bf25d..5b181e1 100644
--- a/arch/arm/mach-msm/mdm_common.c
+++ b/arch/arm/mach-msm/mdm_common.c
@@ -36,6 +36,7 @@
 #include <mach/restart.h>
 #include <mach/subsystem_notif.h>
 #include <mach/subsystem_restart.h>
+#include <mach/rpm.h>
 #include <linux/msm_charm.h>
 #include "msm_watchdog.h"
 #include "mdm_private.h"
@@ -44,12 +45,13 @@
 #define MDM_MODEM_TIMEOUT	6000
 #define MDM_MODEM_DELTA	100
 #define MDM_BOOT_TIMEOUT	60000L
-#define MDM_RDUMP_TIMEOUT	60000L
+#define MDM_RDUMP_TIMEOUT	120000L
 #define MDM2AP_STATUS_TIMEOUT_MS 60000L
 
 static int mdm_debug_on;
 static struct workqueue_struct *mdm_queue;
 static struct workqueue_struct *mdm_sfr_queue;
+static unsigned int dump_timeout_ms;
 
 #define EXTERNAL_MODEM "external_modem"
 
@@ -65,6 +67,57 @@
 #define SFR_MAX_RETRIES		10
 #define SFR_RETRY_INTERVAL	1000
 
+static irqreturn_t mdm_vddmin_change(int irq, void *dev_id)
+{
+	int value = gpio_get_value(
+		mdm_drv->pdata->vddmin_resource->mdm2ap_vddmin_gpio);
+
+	if (value == 0)
+		pr_info("External Modem entered Vddmin\n");
+	else
+		pr_info("External Modem exited Vddmin\n");
+
+	return IRQ_HANDLED;
+}
+
+static void mdm_setup_vddmin_gpios(void)
+{
+	struct msm_rpm_iv_pair req;
+	struct mdm_vddmin_resource *vddmin_res;
+	int irq, ret;
+
+	/* This resource may not be supported by some platforms. */
+	vddmin_res = mdm_drv->pdata->vddmin_resource;
+	if (!vddmin_res)
+		return;
+
+	req.id = vddmin_res->rpm_id;
+	req.value = ((uint32_t)vddmin_res->ap2mdm_vddmin_gpio & 0x0000FFFF)
+							<< 16;
+	req.value |= ((uint32_t)vddmin_res->modes & 0x000000FF) << 8;
+	req.value |= (uint32_t)vddmin_res->drive_strength & 0x000000FF;
+
+	msm_rpm_set(MSM_RPM_CTX_SET_0, &req, 1);
+
+	/* Monitor low power gpio from mdm */
+	irq = MSM_GPIO_TO_INT(vddmin_res->mdm2ap_vddmin_gpio);
+	if (irq < 0) {
+		pr_err("%s: could not get LPM POWER IRQ resource.\n",
+			__func__);
+		goto error_end;
+	}
+
+	ret = request_threaded_irq(irq, NULL, mdm_vddmin_change,
+		IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
+		"mdm lpm", NULL);
+
+	if (ret < 0)
+		pr_err("%s: MDM LPM IRQ#%d request failed with error=%d\n",
+			__func__, irq, ret);
+error_end:
+	return;
+}
+
 static void mdm_restart_reason_fn(struct work_struct *work)
 {
 	int ret, ntries = 0;
@@ -260,8 +313,8 @@
 	if (i <= 0) {
 		pr_err("%s: MDM2AP_STATUS never went low\n", __func__);
 		/* Reset the modem so that it will go into download mode. */
-		if (mdm_drv && mdm_drv->ops->reset_mdm_cb)
-			mdm_drv->ops->reset_mdm_cb(mdm_drv);
+		if (mdm_drv && mdm_drv->ops->atomic_reset_mdm_cb)
+			mdm_drv->ops->atomic_reset_mdm_cb(mdm_drv);
 	}
 	return NOTIFY_DONE;
 }
@@ -288,8 +341,17 @@
 	return IRQ_HANDLED;
 }
 
+static irqreturn_t mdm_pblrdy_change(int irq, void *dev_id)
+{
+	pr_info("%s: pbl ready:%d\n", __func__,
+			gpio_get_value(mdm_drv->mdm2ap_pblrdy));
+
+	return IRQ_HANDLED;
+}
+
 static int mdm_subsys_shutdown(const struct subsys_data *crashed_subsys)
 {
+	mdm_drv->mdm_ready = 0;
 	gpio_direction_output(mdm_drv->ap2mdm_errfatal_gpio, 1);
 	if (mdm_drv->pdata->ramdump_delay_ms > 0) {
 		/* Wait for the external modem to complete
@@ -335,7 +397,7 @@
 		mdm_drv->boot_type = CHARM_RAM_DUMPS;
 		complete(&mdm_needs_reload);
 		if (!wait_for_completion_timeout(&mdm_ram_dumps,
-				msecs_to_jiffies(MDM_RDUMP_TIMEOUT))) {
+				msecs_to_jiffies(dump_timeout_ms))) {
 			mdm_drv->mdm_ram_dump_status = -ETIMEDOUT;
 			pr_info("%s: mdm modem ramdumps timed out.\n",
 					__func__);
@@ -343,7 +405,8 @@
 			pr_info("%s: mdm modem ramdumps completed.\n",
 					__func__);
 		INIT_COMPLETION(mdm_ram_dumps);
-		mdm_drv->ops->power_down_mdm_cb(mdm_drv);
+		if (!mdm_drv->pdata->no_powerdown_after_ramdumps)
+			mdm_drv->ops->power_down_mdm_cb(mdm_drv);
 	}
 	return mdm_drv->mdm_ram_dump_status;
 }
@@ -445,10 +508,18 @@
 	if (pres)
 		mdm_drv->ap2mdm_pmic_pwr_en_gpio = pres->start;
 
+	/* MDM2AP_PBLRDY */
+	pres = platform_get_resource_byname(pdev, IORESOURCE_IO,
+							"MDM2AP_PBLRDY");
+	if (pres)
+		mdm_drv->mdm2ap_pblrdy = pres->start;
+
 	mdm_drv->boot_type                  = CHARM_NORMAL_BOOT;
 
 	mdm_drv->ops      = mdm_ops;
 	mdm_drv->pdata    = pdev->dev.platform_data;
+	dump_timeout_ms = mdm_drv->pdata->ramdump_timeout_ms > 0 ?
+		mdm_drv->pdata->ramdump_timeout_ms : MDM_RDUMP_TIMEOUT;
 }
 
 int mdm_common_create(struct platform_device  *pdev,
@@ -472,6 +543,8 @@
 		gpio_request(mdm_drv->ap2mdm_kpdpwr_n_gpio, "AP2MDM_KPDPWR_N");
 	gpio_request(mdm_drv->mdm2ap_status_gpio, "MDM2AP_STATUS");
 	gpio_request(mdm_drv->mdm2ap_errfatal_gpio, "MDM2AP_ERRFATAL");
+	if (mdm_drv->mdm2ap_pblrdy > 0)
+		gpio_request(mdm_drv->mdm2ap_pblrdy, "MDM2AP_PBLRDY");
 
 	if (mdm_drv->ap2mdm_pmic_pwr_en_gpio > 0)
 		gpio_request(mdm_drv->ap2mdm_pmic_pwr_en_gpio,
@@ -560,12 +633,35 @@
 	mdm_drv->mdm_status_irq = irq;
 
 status_err:
+	if (mdm_drv->mdm2ap_pblrdy > 0) {
+		irq = MSM_GPIO_TO_INT(mdm_drv->mdm2ap_pblrdy);
+		if (irq < 0) {
+			pr_err("%s: could not get MDM2AP_PBLRDY IRQ resource\n",
+				__func__);
+			goto pblrdy_err;
+		}
+
+		ret = request_threaded_irq(irq, NULL, mdm_pblrdy_change,
+			IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING |
+			IRQF_SHARED,
+			"mdm pbl ready", mdm_drv);
+
+		if (ret < 0) {
+			pr_err("%s: MDM2AP_PBL IRQ#%d request failed error=%d\n",
+				__func__, irq, ret);
+			goto pblrdy_err;
+		}
+	}
+
+pblrdy_err:
 	/*
 	 * If AP2MDM_PMIC_PWR_EN gpio is used, pull it high. It remains
 	 * high until the whole phone is shut down.
 	 */
 	if (mdm_drv->ap2mdm_pmic_pwr_en_gpio > 0)
 		gpio_direction_output(mdm_drv->ap2mdm_pmic_pwr_en_gpio, 1);
+	/* Register VDDmin gpios with RPM */
+	mdm_setup_vddmin_gpios();
 
 	/* Perform early powerup of the external modem in order to
 	 * allow tabla devices to be found.
diff --git a/arch/arm/mach-msm/mdm_private.h b/arch/arm/mach-msm/mdm_private.h
index f157d88..7ac3727 100644
--- a/arch/arm/mach-msm/mdm_private.h
+++ b/arch/arm/mach-msm/mdm_private.h
@@ -18,6 +18,7 @@
 struct mdm_ops {
 	void (*power_on_mdm_cb)(struct mdm_modem_drv *mdm_drv);
 	void (*reset_mdm_cb)(struct mdm_modem_drv *mdm_drv);
+	void (*atomic_reset_mdm_cb)(struct mdm_modem_drv *mdm_drv);
 	void (*normal_boot_done_cb)(struct mdm_modem_drv *mdm_drv);
 	void (*power_down_mdm_cb)(struct mdm_modem_drv *mdm_drv);
 	void (*debug_state_changed_cb)(int value);
@@ -35,6 +36,7 @@
 	unsigned ap2mdm_kpdpwr_n_gpio;
 	unsigned ap2mdm_soft_reset_gpio;
 	unsigned ap2mdm_pmic_pwr_en_gpio;
+	unsigned mdm2ap_pblrdy;
 
 	int mdm_errfatal_irq;
 	int mdm_status_irq;
diff --git a/arch/arm/mach-msm/memory.c b/arch/arm/mach-msm/memory.c
index 40845d7..a1b21c5 100644
--- a/arch/arm/mach-msm/memory.c
+++ b/arch/arm/mach-msm/memory.c
@@ -363,64 +363,6 @@
 }
 EXPORT_SYMBOL(allocate_contiguous_ebi_nomap);
 
-/* emulation of the deprecated pmem_kalloc and pmem_kfree */
-int32_t pmem_kalloc(const size_t size, const uint32_t flags)
-{
-	int pmem_memtype;
-	int memtype = MEMTYPE_NONE;
-	int ebi1_memtype = MEMTYPE_EBI1;
-	unsigned int align;
-	int32_t paddr;
-
-	switch (flags & PMEM_ALIGNMENT_MASK) {
-	case PMEM_ALIGNMENT_4K:
-		align = SZ_4K;
-		break;
-	case PMEM_ALIGNMENT_1M:
-		align = SZ_1M;
-		break;
-	default:
-		pr_alert("Invalid alignment %x\n",
-			(flags & PMEM_ALIGNMENT_MASK));
-		return -EINVAL;
-	}
-
-	/* on 7x30 and 8x55 "EBI1 kernel PMEM" is really on EBI0 */
-	if (cpu_is_msm7x30() || cpu_is_msm8x55())
-			ebi1_memtype = MEMTYPE_EBI0;
-
-	pmem_memtype = flags & PMEM_MEMTYPE_MASK;
-	if (pmem_memtype == PMEM_MEMTYPE_EBI1)
-		memtype = ebi1_memtype;
-	else if (pmem_memtype == PMEM_MEMTYPE_SMI)
-		memtype = MEMTYPE_SMI_KERNEL;
-	else {
-		pr_alert("Invalid memory type %x\n",
-			flags & PMEM_MEMTYPE_MASK);
-		return -EINVAL;
-	}
-
-	paddr = _allocate_contiguous_memory_nomap(size, memtype, align,
-		__builtin_return_address(0));
-
-	if (!paddr && pmem_memtype == PMEM_MEMTYPE_SMI)
-		paddr = _allocate_contiguous_memory_nomap(size,
-			ebi1_memtype, align, __builtin_return_address(0));
-
-	if (!paddr)
-		return -ENOMEM;
-	return paddr;
-}
-EXPORT_SYMBOL(pmem_kalloc);
-
-int pmem_kfree(const int32_t physaddr)
-{
-	free_contiguous_memory_by_paddr(physaddr);
-
-	return 0;
-}
-EXPORT_SYMBOL(pmem_kfree);
-
 unsigned int msm_ttbr0;
 
 void store_ttbr0(void)
diff --git a/arch/arm/mach-msm/modem-8660.c b/arch/arm/mach-msm/modem-8660.c
index 0b7b768..9c558e4 100644
--- a/arch/arm/mach-msm/modem-8660.c
+++ b/arch/arm/mach-msm/modem-8660.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+/* Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -275,8 +275,6 @@
 {
 	if (reset_modem == 1)
 		smsm_reset_modem(SMSM_RESET);
-	else if (reset_modem == 2)
-		subsystem_restart("lpass");
 
 	reset_modem = 0;
 	schedule_delayed_work(&debug_crash_modem_work, msecs_to_jiffies(1000));
diff --git a/arch/arm/mach-msm/modem-8960.c b/arch/arm/mach-msm/modem-8960.c
index 5d02bda..1132be2 100644
--- a/arch/arm/mach-msm/modem-8960.c
+++ b/arch/arm/mach-msm/modem-8960.c
@@ -281,7 +281,8 @@
 {
 	int ret;
 
-	if (!cpu_is_msm8960() && !cpu_is_msm8930() && !cpu_is_msm9615())
+	if (!cpu_is_msm8960() && !cpu_is_msm8930() && !cpu_is_msm9615() &&
+	    !cpu_is_msm8627())
 		return -ENODEV;
 
 	ret = smsm_state_cb_register(SMSM_MODEM_STATE, SMSM_RESET,
@@ -336,7 +337,7 @@
 		goto out;
 	}
 
-	smem_ramdump_dev = create_ramdump_device("smem");
+	smem_ramdump_dev = create_ramdump_device("smem-modem");
 
 	if (!smem_ramdump_dev) {
 		pr_err("%s: Unable to create smem ramdump device. (%d)\n",
diff --git a/arch/arm/mach-msm/mpm-8625.c b/arch/arm/mach-msm/mpm-8625.c
index fa966d2..954e5cc 100644
--- a/arch/arm/mach-msm/mpm-8625.c
+++ b/arch/arm/mach-msm/mpm-8625.c
@@ -152,7 +152,7 @@
 	return 0;
 }
 
-void __init msm_gic_irq_extn_init(void __iomem *db, void __iomem *cb)
+void __init msm_gic_irq_extn_init(void)
 {
 	gic_arch_extn.irq_mask	= msm_gic_mask_irq;
 	gic_arch_extn.irq_unmask = msm_gic_unmask_irq;
diff --git a/arch/arm/mach-msm/mpm-8625.h b/arch/arm/mach-msm/mpm-8625.h
index 4ada9e2..1c28390 100644
--- a/arch/arm/mach-msm/mpm-8625.h
+++ b/arch/arm/mach-msm/mpm-8625.h
@@ -14,7 +14,7 @@
 #ifndef _ARCH_ARM_MACH_MSM_MPM_8625_H_
 #define _ARCH_ARM_MACH_MSM_MPM_8625_H_
 
-void msm_gic_irq_extn_init(void __iomem *, void __iomem *);
+void msm_gic_irq_extn_init(void);
 
 unsigned int msm_gic_spi_ppi_pending(void);
 int msm_gic_irq_idle_sleep_allowed(void);
diff --git a/arch/arm/mach-msm/msm_bus/Makefile b/arch/arm/mach-msm/msm_bus/Makefile
index 766856c..ab62c20 100644
--- a/arch/arm/mach-msm/msm_bus/Makefile
+++ b/arch/arm/mach-msm/msm_bus/Makefile
@@ -8,4 +8,5 @@
 obj-$(CONFIG_ARCH_MSM9615) += msm_bus_board_9615.o
 obj-$(CONFIG_ARCH_APQ8064) += msm_bus_board_8064.o
 obj-$(CONFIG_ARCH_MSM8930) += msm_bus_board_8930.o
+obj-$(CONFIG_ARCH_MSM8974) += msm_bus_board_8974.o
 obj-$(CONFIG_DEBUG_FS) += msm_bus_dbg.o
diff --git a/arch/arm/mach-msm/msm_bus/msm_bus_board_8974.c b/arch/arm/mach-msm/msm_bus/msm_bus_board_8974.c
new file mode 100644
index 0000000..265716d
--- /dev/null
+++ b/arch/arm/mach-msm/msm_bus/msm_bus_board_8974.c
@@ -0,0 +1,2002 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <mach/msm_bus.h>
+#include <mach/msm_bus_board.h>
+#include <mach/board.h>
+#include <mach/rpm.h>
+#include "msm_bus_core.h"
+#include "msm_bus_noc.h"
+#include "msm_bus_bimc.h"
+
+#define NMASTERS 120
+#define NSLAVES 150
+#define NFAB_8974 7
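+/*
+ * NMASTERS/NSLAVES bound the master_iids[]/slave_iids[] lookup tables that
+ * map client ids to the private ids assigned by the board algorithm below.
+ */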
+
+enum msm_bus_8974_master_ports_type {
+	/* System NOC Masters */
+	MASTER_PORT_LPASS_AHB = 0,
+	MASTER_PORT_QDSS_BAM,
+	MASTER_PORT_SNOC_CFG,
+	MASTER_PORT_GW_BIMC_SNOC,
+	MASTER_PORT_GW_CNOC_SNOC,
+	MASTER_PORT_CRYPTO_CORE0,
+	MASTER_PORT_CRYPTO_CORE1,
+	MASTER_PORT_LPASS_PROC,
+	MASTER_PORT_MSS,
+	MASTER_PORT_MSS_NAV,
+	MASTER_PORT_OCMEM_DMA,
+	MASTER_PORT_GW_PNOC_SNOC,
+	MASTER_PORT_WCSS,
+	MASTER_PORT_QDSS_ETR,
+	MASTER_PORT_USB3,
+
+	/* MMSS NOC Masters */
+	MASTER_PORT_GW_CNOC_MNOC_MMSS_CFG = 0,
+	MASTER_PORT_GW_CNOC_MNOC_CFG,
+	MASTER_PORT_GFX3D_PORT0,
+	MASTER_PORT_GFX3D_PORT1,
+	MASTER_PORT_JPEG,
+	MASTER_PORT_MDP,
+	/* Venus video core */
+	MASTER_PORT_VIDEO_PORT0,
+	MASTER_PORT_VIDEO_PORT1,
+	MASTER_PORT_VFE = 16,
+
+	/* BIMC Masters */
+	MASTER_PORT_KMPSS_M0 = 0,
+	MASTER_PORT_KMPSS_M1,
+	MASTER_PORT_MSS_PROC,
+	MASTER_PORT_GW_MNOC_BIMC_0,
+	MASTER_PORT_GW_MNOC_BIMC_1,
+	MASTER_PORT_GW_SNOC_BIMC_0,
+	MASTER_PORT_GW_SNOC_BIMC_1,
+
+	/* OCMEM NOC Masters */
+	MASTER_PORT_CNOC_ONOC_CFG = 0,
+	MASTER_PORT_JPEG_OCMEM,
+	MASTER_PORT_MDP_OCMEM,
+	MASTER_PORT_VIDEO_P0_OCMEM,
+	MASTER_PORT_VIDEO_P1_OCMEM,
+	MASTER_PORT_VFE_OCMEM,
+
+	/* Peripheral NOC Masters */
+	MASTER_PORT_SDCC_1 = 0,
+	MASTER_PORT_SDCC_3,
+	MASTER_PORT_SDCC_2,
+	MASTER_PORT_SDCC_4,
+	MASTER_PORT_TSIF,
+	MASTER_PORT_BAM_DMA,
+	MASTER_PORT_BLSP_2,
+	MASTER_PORT_USB_HSIC,
+	MASTER_PORT_BLSP_1,
+	MASTER_PORT_USB_HS,
+	MASTER_PORT_PNOC_CFG,
+	MASTER_PORT_GW_SNOC_PNOC,
+
+	/* Config NOC Masters */
+	MASTER_PORT_RPM_INST = 0,
+	MASTER_PORT_RPM_DATA,
+	MASTER_PORT_RPM_SYS,
+	MASTER_PORT_DEHR,
+	MASTER_PORT_QDSS_DAP,
+	MASTER_PORT_SPDM,
+	MASTER_PORT_TIC,
+	MASTER_PORT_GW_SNOC_CNOC,
+};
+
+enum msm_bus_8974_slave_ports_type {
+	/* System NOC Slaves */
+	SLAVE_PORT_KMPSS = 1,
+	SLAVE_PORT_LPASS,
+	SLAVE_PORT_USB3 = 4,
+	SLAVE_PORT_WCSS = 6,
+	SLAVE_PORT_GW_SNOC_BIMC_P0,
+	SLAVE_PORT_GW_SNOC_BIMC_P1,
+	SLAVE_PORT_GW_SNOC_CNOC,
+	SLAVE_PORT_OCIMEM,
+	SLAVE_PORT_SNOC_OCMEM,
+	SLAVE_PORT_GW_SNOC_PNOC,
+	SLAVE_PORT_SERVICE_SNOC,
+	SLAVE_PORT_QDSS_STM,
+
+	/* MMSS NOC Slaves */
+	SLAVE_PORT_CAMERA_CFG = 0,
+	SLAVE_PORT_DISPLAY_CFG,
+	SLAVE_PORT_OCMEM_CFG,
+	SLAVE_PORT_CPR_CFG,
+	SLAVE_PORT_CPR_XPU_CFG,
+	SLAVE_PORT_MISC_CFG = 6,
+	SLAVE_PORT_MISC_XPU_CFG,
+	SLAVE_PORT_VENUS_CFG,
+	SLAVE_PORT_GFX3D_CFG,
+	SLAVE_PORT_MMSS_CLK_CFG = 11,
+	SLAVE_PORT_MMSS_CLK_XPU_CFG,
+	SLAVE_PORT_MNOC_MPU_CFG,
+	SLAVE_PORT_ONOC_MPU_CFG,
+	SLAVE_PORT_GW_MMSS_BIMC_P0 = 16,
+	SLAVE_PORT_GW_MMSS_BIMC_P1,
+	SLAVE_PORT_SERVICE_MNOC,
+
+	/* BIMC Slaves */
+	SLAVE_PORT_EBI1_CH0 = 0,
+	SLAVE_PORT_EBI1_CH1,
+	SLAVE_PORT_KMPSS_L2,
+	SLAVE_PORT_GW_BIMC_SNOC,
+
+	/* OCMEM NOC Slaves */
+	SLAVE_PORT_OCMEM_P0 = 0,
+	SLAVE_PORT_OCMEM_P1,
+	SLAVE_PORT_SERVICE_ONOC,
+
+	/* Peripheral NOC Slaves */
+	SLAVE_PORT_SDCC_1 = 0,
+	SLAVE_PORT_SDCC_3,
+	SLAVE_PORT_SDCC_2,
+	SLAVE_PORT_SDCC_4,
+	SLAVE_PORT_TSIF,
+	SLAVE_PORT_BAM_DMA,
+	SLAVE_PORT_BLSP_2,
+	SLAVE_PORT_USB_HSIC,
+	SLAVE_PORT_BLSP_1,
+	SLAVE_PORT_USB_HS,
+	SLAVE_PORT_PDM,
+	SLAVE_PORT_PERIPH_APU_CFG,
+	SLAVE_PORT_PNOC_MPU_CFG,
+	SLAVE_PORT_PRNG,
+	SLAVE_PORT_GW_PNOC_SNOC,
+	SLAVE_PORT_SERVICE_PNOC,
+
+	/* Config NOC slaves */
+	SLAVE_PORT_CLK_CTL = 1,
+	SLAVE_PORT_CNOC_MSS,
+	SLAVE_PORT_SECURITY,
+	SLAVE_PORT_TCSR,
+	SLAVE_PORT_TLMM,
+	SLAVE_PORT_CRYPTO_0_CFG,
+	SLAVE_PORT_CRYPTO_1_CFG,
+	SLAVE_PORT_IMEM_CFG,
+	SLAVE_PORT_MESSAGE_RAM,
+	SLAVE_PORT_BIMC_CFG,
+	SLAVE_PORT_BOOT_ROM,
+	SLAVE_PORT_CNOC_MNOC_MMSS_CFG,
+	SLAVE_PORT_PMIC_ARB,
+	SLAVE_PORT_SPDM_WRAPPER,
+	SLAVE_PORT_DEHR_CFG,
+	SLAVE_PORT_MPM,
+	SLAVE_PORT_QDSS_CFG,
+	SLAVE_PORT_RBCPR_CFG,
+	SLAVE_PORT_RBCPR_QDSS_APU_CFG,
+	SLAVE_PORT_CNOC_MNOC_CFG,
+	SLAVE_PORT_SNOC_MPU_CFG,
+	SLAVE_PORT_CNOC_ONOC_CFG,
+	SLAVE_PORT_PNOC_CFG,
+	SLAVE_PORT_SNOC_CFG,
+	SLAVE_PORT_EBI1_DLL_CFG,
+	SLAVE_PORT_PHY_APU_CFG,
+	SLAVE_PORT_EBI1_PHY_CFG,
+	SLAVE_PORT_RPM,
+	SLAVE_PORT_GW_CNOC_SNOC,
+	SLAVE_PORT_SERVICE_CNOC,
+};
+
+/* Hardware IDs for RPM */
+enum msm_bus_8974_mas_hw_id {
+	MAS_APPSS_PROC = 0,
+	MAS_AMSS_PROC,
+	MAS_MNOC_BIMC,
+	MAS_SNOC_BIMC,
+	MAS_CNOC_MNOC_MMSS_CFG,
+	MAS_CNOC_MNOC_CFG,
+	MAS_GFX3D,
+	MAS_JPEG,
+	MAS_MDP,
+	MAS_VIDEO_P0,
+	MAS_VIDEO_P1,
+	MAS_VFE,
+	MAS_CNOC_ONOC_CFG,
+	MAS_JPEG_OCMEM,
+	MAS_MDP_OCMEM,
+	MAS_VIDEO_P0_OCMEM,
+	MAS_VIDEO_P1_OCMEM,
+	MAS_VFE_OCMEM,
+	MAS_LPASS_AHB,
+	MAS_QDSS_BAM,
+	MAS_SNOC_CFG,
+	MAS_BIMC_SNOC,
+	MAS_CNOC_SNOC,
+	MAS_CRYPTO_CORE0,
+	MAS_CRYPTO_CORE1,
+	MAS_LPASS_PROC,
+	MAS_MSS,
+	MAS_MSS_NAV,
+	MAS_OCMEM_DMA,
+	MAS_PNOC_SNOC,
+	MAS_WCSS,
+	MAS_QDSS_ETR,
+	MAS_USB3,
+	MAS_SDCC_1,
+	MAS_SDCC_3,
+	MAS_SDCC_2,
+	MAS_SDCC_4,
+	MAS_TSIF,
+	MAS_BAM_DMA,
+	MAS_BLSP_2,
+	MAS_USB_HSIC,
+	MAS_BLSP_1,
+	MAS_USB_HS,
+	MAS_PNOC_CFG,
+	MAS_SNOC_PNOC,
+	MAS_RPM_INST,
+	MAS_RPM_DATA,
+	MAS_RPM_SYS,
+	MAS_DEHR,
+	MAS_QDSS_DAP,
+	MAS_SPDM,
+	MAS_TIC,
+	MAS_SNOC_CNOC,
+	MAS_OVNOC_SNOC,
+	MAS_OVNOC_ONOC,
+	MAS_V_OCMEM_GFX3D,
+	MAS_ONOC_OVNOC,
+	MAS_SNOC_OVNOC,
+};
+
+enum msm_bus_8974_slv_hw_id {
+	SLV_EBI = 0,
+	SLV_APSS_L2,
+	SLV_BIMC_SNOC,
+	SLV_CAMERA_CFG,
+	SLV_DISPLAY_CFG,
+	SLV_OCMEM_CFG,
+	SLV_CPR_CFG,
+	SLV_CPR_XPU_CFG,
+	SLV_MISC_CFG,
+	SLV_MISC_XPU_CFG,
+	SLV_VENUS_CFG,
+	SLV_GFX3D_CFG,
+	SLV_MMSS_CLK_CFG,
+	SLV_MMSS_CLK_XPU_CFG,
+	SLV_MNOC_MPU_CFG,
+	SLV_ONOC_MPU_CFG,
+	SLV_MMSS_BIMC,
+	SLV_SERVICE_MNOC,
+	SLV_OCMEM,
+	SLV_SERVICE_ONOC,
+	SLV_APPSS,
+	SLV_LPASS,
+	SLV_USB3,
+	SLV_WCSS,
+	SLV_SNOC_BIMC,
+	SLV_SNOC_CNOC,
+	SLV_OCIMEM,
+	SLV_SNOC_OCMEM,
+	SLV_SNOC_PNOC,
+	SLV_SERVICE_SNOC,
+	SLV_QDSS_STM,
+	SLV_SDCC_1,
+	SLV_SDCC_3,
+	SLV_SDCC_2,
+	SLV_SDCC_4,
+	SLV_TSIF,
+	SLV_BAM_DMA,
+	SLV_BLSP_2,
+	SLV_USB_HSIC,
+	SLV_BLSP_1,
+	SLV_USB_HS,
+	SLV_PDM,
+	SLV_PERIPH_APU_CFG,
+	SLV_MPU_CFG,
+	SLV_PRNG,
+	SLV_PNOC_SNOC,
+	SLV_SERVICE_PNOC,
+	SLV_CLK_CTL,
+	SLV_CNOC_MSS,
+	SLV_SECURITY,
+	SLV_TCSR,
+	SLV_TLMM,
+	SLV_CRYPTO_0_CFG,
+	SLV_CRYPTO_1_CFG,
+	SLV_IMEM_CFG,
+	SLV_MESSAGE_RAM,
+	SLV_BIMC_CFG,
+	SLV_BOOT_ROM,
+	SLV_CNOC_MNOC_MMSS_CFG,
+	SLV_PMIC_ARB,
+	SLV_SPDM_WRAPPER,
+	SLV_DEHR_CFG,
+	SLV_MPM,
+	SLV_QDSS_CFG,
+	SLV_RBCPR_CFG,
+	SLV_RBCPR_QDSS_APU_CFG,
+	SLV_CNOC_MNOC_CFG,
+	SLV_SNOC_MPU_CFG,
+	SLV_CNOC_ONOC_CFG,
+	SLV_PNOC_CFG,
+	SLV_SNOC_CFG,
+	SLV_EBI1_DLL_CFG,
+	SLV_PHY_APU_CFG,
+	SLV_EBI1_PHY_CFG,
+	SLV_RPM,
+	SLV_CNOC_SNOC,
+	SLV_SERVICE_CNOC,
+	SLV_SNOC_OVNOC,
+	SLV_ONOC_OVNOC,
+	SLV_OVNOC_ONOC,
+	SLV_OVNOC_SNOC,
+};
+
+static uint32_t master_iids[NMASTERS];
+static uint32_t slave_iids[NSLAVES];
+
+/* System NOC nodes */
+static int mport_lpass_ahb[] = {MASTER_PORT_LPASS_AHB,};
+static int mport_qdss_bam[] = {MASTER_PORT_QDSS_BAM,};
+static int mport_snoc_cfg[] = {MASTER_PORT_SNOC_CFG,};
+static int mport_gw_bimc_snoc[] = {MASTER_PORT_GW_BIMC_SNOC,};
+static int mport_gw_cnoc_snoc[] = {MASTER_PORT_GW_CNOC_SNOC,};
+static int mport_crypto_core0[] = {MASTER_PORT_CRYPTO_CORE0,};
+static int mport_crypto_core1[] = {MASTER_PORT_CRYPTO_CORE1};
+static int mport_lpass_proc[] = {MASTER_PORT_LPASS_PROC};
+static int mport_mss[] = {MASTER_PORT_MSS};
+static int mport_mss_nav[] = {MASTER_PORT_MSS_NAV};
+static int mport_ocmem_dma[] = {MASTER_PORT_OCMEM_DMA};
+static int mport_gw_pnoc_snoc[] = {MASTER_PORT_GW_PNOC_SNOC};
+static int mport_wcss[] = {MASTER_PORT_WCSS};
+static int mport_qdss_etr[] = {MASTER_PORT_QDSS_ETR};
+static int mport_usb3[] = {MASTER_PORT_USB3};
+
+static int sport_kmpss[] = {SLAVE_PORT_KMPSS};
+static int sport_lpass[] = {SLAVE_PORT_LPASS};
+static int sport_usb3[] = {SLAVE_PORT_USB3};
+static int sport_wcss[] = {SLAVE_PORT_WCSS};
+static int sport_gw_snoc_bimc[] = {
+	SLAVE_PORT_GW_SNOC_BIMC_P0,
+	SLAVE_PORT_GW_SNOC_BIMC_P1,
+};
+static int sport_gw_snoc_cnoc[] = {SLAVE_PORT_GW_SNOC_CNOC};
+static int sport_ocimem[] = {SLAVE_PORT_OCIMEM};
+static int sport_snoc_ocmem[] = {SLAVE_PORT_SNOC_OCMEM};
+static int sport_gw_snoc_pnoc[] = {SLAVE_PORT_GW_SNOC_PNOC};
+static int sport_service_snoc[] = {SLAVE_PORT_SERVICE_SNOC};
+static int sport_qdss_stm[] = {SLAVE_PORT_QDSS_STM};
+
+
+/* MMSS NOC nodes */
+static int mport_gw_cnoc_mnoc_cfg[] = {
+	MASTER_PORT_GW_CNOC_MNOC_MMSS_CFG,
+	MASTER_PORT_GW_CNOC_MNOC_CFG,
+};
+static int mport_gfx3d[] = {
+	MASTER_PORT_GFX3D_PORT0,
+	MASTER_PORT_GFX3D_PORT1,
+};
+static int mport_jpeg[] = {MASTER_PORT_JPEG};
+static int mport_mdp[] = {MASTER_PORT_MDP};
+static int mport_video_port0[] = {MASTER_PORT_VIDEO_PORT0};
+static int mport_video_port1[] = {MASTER_PORT_VIDEO_PORT1};
+static int mport_vfe[] = {MASTER_PORT_VFE};
+
+static int sport_camera_cfg[] = {SLAVE_PORT_CAMERA_CFG};
+static int sport_display_cfg[] = {SLAVE_PORT_DISPLAY_CFG};
+static int sport_ocmem_cfg[] = {SLAVE_PORT_OCMEM_CFG};
+static int sport_cpr_cfg[] = {SLAVE_PORT_CPR_CFG};
+static int sport_cpr_xpu_cfg[] = {SLAVE_PORT_CPR_XPU_CFG,};
+static int sport_misc_cfg[] = {SLAVE_PORT_MISC_CFG};
+static int sport_misc_xpu_cfg[] = {SLAVE_PORT_MISC_XPU_CFG};
+static int sport_venus_cfg[] = {SLAVE_PORT_VENUS_CFG};
+static int sport_gfx3d_cfg[] = {SLAVE_PORT_GFX3D_CFG};
+static int sport_mmss_clk_cfg[] = {SLAVE_PORT_MMSS_CLK_CFG};
+static int sport_mmss_clk_xpu_cfg[] = {
+	SLAVE_PORT_MMSS_CLK_XPU_CFG
+};
+static int sport_mnoc_mpu_cfg[] = {SLAVE_PORT_MNOC_MPU_CFG};
+static int sport_onoc_mpu_cfg[] = {SLAVE_PORT_ONOC_MPU_CFG};
+static int sport_gw_mmss_bimc[] = {
+	SLAVE_PORT_GW_MMSS_BIMC_P0,
+	SLAVE_PORT_GW_MMSS_BIMC_P1,
+};
+static int sport_service_mnoc[] = {SLAVE_PORT_SERVICE_MNOC};
+
+/* BIMC Nodes */
+
+static int mport_kmpss_m0[] = {MASTER_PORT_KMPSS_M0,};
+static int mport_kmpss_m1[] = {MASTER_PORT_KMPSS_M1};
+static int mport_mss_proc[] = {MASTER_PORT_MSS_PROC};
+static int mport_gw_mnoc_bimc[] = {
+	MASTER_PORT_GW_MNOC_BIMC_0,
+	MASTER_PORT_GW_MNOC_BIMC_1,
+};
+static int mport_gw_snoc_bimc[] = {
+	MASTER_PORT_GW_SNOC_BIMC_0,
+	MASTER_PORT_GW_SNOC_BIMC_1,
+};
+
+static int sport_ebi1[] = {
+	SLAVE_PORT_EBI1_CH0,
+	SLAVE_PORT_EBI1_CH1,
+};
+static int sport_kmpss_l2[] = {SLAVE_PORT_KMPSS_L2,};
+static int sport_gw_bimc_snoc[] = {SLAVE_PORT_GW_BIMC_SNOC,};
+
+/* OCMEM NOC Nodes */
+static int mport_cnoc_onoc_cfg[] = {
+	MASTER_PORT_CNOC_ONOC_CFG,
+};
+static int mport_jpeg_ocmem[] = {MASTER_PORT_JPEG_OCMEM,};
+static int mport_mdp_ocmem[] = {MASTER_PORT_MDP_OCMEM,};
+static int mport_video_p0_ocmem[] = {
+	MASTER_PORT_VIDEO_P0_OCMEM,
+};
+static int mport_video_p1_ocmem[] = {
+	MASTER_PORT_VIDEO_P1_OCMEM,
+};
+static int mport_vfe_ocmem[] = {MASTER_PORT_VFE_OCMEM,};
+static int sport_ocmem[] = {
+	SLAVE_PORT_OCMEM_P0,
+	SLAVE_PORT_OCMEM_P1,
+};
+
+static int sport_service_onoc[] = {SLAVE_PORT_SERVICE_ONOC,};
+
+/* Peripheral NOC Nodes */
+static int mport_sdcc_1[] = {MASTER_PORT_SDCC_1,};
+static int mport_sdcc_3[] = {MASTER_PORT_SDCC_3,};
+static int mport_sdcc_2[] = {MASTER_PORT_SDCC_2,};
+static int mport_sdcc_4[] = {MASTER_PORT_SDCC_4,};
+static int mport_tsif[] = {MASTER_PORT_TSIF,};
+static int mport_bam_dma[] = {MASTER_PORT_BAM_DMA,};
+static int mport_blsp_2[] = {MASTER_PORT_BLSP_2,};
+static int mport_usb_hsic[] = {MASTER_PORT_USB_HSIC,};
+static int mport_blsp_1[] = {MASTER_PORT_BLSP_1,};
+static int mport_usb_hs[] = {MASTER_PORT_USB_HS,};
+static int mport_pnoc_cfg[] = {MASTER_PORT_PNOC_CFG,};
+static int mport_gw_snoc_pnoc[] = {MASTER_PORT_GW_SNOC_PNOC,};
+
+static int sport_sdcc_1[] = {SLAVE_PORT_SDCC_1,};
+static int sport_sdcc_3[] = {SLAVE_PORT_SDCC_3,};
+static int sport_sdcc_2[] = {SLAVE_PORT_SDCC_2,};
+static int sport_sdcc_4[] = {SLAVE_PORT_SDCC_4,};
+static int sport_tsif[] = {SLAVE_PORT_TSIF,};
+static int sport_bam_dma[] = {SLAVE_PORT_BAM_DMA,};
+static int sport_blsp_2[] = {SLAVE_PORT_BLSP_2,};
+static int sport_usb_hsic[] = {SLAVE_PORT_USB_HSIC,};
+static int sport_blsp_1[] = {SLAVE_PORT_BLSP_1,};
+static int sport_usb_hs[] = {SLAVE_PORT_USB_HS,};
+static int sport_pdm[] = {SLAVE_PORT_PDM,};
+static int sport_periph_apu_cfg[] = {
+	SLAVE_PORT_PERIPH_APU_CFG,
+};
+static int sport_pnoc_mpu_cfg[] = {SLAVE_PORT_PNOC_MPU_CFG,};
+static int sport_prng[] = {SLAVE_PORT_PRNG,};
+static int sport_gw_pnoc_snoc[] = {SLAVE_PORT_GW_PNOC_SNOC,};
+static int sport_service_pnoc[] = {SLAVE_PORT_SERVICE_PNOC,};
+
+/* Config NOC Nodes */
+static int mport_rpm_inst[] = {MASTER_PORT_RPM_INST,};
+static int mport_rpm_data[] = {MASTER_PORT_RPM_DATA,};
+static int mport_rpm_sys[] = {MASTER_PORT_RPM_SYS,};
+static int mport_dehr[] = {MASTER_PORT_DEHR,};
+static int mport_qdss_dap[] = {MASTER_PORT_QDSS_DAP,};
+static int mport_spdm[] = {MASTER_PORT_SPDM,};
+static int mport_tic[] = {MASTER_PORT_TIC,};
+static int mport_gw_snoc_cnoc[] = {MASTER_PORT_GW_SNOC_CNOC,};
+
+static int sport_clk_ctl[] = {SLAVE_PORT_CLK_CTL,};
+static int sport_cnoc_mss[] = {SLAVE_PORT_CNOC_MSS,};
+static int sport_security[] = {SLAVE_PORT_SECURITY,};
+static int sport_tcsr[] = {SLAVE_PORT_TCSR,};
+static int sport_tlmm[] = {SLAVE_PORT_TLMM,};
+static int sport_crypto_0_cfg[] = {SLAVE_PORT_CRYPTO_0_CFG,};
+static int sport_crypto_1_cfg[] = {SLAVE_PORT_CRYPTO_1_CFG,};
+static int sport_imem_cfg[] = {SLAVE_PORT_IMEM_CFG,};
+static int sport_message_ram[] = {SLAVE_PORT_MESSAGE_RAM,};
+static int sport_bimc_cfg[] = {SLAVE_PORT_BIMC_CFG,};
+static int sport_boot_rom[] = {SLAVE_PORT_BOOT_ROM,};
+static int sport_cnoc_mnoc_mmss_cfg[] = {SLAVE_PORT_CNOC_MNOC_MMSS_CFG,};
+static int sport_cnoc_mnoc_cfg[] = {SLAVE_PORT_CNOC_MNOC_CFG,};
+static int sport_pmic_arb[] = {SLAVE_PORT_PMIC_ARB,};
+static int sport_spdm_wrapper[] = {SLAVE_PORT_SPDM_WRAPPER,};
+static int sport_dehr_cfg[] = {SLAVE_PORT_DEHR_CFG,};
+static int sport_mpm[] = {SLAVE_PORT_MPM,};
+static int sport_qdss_cfg[] = {SLAVE_PORT_QDSS_CFG,};
+static int sport_rbcpr_cfg[] = {SLAVE_PORT_RBCPR_CFG,};
+static int sport_rbcpr_qdss_apu_cfg[] = {SLAVE_PORT_RBCPR_QDSS_APU_CFG,};
+static int sport_snoc_mpu_cfg[] = {SLAVE_PORT_SNOC_MPU_CFG,};
+static int sport_cnoc_onoc_cfg[] = {SLAVE_PORT_CNOC_ONOC_CFG,};
+static int sport_pnoc_cfg[] = {SLAVE_PORT_PNOC_CFG,};
+static int sport_snoc_cfg[] = {SLAVE_PORT_SNOC_CFG,};
+static int sport_ebi1_dll_cfg[] = {SLAVE_PORT_EBI1_DLL_CFG,};
+static int sport_phy_apu_cfg[] = {SLAVE_PORT_PHY_APU_CFG,};
+static int sport_ebi1_phy_cfg[] = {SLAVE_PORT_EBI1_PHY_CFG,};
+static int sport_rpm[] = {SLAVE_PORT_RPM,};
+static int sport_gw_cnoc_snoc[] = {SLAVE_PORT_GW_CNOC_SNOC,};
+static int sport_service_cnoc[] = {SLAVE_PORT_SERVICE_CNOC,};
+
+static int tier2[] = {MSM_BUS_BW_TIER2,};
+
+/*
+ * QoS ports are defined only when they differ from the
+ * corresponding master ports.
+ */
+static int qports_gemini[] = {0};
+static int qports_mdp[] = {1};
+static int qports_venus_p0[] = {4};
+static int qports_venus_p1[] = {5};
+static int qports_vfe[] = {6};
+static int qports_gemini_ocmem[] = {0};
+static int qports_mdp_ocmem[] = {1};
+static int qports_venus_p0_ocmem[] = {2};
+static int qports_venus_p1_ocmem[] = {3};
+static int qports_vfe_ocmem[] = {4};
+static int qports_crypto_c0[] = {2};
+static int qports_crypto_c1[] = {3};
+static int qports_lpass_proc[] = {4};
+static int qports_ocmem_dma[] = {7};
+static int qports_gw_snoc_bimc[] = {5, 6};
+static int qports_kmpss[] = {0, 1};
+static int qports_lpass_ahb[] = {0};
+static int qports_qdss_bam[] = {1};
+static int qports_gw_pnoc_snoc[] = {8};
+static int qports_qdss_etr[] = {10};
+static int qports_usb3[] = {11};
+static int qports_oxili[] = {2, 3};
+static int qports_gw_mnoc_bimc[] = {3, 4};
+
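+/*
+ * Per-fabric node tables: each entry maps an MSM_BUS_* id to its physical
+ * master/slave ports, optional QoS port configuration and the RPM hardware id.
+ */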
+static struct msm_bus_node_info sys_noc_info[] = {
+	{
+		.id = MSM_BUS_MASTER_LPASS_AHB,
+		.masterp = mport_lpass_ahb,
+		.num_mports = ARRAY_SIZE(mport_lpass_ahb),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.qport = qports_lpass_ahb,
+		.mas_hw_id = MAS_LPASS_AHB,
+		.mode = NOC_QOS_MODE_FIXED,
+		.prio_rd = 2,
+		.prio_wr = 2,
+	},
+	{
+		.id = MSM_BUS_MASTER_QDSS_BAM,
+		.masterp = mport_qdss_bam,
+		.num_mports = ARRAY_SIZE(mport_qdss_bam),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.mode = NOC_QOS_MODE_FIXED,
+		.qport = qports_qdss_bam,
+		.mas_hw_id = MAS_QDSS_BAM,
+	},
+	{
+		.id = MSM_BUS_MASTER_SNOC_CFG,
+		.masterp = mport_snoc_cfg,
+		.num_mports = ARRAY_SIZE(mport_snoc_cfg),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.mas_hw_id = MAS_SNOC_CFG,
+	},
+	{
+		.id = MSM_BUS_FAB_BIMC,
+		.gateway = 1,
+		.slavep = sport_gw_snoc_bimc,
+		.num_sports = ARRAY_SIZE(sport_gw_snoc_bimc),
+		.masterp = mport_gw_bimc_snoc,
+		.num_mports = ARRAY_SIZE(mport_gw_bimc_snoc),
+		.buswidth = 8,
+		.mas_hw_id = MAS_BIMC_SNOC,
+		.slv_hw_id = SLV_SNOC_BIMC,
+	},
+	{
+		.id = MSM_BUS_FAB_CONFIG_NOC,
+		.gateway = 1,
+		.slavep = sport_gw_snoc_cnoc,
+		.num_sports = ARRAY_SIZE(sport_gw_snoc_cnoc),
+		.masterp = mport_gw_cnoc_snoc,
+		.num_mports = ARRAY_SIZE(mport_gw_cnoc_snoc),
+		.buswidth = 8,
+		.mas_hw_id = MAS_CNOC_SNOC,
+		.slv_hw_id = SLV_SNOC_CNOC,
+	},
+	{
+		.id = MSM_BUS_FAB_PERIPH_NOC,
+		.gateway = 1,
+		.slavep = sport_gw_snoc_pnoc,
+		.num_sports = ARRAY_SIZE(sport_gw_snoc_pnoc),
+		.masterp = mport_gw_pnoc_snoc,
+		.num_mports = ARRAY_SIZE(mport_gw_pnoc_snoc),
+		.buswidth = 8,
+		.qport = qports_gw_pnoc_snoc,
+		.mas_hw_id = MAS_PNOC_SNOC,
+		.slv_hw_id = SLV_SNOC_PNOC,
+		.mode = NOC_QOS_MODE_FIXED,
+		.prio_rd = 2,
+		.prio_wr = 2,
+	},
+	{
+		.id = MSM_BUS_FAB_OCMEM_VNOC,
+		.gateway = 1,
+		.buswidth = 8,
+		.mas_hw_id = MAS_OVNOC_SNOC,
+		.slv_hw_id = SLV_SNOC_OVNOC,
+	},
+	{
+		.id = MSM_BUS_MASTER_CRYPTO_CORE0,
+		.masterp = mport_crypto_core0,
+		.num_mports = ARRAY_SIZE(mport_crypto_core0),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.mode = NOC_QOS_MODE_FIXED,
+		.qport = qports_crypto_c0,
+		.mas_hw_id = MAS_CRYPTO_CORE0,
+	},
+	{
+		.id = MSM_BUS_MASTER_CRYPTO_CORE1,
+		.masterp = mport_crypto_core1,
+		.num_mports = ARRAY_SIZE(mport_crypto_core1),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.mode = NOC_QOS_MODE_FIXED,
+		.qport = qports_crypto_c1,
+		.mas_hw_id = MAS_CRYPTO_CORE1,
+	},
+	{
+		.id = MSM_BUS_MASTER_LPASS_PROC,
+		.masterp = mport_lpass_proc,
+		.num_mports = ARRAY_SIZE(mport_lpass_proc),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.qport = qports_lpass_proc,
+		.mas_hw_id = MAS_LPASS_PROC,
+		.mode = NOC_QOS_MODE_FIXED,
+		.prio_rd = 2,
+		.prio_wr = 2,
+	},
+	{
+		.id = MSM_BUS_MASTER_MSS,
+		.masterp = mport_mss,
+		.num_mports = ARRAY_SIZE(mport_mss),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.mas_hw_id = MAS_MSS,
+	},
+	{
+		.id = MSM_BUS_MASTER_MSS_NAV,
+		.masterp = mport_mss_nav,
+		.num_mports = ARRAY_SIZE(mport_mss_nav),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.mas_hw_id = MAS_MSS_NAV,
+	},
+	{
+		.id = MSM_BUS_MASTER_OCMEM_DMA,
+		.masterp = mport_ocmem_dma,
+		.num_mports = ARRAY_SIZE(mport_ocmem_dma),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.mode = NOC_QOS_MODE_FIXED,
+		.qport = qports_ocmem_dma,
+		.mas_hw_id = MAS_OCMEM_DMA,
+	},
+	{
+		.id = MSM_BUS_MASTER_WCSS,
+		.masterp = mport_wcss,
+		.num_mports = ARRAY_SIZE(mport_wcss),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.mas_hw_id = MAS_WCSS,
+	},
+	{
+		.id = MSM_BUS_MASTER_QDSS_ETR,
+		.masterp = mport_qdss_etr,
+		.num_mports = ARRAY_SIZE(mport_qdss_etr),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.qport = qports_qdss_etr,
+		.mode = NOC_QOS_MODE_FIXED,
+		.mas_hw_id = MAS_QDSS_ETR,
+	},
+	{
+		.id = MSM_BUS_MASTER_USB3,
+		.masterp = mport_usb3,
+		.num_mports = ARRAY_SIZE(mport_usb3),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.mode = NOC_QOS_MODE_FIXED,
+		.qport = qports_usb3,
+		.mas_hw_id = MAS_USB3,
+		.prio_rd = 2,
+		.prio_wr = 2,
+	},
+	{
+		.id = MSM_BUS_SLAVE_AMPSS,
+		.slavep = sport_kmpss,
+		.num_sports = ARRAY_SIZE(sport_kmpss),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.buswidth = 8,
+		.slv_hw_id = SLV_APPSS,
+	},
+	{
+		.id = MSM_BUS_SLAVE_LPASS,
+		.slavep = sport_lpass,
+		.num_sports = ARRAY_SIZE(sport_lpass),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.buswidth = 8,
+		.slv_hw_id = SLV_LPASS,
+	},
+	{
+		.id = MSM_BUS_SLAVE_USB3,
+		.slavep = sport_usb3,
+		.num_sports = ARRAY_SIZE(sport_usb3),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.buswidth = 8,
+		.slv_hw_id = SLV_USB3,
+	},
+	{
+		.id = MSM_BUS_SLAVE_WCSS,
+		.slavep = sport_wcss,
+		.num_sports = ARRAY_SIZE(sport_wcss),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.buswidth = 8,
+		.slv_hw_id = SLV_WCSS,
+	},
+	{
+		.id = MSM_BUS_SLAVE_OCIMEM,
+		.slavep = sport_ocimem,
+		.num_sports = ARRAY_SIZE(sport_ocimem),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.buswidth = 8,
+		.slv_hw_id = SLV_OCIMEM,
+	},
+	{
+		.id = MSM_BUS_SLAVE_SNOC_OCMEM,
+		.slavep = sport_snoc_ocmem,
+		.num_sports = ARRAY_SIZE(sport_snoc_ocmem),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.buswidth = 8,
+		.slv_hw_id = SLV_SNOC_OCMEM,
+	},
+	{
+		.id = MSM_BUS_SLAVE_SERVICE_SNOC,
+		.slavep = sport_service_snoc,
+		.num_sports = ARRAY_SIZE(sport_service_snoc),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.buswidth = 8,
+		.slv_hw_id = SLV_SERVICE_SNOC,
+	},
+	{
+		.id = MSM_BUS_SLAVE_QDSS_STM,
+		.slavep = sport_qdss_stm,
+		.num_sports = ARRAY_SIZE(sport_qdss_stm),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.buswidth = 8,
+		.slv_hw_id = SLV_QDSS_STM,
+	},
+};
+
+
+static struct msm_bus_node_info mmss_noc_info[]  = {
+	{
+		.id = MSM_BUS_MASTER_GRAPHICS_3D,
+		.masterp = mport_gfx3d,
+		.num_mports = ARRAY_SIZE(mport_gfx3d),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.hw_sel = MSM_BUS_NOC,
+		.perm_mode = NOC_QOS_MODES_ALL_PERM,
+		.mode = NOC_QOS_MODE_FIXED,
+		.ws = 10000,
+		.qport = qports_oxili,
+		.mas_hw_id = MAS_GFX3D,
+	},
+	{
+		.id = MSM_BUS_MASTER_JPEG,
+		.masterp = mport_jpeg,
+		.num_mports = ARRAY_SIZE(mport_jpeg),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.hw_sel = MSM_BUS_NOC,
+		.perm_mode = NOC_QOS_MODES_ALL_PERM,
+		.mode = NOC_QOS_MODE_BYPASS,
+		.qport = qports_gemini,
+		.ws = 10000,
+		.mas_hw_id = MAS_JPEG,
+	},
+	{
+		.id = MSM_BUS_MASTER_MDP_PORT0,
+		.masterp = mport_mdp,
+		.num_mports = ARRAY_SIZE(mport_mdp),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.hw_sel = MSM_BUS_NOC,
+		.perm_mode = NOC_QOS_MODES_ALL_PERM,
+		.mode = NOC_QOS_MODE_BYPASS,
+		.qport = qports_mdp,
+		.ws = 10000,
+		.mas_hw_id = MAS_MDP,
+	},
+	{
+		.id = MSM_BUS_MASTER_VIDEO_P0,
+		.masterp = mport_video_port0,
+		.num_mports = ARRAY_SIZE(mport_video_port0),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.hw_sel = MSM_BUS_NOC,
+		.perm_mode = NOC_QOS_MODES_ALL_PERM,
+		.mode = NOC_QOS_MODE_BYPASS,
+		.ws = 10000,
+		.qport = qports_venus_p0,
+		.mas_hw_id = MAS_VIDEO_P0,
+	},
+	{
+		.id = MSM_BUS_MASTER_VIDEO_P1,
+		.masterp = mport_video_port1,
+		.num_mports = ARRAY_SIZE(mport_video_port1),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.hw_sel = MSM_BUS_NOC,
+		.perm_mode = NOC_QOS_MODES_ALL_PERM,
+		.mode = NOC_QOS_MODE_BYPASS,
+		.ws = 10000,
+		.qport = qports_venus_p1,
+		.mas_hw_id = MAS_VIDEO_P1,
+	},
+	{
+		.id = MSM_BUS_MASTER_VFE,
+		.masterp = mport_vfe,
+		.num_mports = ARRAY_SIZE(mport_vfe),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.hw_sel = MSM_BUS_NOC,
+		.perm_mode = NOC_QOS_MODES_ALL_PERM,
+		.mode = NOC_QOS_MODE_BYPASS,
+		.ws = 10000,
+		.qport = qports_vfe,
+		.mas_hw_id = MAS_VFE,
+	},
+	{
+		.id = MSM_BUS_FAB_CONFIG_NOC,
+		.gateway = 1,
+		.masterp = mport_gw_cnoc_mnoc_cfg,
+		.num_mports = ARRAY_SIZE(mport_gw_cnoc_mnoc_cfg),
+		.buswidth = 16,
+		.hw_sel = MSM_BUS_RPM,
+		.mas_hw_id = MAS_CNOC_MNOC_MMSS_CFG,
+	},
+	{
+		.id = MSM_BUS_FAB_BIMC,
+		.gateway = 1,
+		.slavep = sport_gw_mmss_bimc,
+		.num_sports = ARRAY_SIZE(sport_gw_mmss_bimc),
+		.buswidth = 16,
+		.hw_sel = MSM_BUS_NOC,
+		.slv_hw_id = SLV_MMSS_BIMC,
+	},
+	{
+		.id = MSM_BUS_SLAVE_CAMERA_CFG,
+		.slavep = sport_camera_cfg,
+		.num_sports = ARRAY_SIZE(sport_camera_cfg),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.buswidth = 16,
+		.hw_sel = MSM_BUS_NOC,
+		.slv_hw_id = SLV_CAMERA_CFG,
+	},
+	{
+		.id = MSM_BUS_SLAVE_DISPLAY_CFG,
+		.slavep = sport_display_cfg,
+		.num_sports = ARRAY_SIZE(sport_display_cfg),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.buswidth = 16,
+		.hw_sel = MSM_BUS_NOC,
+		.slv_hw_id = SLV_DISPLAY_CFG,
+	},
+	{
+		.id = MSM_BUS_SLAVE_OCMEM_CFG,
+		.slavep = sport_ocmem_cfg,
+		.num_sports = ARRAY_SIZE(sport_ocmem_cfg),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.buswidth = 16,
+		.hw_sel = MSM_BUS_NOC,
+		.slv_hw_id = SLV_OCMEM_CFG,
+	},
+	{
+		.id = MSM_BUS_SLAVE_CPR_CFG,
+		.slavep = sport_cpr_cfg,
+		.num_sports = ARRAY_SIZE(sport_cpr_cfg),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.buswidth = 16,
+		.hw_sel = MSM_BUS_NOC,
+		.slv_hw_id = SLV_CPR_CFG,
+	},
+	{
+		.id = MSM_BUS_SLAVE_CPR_XPU_CFG,
+		.slavep = sport_cpr_xpu_cfg,
+		.num_sports = ARRAY_SIZE(sport_cpr_xpu_cfg),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.buswidth = 16,
+		.hw_sel = MSM_BUS_NOC,
+		.slv_hw_id = SLV_CPR_XPU_CFG,
+	},
+	{
+		.id = MSM_BUS_SLAVE_MISC_CFG,
+		.slavep = sport_misc_cfg,
+		.num_sports = ARRAY_SIZE(sport_misc_cfg),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.buswidth = 16,
+		.hw_sel = MSM_BUS_NOC,
+		.slv_hw_id = SLV_MISC_CFG,
+	},
+	{
+		.id = MSM_BUS_SLAVE_MISC_XPU_CFG,
+		.slavep = sport_misc_xpu_cfg,
+		.num_sports = ARRAY_SIZE(sport_misc_xpu_cfg),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.buswidth = 16,
+		.hw_sel = MSM_BUS_NOC,
+		.slv_hw_id = SLV_MISC_XPU_CFG,
+	},
+	{
+		.id = MSM_BUS_SLAVE_VENUS_CFG,
+		.slavep = sport_venus_cfg,
+		.num_sports = ARRAY_SIZE(sport_venus_cfg),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.buswidth = 16,
+		.hw_sel = MSM_BUS_NOC,
+		.slv_hw_id = SLV_VENUS_CFG,
+	},
+	{
+		.id = MSM_BUS_SLAVE_GRAPHICS_3D_CFG,
+		.slavep = sport_gfx3d_cfg,
+		.num_sports = ARRAY_SIZE(sport_gfx3d_cfg),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.buswidth = 16,
+		.hw_sel = MSM_BUS_NOC,
+		.slv_hw_id = SLV_GFX3D_CFG,
+	},
+	{
+		.id = MSM_BUS_SLAVE_MMSS_CLK_CFG,
+		.slavep = sport_mmss_clk_cfg,
+		.num_sports = ARRAY_SIZE(sport_mmss_clk_cfg),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.buswidth = 16,
+		.hw_sel = MSM_BUS_NOC,
+		.slv_hw_id = SLV_MMSS_CLK_CFG,
+	},
+	{
+		.id = MSM_BUS_SLAVE_MMSS_CLK_XPU_CFG,
+		.slavep = sport_mmss_clk_xpu_cfg,
+		.num_sports = ARRAY_SIZE(sport_mmss_clk_xpu_cfg),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.buswidth = 16,
+		.hw_sel = MSM_BUS_NOC,
+		.slv_hw_id = SLV_MMSS_CLK_XPU_CFG,
+	},
+	{
+		.id = MSM_BUS_SLAVE_MNOC_MPU_CFG,
+		.slavep = sport_mnoc_mpu_cfg,
+		.num_sports = ARRAY_SIZE(sport_mnoc_mpu_cfg),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.buswidth = 16,
+		.hw_sel = MSM_BUS_NOC,
+		.slv_hw_id = SLV_MNOC_MPU_CFG,
+	},
+	{
+		.id = MSM_BUS_SLAVE_ONOC_MPU_CFG,
+		.slavep = sport_onoc_mpu_cfg,
+		.num_sports = ARRAY_SIZE(sport_onoc_mpu_cfg),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.buswidth = 16,
+		.hw_sel = MSM_BUS_NOC,
+		.slv_hw_id = SLV_ONOC_MPU_CFG,
+	},
+	{
+		.id = MSM_BUS_SLAVE_SERVICE_MNOC,
+		.slavep = sport_service_mnoc,
+		.num_sports = ARRAY_SIZE(sport_service_mnoc),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.buswidth = 16,
+		.hw_sel = MSM_BUS_NOC,
+		.slv_hw_id = SLV_SERVICE_MNOC,
+	},
+};
+
+static struct msm_bus_node_info bimc_info[]  = {
+	{
+		.id = MSM_BUS_MASTER_AMPSS_M0,
+		.masterp = mport_kmpss_m0,
+		.num_mports = ARRAY_SIZE(mport_kmpss_m0),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.hw_sel = MSM_BUS_BIMC,
+		.mode = NOC_QOS_MODE_FIXED,
+		.qport = qports_kmpss,
+		.ws = 10000,
+		.mas_hw_id = MAS_APPSS_PROC,
+		.prio_lvl = 0,
+		.prio_rd = 2,
+		.prio_wr = 2,
+	},
+	{
+		.id = MSM_BUS_MASTER_AMPSS_M1,
+		.masterp = mport_kmpss_m1,
+		.num_mports = ARRAY_SIZE(mport_kmpss_m1),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.hw_sel = MSM_BUS_BIMC,
+		.mode = NOC_QOS_MODE_FIXED,
+		.qport = qports_kmpss,
+		.ws = 10000,
+		.mas_hw_id = MAS_APPSS_PROC,
+	},
+	{
+		.id = MSM_BUS_MASTER_MSS_PROC,
+		.masterp = mport_mss_proc,
+		.num_mports = ARRAY_SIZE(mport_mss_proc),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.hw_sel = MSM_BUS_RPM,
+		.mas_hw_id = MAS_AMSS_PROC,
+	},
+	{
+		.id = MSM_BUS_FAB_MMSS_NOC,
+		.gateway = 1,
+		.masterp = mport_gw_mnoc_bimc,
+		.num_mports = ARRAY_SIZE(mport_gw_mnoc_bimc),
+		.qport = qports_gw_mnoc_bimc,
+		.buswidth = 8,
+		.ws = 10000,
+		.mas_hw_id = MAS_MNOC_BIMC,
+		.hw_sel = MSM_BUS_BIMC,
+		.mode = NOC_QOS_MODE_BYPASS,
+	},
+	{
+		.id = MSM_BUS_FAB_SYS_NOC,
+		.gateway = 1,
+		.slavep = sport_gw_bimc_snoc,
+		.num_sports = ARRAY_SIZE(sport_gw_bimc_snoc),
+		.masterp = mport_gw_snoc_bimc,
+		.num_mports = ARRAY_SIZE(mport_gw_snoc_bimc),
+		.qport = qports_gw_snoc_bimc,
+		.buswidth = 8,
+		.ws = 10000,
+		.mas_hw_id = MAS_SNOC_BIMC,
+		.slv_hw_id = SLV_BIMC_SNOC,
+	},
+	{
+		.id = MSM_BUS_SLAVE_EBI_CH0,
+		.slavep = sport_ebi1,
+		.num_sports = ARRAY_SIZE(sport_ebi1),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.buswidth = 8,
+		.slv_hw_id = SLV_EBI,
+		.mode = NOC_QOS_MODE_BYPASS,
+	},
+	{
+		.id = MSM_BUS_SLAVE_AMPSS_L2,
+		.slavep = sport_kmpss_l2,
+		.num_sports = ARRAY_SIZE(sport_kmpss_l2),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.buswidth = 8,
+		.slv_hw_id = SLV_APSS_L2,
+	},
+};
+
+static struct msm_bus_node_info ocmem_noc_info[]  = {
+	{
+		.id = MSM_BUS_FAB_OCMEM_VNOC,
+		.gateway = 1,
+		.buswidth = 16,
+		.mas_hw_id = MAS_OVNOC_ONOC,
+		.slv_hw_id = SLV_ONOC_OVNOC,
+	},
+	{
+		.id = MSM_BUS_MASTER_JPEG_OCMEM,
+		.masterp = mport_jpeg_ocmem,
+		.num_mports = ARRAY_SIZE(mport_jpeg_ocmem),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.perm_mode = NOC_QOS_PERM_MODE_FIXED,
+		.mode = NOC_QOS_MODE_FIXED,
+		.qport = qports_gemini_ocmem,
+		.mas_hw_id = MAS_JPEG_OCMEM,
+		.hw_sel = MSM_BUS_NOC,
+	},
+	{
+		.id = MSM_BUS_MASTER_MDP_OCMEM,
+		.masterp = mport_mdp_ocmem,
+		.num_mports = ARRAY_SIZE(mport_mdp_ocmem),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.perm_mode = NOC_QOS_PERM_MODE_FIXED,
+		.mode = NOC_QOS_MODE_FIXED,
+		.qport = qports_mdp_ocmem,
+		.mas_hw_id = MAS_MDP_OCMEM,
+		.hw_sel = MSM_BUS_NOC,
+	},
+	{
+		.id = MSM_BUS_MASTER_VIDEO_P0_OCMEM,
+		.masterp = mport_video_p0_ocmem,
+		.num_mports = ARRAY_SIZE(mport_video_p0_ocmem),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.perm_mode = NOC_QOS_PERM_MODE_FIXED,
+		.mode = NOC_QOS_MODE_FIXED,
+		.qport = qports_venus_p0_ocmem,
+		.mas_hw_id = MAS_VIDEO_P0_OCMEM,
+		.hw_sel = MSM_BUS_NOC,
+	},
+	{
+		.id = MSM_BUS_MASTER_VIDEO_P1_OCMEM,
+		.masterp = mport_video_p1_ocmem,
+		.num_mports = ARRAY_SIZE(mport_video_p1_ocmem),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.perm_mode = NOC_QOS_PERM_MODE_FIXED,
+		.mode = NOC_QOS_MODE_FIXED,
+		.qport = qports_venus_p1_ocmem,
+		.mas_hw_id = MAS_VIDEO_P1_OCMEM,
+		.hw_sel = MSM_BUS_NOC,
+	},
+	{
+		.id = MSM_BUS_MASTER_VFE_OCMEM,
+		.masterp = mport_vfe_ocmem,
+		.num_mports = ARRAY_SIZE(mport_vfe_ocmem),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.perm_mode = NOC_QOS_PERM_MODE_FIXED,
+		.mode = NOC_QOS_MODE_FIXED,
+		.qport = qports_vfe_ocmem,
+		.mas_hw_id = MAS_VFE_OCMEM,
+		.hw_sel = MSM_BUS_NOC,
+		.prio_rd = 1,
+		.prio_wr = 1,
+	},
+	{
+		.id = MSM_BUS_MASTER_CNOC_ONOC_CFG,
+		.masterp = mport_cnoc_onoc_cfg,
+		.num_mports = ARRAY_SIZE(mport_cnoc_onoc_cfg),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.buswidth = 16,
+		.mas_hw_id = MAS_CNOC_ONOC_CFG,
+		.hw_sel = MSM_BUS_NOC,
+	},
+	{
+		.id = MSM_BUS_SLAVE_SERVICE_ONOC,
+		.slavep = sport_service_onoc,
+		.num_sports = ARRAY_SIZE(sport_service_onoc),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.buswidth = 16,
+		.slv_hw_id = SLV_SERVICE_ONOC,
+	},
+};
+
+static struct msm_bus_node_info periph_noc_info[] = {
+	{
+		.id = MSM_BUS_MASTER_PNOC_CFG,
+		.masterp = mport_pnoc_cfg,
+		.num_mports = ARRAY_SIZE(mport_pnoc_cfg),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.buswidth = 8,
+		.mas_hw_id = MAS_PNOC_CFG,
+	},
+	{
+		.id = MSM_BUS_MASTER_SDCC_1,
+		.masterp = mport_sdcc_1,
+		.num_mports = ARRAY_SIZE(mport_sdcc_1),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.buswidth = 8,
+		.mas_hw_id = MAS_SDCC_1,
+	},
+	{
+		.id = MSM_BUS_MASTER_SDCC_3,
+		.masterp = mport_sdcc_3,
+		.num_mports = ARRAY_SIZE(mport_sdcc_3),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.buswidth = 8,
+		.mas_hw_id = MAS_SDCC_3,
+	},
+	{
+		.id = MSM_BUS_MASTER_SDCC_4,
+		.masterp = mport_sdcc_4,
+		.num_mports = ARRAY_SIZE(mport_sdcc_4),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.buswidth = 8,
+		.mas_hw_id = MAS_SDCC_4,
+	},
+	{
+		.id = MSM_BUS_MASTER_SDCC_2,
+		.masterp = mport_sdcc_2,
+		.num_mports = ARRAY_SIZE(mport_sdcc_2),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.buswidth = 8,
+		.mas_hw_id = MAS_SDCC_2,
+	},
+	{
+		.id = MSM_BUS_MASTER_TSIF,
+		.masterp = mport_tsif,
+		.num_mports = ARRAY_SIZE(mport_tsif),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.buswidth = 8,
+		.mas_hw_id = MAS_TSIF,
+	},
+	{
+		.id = MSM_BUS_MASTER_BAM_DMA,
+		.masterp = mport_bam_dma,
+		.num_mports = ARRAY_SIZE(mport_bam_dma),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.buswidth = 8,
+		.mas_hw_id = MAS_BAM_DMA,
+	},
+	{
+		.id = MSM_BUS_MASTER_BLSP_2,
+		.masterp = mport_blsp_2,
+		.num_mports = ARRAY_SIZE(mport_blsp_2),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.buswidth = 8,
+		.mas_hw_id = MAS_BLSP_2,
+	},
+	{
+		.id = MSM_BUS_MASTER_USB_HSIC,
+		.masterp = mport_usb_hsic,
+		.num_mports = ARRAY_SIZE(mport_usb_hsic),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.buswidth = 8,
+		.mas_hw_id = MAS_USB_HSIC,
+	},
+	{
+		.id = MSM_BUS_MASTER_BLSP_1,
+		.masterp = mport_blsp_1,
+		.num_mports = ARRAY_SIZE(mport_blsp_1),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.buswidth = 8,
+		.mas_hw_id = MAS_BLSP_1,
+	},
+	{
+		.id = MSM_BUS_MASTER_USB_HS,
+		.masterp = mport_usb_hs,
+		.num_mports = ARRAY_SIZE(mport_usb_hs),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.buswidth = 8,
+		.mas_hw_id = MAS_USB_HS,
+	},
+	{
+		.id = MSM_BUS_FAB_SYS_NOC,
+		.gateway = 1,
+		.slavep = sport_gw_pnoc_snoc,
+		.num_sports = ARRAY_SIZE(sport_gw_pnoc_snoc),
+		.masterp = mport_gw_snoc_pnoc,
+		.num_mports = ARRAY_SIZE(mport_gw_snoc_pnoc),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.buswidth = 8,
+		.slv_hw_id = SLV_PNOC_SNOC,
+		.mas_hw_id = MAS_SNOC_PNOC,
+	},
+	{
+		.id = MSM_BUS_SLAVE_SDCC_1,
+		.slavep = sport_sdcc_1,
+		.num_sports = ARRAY_SIZE(sport_sdcc_1),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.buswidth = 8,
+		.slv_hw_id = SLV_SDCC_1,
+	},
+	{
+		.id = MSM_BUS_SLAVE_SDCC_3,
+		.slavep = sport_sdcc_3,
+		.num_sports = ARRAY_SIZE(sport_sdcc_3),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.buswidth = 8,
+		.slv_hw_id = SLV_SDCC_3,
+	},
+	{
+		.id = MSM_BUS_SLAVE_SDCC_2,
+		.slavep = sport_sdcc_2,
+		.num_sports = ARRAY_SIZE(sport_sdcc_2),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.buswidth = 8,
+		.slv_hw_id = SLV_SDCC_2,
+	},
+	{
+		.id = MSM_BUS_SLAVE_SDCC_4,
+		.slavep = sport_sdcc_4,
+		.num_sports = ARRAY_SIZE(sport_sdcc_4),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.buswidth = 8,
+		.slv_hw_id = SLV_SDCC_4,
+	},
+	{
+		.id = MSM_BUS_SLAVE_TSIF,
+		.slavep = sport_tsif,
+		.num_sports = ARRAY_SIZE(sport_tsif),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.buswidth = 8,
+		.slv_hw_id = SLV_TSIF,
+	},
+	{
+		.id = MSM_BUS_SLAVE_BAM_DMA,
+		.slavep = sport_bam_dma,
+		.num_sports = ARRAY_SIZE(sport_bam_dma),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.buswidth = 8,
+		.slv_hw_id = SLV_BAM_DMA,
+	},
+	{
+		.id = MSM_BUS_SLAVE_BLSP_2,
+		.slavep = sport_blsp_2,
+		.num_sports = ARRAY_SIZE(sport_blsp_2),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.buswidth = 8,
+		.slv_hw_id = SLV_BLSP_2,
+	},
+	{
+		.id = MSM_BUS_SLAVE_USB_HSIC,
+		.slavep = sport_usb_hsic,
+		.num_sports = ARRAY_SIZE(sport_usb_hsic),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.buswidth = 8,
+		.slv_hw_id = SLV_USB_HSIC,
+	},
+	{
+		.id = MSM_BUS_SLAVE_BLSP_1,
+		.slavep = sport_blsp_1,
+		.num_sports = ARRAY_SIZE(sport_blsp_1),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.buswidth = 8,
+		.slv_hw_id = SLV_BLSP_1,
+	},
+	{
+		.id = MSM_BUS_SLAVE_USB_HS,
+		.slavep = sport_usb_hs,
+		.num_sports = ARRAY_SIZE(sport_usb_hs),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.buswidth = 8,
+		.slv_hw_id = SLV_USB_HS,
+	},
+	{
+		.id = MSM_BUS_SLAVE_PDM,
+		.slavep = sport_pdm,
+		.num_sports = ARRAY_SIZE(sport_pdm),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.buswidth = 8,
+		.slv_hw_id = SLV_PDM,
+	},
+	{
+		.id = MSM_BUS_SLAVE_PERIPH_APU_CFG,
+		.slavep = sport_periph_apu_cfg,
+		.num_sports = ARRAY_SIZE(sport_periph_apu_cfg),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.buswidth = 8,
+		.slv_hw_id = SLV_PERIPH_APU_CFG,
+	},
+	{
+		.id = MSM_BUS_SLAVE_PNOC_MPU_CFG,
+		.slavep = sport_pnoc_mpu_cfg,
+		.num_sports = ARRAY_SIZE(sport_pnoc_mpu_cfg),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.buswidth = 8,
+		.slv_hw_id = SLV_MPU_CFG,
+	},
+	{
+		.id = MSM_BUS_SLAVE_PRNG,
+		.slavep = sport_prng,
+		.num_sports = ARRAY_SIZE(sport_prng),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.buswidth = 8,
+		.slv_hw_id = SLV_PRNG,
+	},
+	{
+		.id = MSM_BUS_SLAVE_SERVICE_PNOC,
+		.slavep = sport_service_pnoc,
+		.num_sports = ARRAY_SIZE(sport_service_pnoc),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.buswidth = 8,
+		.slv_hw_id = SLV_SERVICE_PNOC,
+	},
+};
+
+static struct msm_bus_node_info config_noc_info[] = {
+	{
+		.id = MSM_BUS_MASTER_RPM_INST,
+		.masterp = mport_rpm_inst,
+		.num_mports = ARRAY_SIZE(mport_rpm_inst),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.buswidth = 8,
+		.mas_hw_id = MAS_RPM_INST,
+	},
+	{
+		.id = MSM_BUS_MASTER_RPM_DATA,
+		.masterp = mport_rpm_data,
+		.num_mports = ARRAY_SIZE(mport_rpm_data),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.buswidth = 8,
+		.mas_hw_id = MAS_RPM_DATA,
+	},
+	{
+		.id = MSM_BUS_MASTER_RPM_SYS,
+		.masterp = mport_rpm_sys,
+		.num_mports = ARRAY_SIZE(mport_rpm_sys),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.buswidth = 8,
+		.mas_hw_id = MAS_RPM_SYS,
+	},
+	{
+		.id = MSM_BUS_MASTER_DEHR,
+		.masterp = mport_dehr,
+		.num_mports = ARRAY_SIZE(mport_dehr),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.buswidth = 8,
+		.mas_hw_id = MAS_DEHR,
+	},
+	{
+		.id = MSM_BUS_MASTER_QDSS_DAP,
+		.masterp = mport_qdss_dap,
+		.num_mports = ARRAY_SIZE(mport_qdss_dap),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.buswidth = 8,
+		.mas_hw_id = MAS_QDSS_DAP,
+	},
+	{
+		.id = MSM_BUS_MASTER_SPDM,
+		.masterp = mport_spdm,
+		.num_mports = ARRAY_SIZE(mport_spdm),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.buswidth = 8,
+		.mas_hw_id = MAS_SPDM,
+	},
+	{
+		.id = MSM_BUS_MASTER_TIC,
+		.masterp = mport_tic,
+		.num_mports = ARRAY_SIZE(mport_tic),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.buswidth = 8,
+		.mas_hw_id = MAS_TIC,
+	},
+	{
+		.id = MSM_BUS_SLAVE_CLK_CTL,
+		.slavep = sport_clk_ctl,
+		.num_sports = ARRAY_SIZE(sport_clk_ctl),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.buswidth = 8,
+		.slv_hw_id = SLV_CLK_CTL,
+	},
+	{
+		.id = MSM_BUS_SLAVE_CNOC_MSS,
+		.slavep = sport_cnoc_mss,
+		.num_sports = ARRAY_SIZE(sport_cnoc_mss),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.buswidth = 8,
+		.slv_hw_id = SLV_CNOC_MSS,
+	},
+	{
+		.id = MSM_BUS_SLAVE_SECURITY,
+		.slavep = sport_security,
+		.num_sports = ARRAY_SIZE(sport_security),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.buswidth = 8,
+		.slv_hw_id = SLV_SECURITY,
+	},
+	{
+		.id = MSM_BUS_SLAVE_TCSR,
+		.slavep = sport_tcsr,
+		.num_sports = ARRAY_SIZE(sport_tcsr),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.buswidth = 8,
+		.slv_hw_id = SLV_TCSR,
+	},
+	{
+		.id = MSM_BUS_SLAVE_TLMM,
+		.slavep = sport_tlmm,
+		.num_sports = ARRAY_SIZE(sport_tlmm),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.buswidth = 8,
+		.slv_hw_id = SLV_TLMM,
+	},
+	{
+		.id = MSM_BUS_SLAVE_CRYPTO_0_CFG,
+		.slavep = sport_crypto_0_cfg,
+		.num_sports = ARRAY_SIZE(sport_crypto_0_cfg),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.buswidth = 8,
+		.slv_hw_id = SLV_CRYPTO_0_CFG,
+	},
+	{
+		.id = MSM_BUS_SLAVE_CRYPTO_1_CFG,
+		.slavep = sport_crypto_1_cfg,
+		.num_sports = ARRAY_SIZE(sport_crypto_1_cfg),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.buswidth = 8,
+		.slv_hw_id = SLV_CRYPTO_1_CFG,
+	},
+	{
+		.id = MSM_BUS_SLAVE_IMEM_CFG,
+		.slavep = sport_imem_cfg,
+		.num_sports = ARRAY_SIZE(sport_imem_cfg),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.buswidth = 8,
+		.slv_hw_id = SLV_IMEM_CFG,
+	},
+	{
+		.id = MSM_BUS_SLAVE_MESSAGE_RAM,
+		.slavep = sport_message_ram,
+		.num_sports = ARRAY_SIZE(sport_message_ram),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.buswidth = 8,
+		.slv_hw_id = SLV_MESSAGE_RAM,
+	},
+	{
+		.id = MSM_BUS_SLAVE_BIMC_CFG,
+		.slavep = sport_bimc_cfg,
+		.num_sports = ARRAY_SIZE(sport_bimc_cfg),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.buswidth = 8,
+		.slv_hw_id = SLV_BIMC_CFG,
+	},
+	{
+		.id = MSM_BUS_SLAVE_BOOT_ROM,
+		.slavep = sport_boot_rom,
+		.num_sports = ARRAY_SIZE(sport_boot_rom),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.buswidth = 8,
+		.slv_hw_id = SLV_BOOT_ROM,
+	},
+	{
+		.id = MSM_BUS_SLAVE_PMIC_ARB,
+		.slavep = sport_pmic_arb,
+		.num_sports = ARRAY_SIZE(sport_pmic_arb),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.buswidth = 8,
+		.slv_hw_id = SLV_PMIC_ARB,
+	},
+	{
+		.id = MSM_BUS_SLAVE_SPDM_WRAPPER,
+		.slavep = sport_spdm_wrapper,
+		.num_sports = ARRAY_SIZE(sport_spdm_wrapper),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.buswidth = 8,
+		.slv_hw_id = SLV_SPDM_WRAPPER,
+	},
+	{
+		.id = MSM_BUS_SLAVE_DEHR_CFG,
+		.slavep = sport_dehr_cfg,
+		.num_sports = ARRAY_SIZE(sport_dehr_cfg),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.buswidth = 8,
+		.slv_hw_id = SLV_DEHR_CFG,
+	},
+	{
+		.id = MSM_BUS_SLAVE_MPM,
+		.slavep = sport_mpm,
+		.num_sports = ARRAY_SIZE(sport_mpm),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.buswidth = 8,
+		.slv_hw_id = SLV_MPM,
+	},
+	{
+		.id = MSM_BUS_SLAVE_QDSS_CFG,
+		.slavep = sport_qdss_cfg,
+		.num_sports = ARRAY_SIZE(sport_qdss_cfg),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.buswidth = 8,
+		.slv_hw_id = SLV_QDSS_CFG,
+	},
+	{
+		.id = MSM_BUS_SLAVE_RBCPR_CFG,
+		.slavep = sport_rbcpr_cfg,
+		.num_sports = ARRAY_SIZE(sport_rbcpr_cfg),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.buswidth = 8,
+		.slv_hw_id = SLV_RBCPR_CFG,
+	},
+	{
+		.id = MSM_BUS_SLAVE_RBCPR_QDSS_APU_CFG,
+		.slavep = sport_rbcpr_qdss_apu_cfg,
+		.num_sports = ARRAY_SIZE(sport_rbcpr_qdss_apu_cfg),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.buswidth = 8,
+		.slv_hw_id = SLV_RBCPR_QDSS_APU_CFG,
+	},
+	{
+		.id = MSM_BUS_FAB_SYS_NOC,
+		.gateway = 1,
+		.slavep = sport_gw_cnoc_snoc,
+		.num_sports = ARRAY_SIZE(sport_gw_cnoc_snoc),
+		.masterp = mport_gw_snoc_cnoc,
+		.num_mports = ARRAY_SIZE(mport_gw_snoc_cnoc),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.buswidth = 8,
+		.mas_hw_id = MAS_SNOC_CNOC,
+		.slv_hw_id = SLV_CNOC_SNOC,
+	},
+	{
+		.id = MSM_BUS_SLAVE_CNOC_ONOC_CFG,
+		.slavep = sport_cnoc_onoc_cfg,
+		.num_sports = ARRAY_SIZE(sport_cnoc_onoc_cfg),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.buswidth = 8,
+		.slv_hw_id = SLV_CNOC_ONOC_CFG,
+	},
+	{
+		.id = MSM_BUS_SLAVE_CNOC_MNOC_MMSS_CFG,
+		.slavep = sport_cnoc_mnoc_mmss_cfg,
+		.num_sports = ARRAY_SIZE(sport_cnoc_mnoc_mmss_cfg),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.buswidth = 8,
+		.slv_hw_id = SLV_CNOC_MNOC_MMSS_CFG,
+	},
+	{
+		.id = MSM_BUS_SLAVE_CNOC_MNOC_CFG,
+		.slavep = sport_cnoc_mnoc_cfg,
+		.num_sports = ARRAY_SIZE(sport_cnoc_mnoc_cfg),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.buswidth = 8,
+		.slv_hw_id = SLV_CNOC_MNOC_CFG,
+	},
+	{
+		.id = MSM_BUS_SLAVE_PNOC_CFG,
+		.slavep = sport_pnoc_cfg,
+		.num_sports = ARRAY_SIZE(sport_pnoc_cfg),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.buswidth = 8,
+		.slv_hw_id = SLV_PNOC_CFG,
+	},
+	{
+		.id = MSM_BUS_SLAVE_SNOC_MPU_CFG,
+		.slavep = sport_snoc_mpu_cfg,
+		.num_sports = ARRAY_SIZE(sport_snoc_mpu_cfg),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.buswidth = 8,
+		.slv_hw_id = SLV_SNOC_MPU_CFG,
+	},
+	{
+		.id = MSM_BUS_SLAVE_SNOC_CFG,
+		.slavep = sport_snoc_cfg,
+		.num_sports = ARRAY_SIZE(sport_snoc_cfg),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.buswidth = 8,
+		.slv_hw_id = SLV_SNOC_CFG,
+	},
+	{
+		.id = MSM_BUS_SLAVE_EBI1_DLL_CFG,
+		.slavep = sport_ebi1_dll_cfg,
+		.num_sports = ARRAY_SIZE(sport_ebi1_dll_cfg),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.buswidth = 8,
+		.slv_hw_id = SLV_EBI1_DLL_CFG,
+	},
+	{
+		.id = MSM_BUS_SLAVE_PHY_APU_CFG,
+		.slavep = sport_phy_apu_cfg,
+		.num_sports = ARRAY_SIZE(sport_phy_apu_cfg),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.buswidth = 8,
+		.slv_hw_id = SLV_PHY_APU_CFG,
+	},
+	{
+		.id = MSM_BUS_SLAVE_EBI1_PHY_CFG,
+		.slavep = sport_ebi1_phy_cfg,
+		.num_sports = ARRAY_SIZE(sport_ebi1_phy_cfg),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.buswidth = 8,
+		.slv_hw_id = SLV_EBI1_PHY_CFG,
+	},
+	{
+		.id = MSM_BUS_SLAVE_RPM,
+		.slavep = sport_rpm,
+		.num_sports = ARRAY_SIZE(sport_rpm),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.buswidth = 8,
+		.slv_hw_id = SLV_RPM,
+	},
+	{
+		.id = MSM_BUS_SLAVE_SERVICE_CNOC,
+		.slavep = sport_service_cnoc,
+		.num_sports = ARRAY_SIZE(sport_service_cnoc),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.buswidth = 8,
+		.slv_hw_id = SLV_SERVICE_CNOC,
+	},
+};
+
+/* A virtual NoC is needed for connection to OCMEM */
+static struct msm_bus_node_info ocmem_vnoc_info[] = {
+	{
+		.id = MSM_BUS_MASTER_V_OCMEM_GFX3D,
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.buswidth = 8,
+		.mas_hw_id = MAS_V_OCMEM_GFX3D,
+	},
+	{
+		.id = MSM_BUS_SLAVE_OCMEM,
+		.slavep = sport_ocmem,
+		.num_sports = ARRAY_SIZE(sport_ocmem),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.buswidth = 16,
+		.slv_hw_id = SLV_OCMEM,
+		.slaveclk[DUAL_CTX] = "ocmem_clk",
+		.slaveclk[ACTIVE_CTX] = "ocmem_a_clk",
+	},
+	{
+		.id = MSM_BUS_FAB_SYS_NOC,
+		.gateway = 1,
+		.buswidth = 8,
+		.ws = 10000,
+		.mas_hw_id = MAS_SNOC_OVNOC,
+		.slv_hw_id = SLV_OVNOC_SNOC,
+	},
+	{
+		.id = MSM_BUS_FAB_OCMEM_NOC,
+		.gateway = 1,
+		.buswidth = 16,
+		.ws = 10000,
+		.mas_hw_id = MAS_ONOC_OVNOC,
+		.slv_hw_id = SLV_OVNOC_ONOC,
+	},
+};
+
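+/*
+ * Assign private ids: non-gateway nodes get priv_id = fabid + id and the
+ * mapping is cached in master_iids[] (ids below SLAVE_ID_KEY) or
+ * slave_iids[] (ids offset by SLAVE_ID_KEY); gateways keep their id.
+ */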
+static void msm_bus_board_assign_iids(struct msm_bus_fabric_registration
+	*fabreg, int fabid)
+{
+	int i;
+	for (i = 0; i < fabreg->len; i++) {
+		if (!fabreg->info[i].gateway) {
+			fabreg->info[i].priv_id = fabid + fabreg->info[i].id;
+			if (fabreg->info[i].id < SLAVE_ID_KEY) {
+				WARN(fabreg->info[i].id >= NMASTERS,
+					"id %d exceeds array size!\n",
+					fabreg->info[i].id);
+				master_iids[fabreg->info[i].id] =
+					fabreg->info[i].priv_id;
+			} else {
+				WARN((fabreg->info[i].id - SLAVE_ID_KEY) >=
+					NSLAVES, "id %d exceeds array size!\n",
+					fabreg->info[i].id);
+				slave_iids[fabreg->info[i].id - (SLAVE_ID_KEY)]
+					= fabreg->info[i].priv_id;
+			}
+		} else {
+			fabreg->info[i].priv_id = fabreg->info[i].id;
+		}
+	}
+}
+
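+/* Validate the client id range and return its cached private id. */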
+static int msm_bus_board_8974_get_iid(int id)
+{
+	if ((id < SLAVE_ID_KEY && id >= NMASTERS) ||
+		id >= (SLAVE_ID_KEY + NSLAVES)) {
+		MSM_BUS_ERR("Cannot get iid. Invalid id %d passed\n", id);
+		return -EINVAL;
+	}
+
+	return CHECK_ID(((id < SLAVE_ID_KEY) ? master_iids[id] :
+		slave_iids[id - SLAVE_ID_KEY]), id);
+}
+
+int msm_bus_board_rpm_get_il_ids(uint16_t *id)
+{
+	return -ENXIO;
+}
+
+static struct msm_bus_board_algorithm msm_bus_board_algo = {
+	.board_nfab = NFAB_8974,
+	.get_iid = msm_bus_board_8974_get_iid,
+	.assign_iids = msm_bus_board_assign_iids,
+};
+
+struct msm_bus_fabric_registration msm_bus_8974_sys_noc_pdata = {
+	.id = MSM_BUS_FAB_SYS_NOC,
+	.name = "msm_sys_noc",
+	.info = sys_noc_info,
+	.len = ARRAY_SIZE(sys_noc_info),
+	.ahb = 0,
+	.fabclk[DUAL_CTX] = "bus_clk",
+	.fabclk[ACTIVE_CTX] = "bus_a_clk",
+	.nmasters = 15,
+	.nslaves = 12,
+	.ntieredslaves = 0,
+	.board_algo = &msm_bus_board_algo,
+	.qos_freq = 4800,
+	.hw_sel = MSM_BUS_NOC,
+	.rpm_enabled = 1,
+};
+
+struct msm_bus_fabric_registration msm_bus_8974_mmss_noc_pdata = {
+	.id = MSM_BUS_FAB_MMSS_NOC,
+	.name = "msm_mmss_noc",
+	.info = mmss_noc_info,
+	.len = ARRAY_SIZE(mmss_noc_info),
+	.ahb = 0,
+	.fabclk[DUAL_CTX] = "bus_clk",
+	.fabclk[ACTIVE_CTX] = "bus_a_clk",
+	.nmasters = 9,
+	.nslaves = 16,
+	.ntieredslaves = 0,
+	.board_algo = &msm_bus_board_algo,
+	.qos_freq = 4800,
+	.hw_sel = MSM_BUS_NOC,
+	.rpm_enabled = 1,
+};
+
+struct msm_bus_fabric_registration msm_bus_8974_bimc_pdata = {
+	.id = MSM_BUS_FAB_BIMC,
+	.name = "msm_bimc",
+	.info = bimc_info,
+	.len = ARRAY_SIZE(bimc_info),
+	.ahb = 0,
+	.fabclk[DUAL_CTX] = "mem_clk",
+	.fabclk[ACTIVE_CTX] = "mem_a_clk",
+	.nmasters = 7,
+	.nslaves = 4,
+	.ntieredslaves = 0,
+	.board_algo = &msm_bus_board_algo,
+	.qos_freq = 4800,
+	.hw_sel = MSM_BUS_BIMC,
+	.rpm_enabled = 1,
+};
+
+struct msm_bus_fabric_registration msm_bus_8974_ocmem_noc_pdata = {
+	.id = MSM_BUS_FAB_OCMEM_NOC,
+	.name = "msm_ocmem_noc",
+	.info = ocmem_noc_info,
+	.len = ARRAY_SIZE(ocmem_noc_info),
+	.ahb = 0,
+	.fabclk[DUAL_CTX] = "bus_clk",
+	.fabclk[ACTIVE_CTX] = "bus_a_clk",
+	.nmasters = 6,
+	.nslaves = 3,
+	.ntieredslaves = 0,
+	.board_algo = &msm_bus_board_algo,
+	.qos_freq = 4800,
+	.hw_sel = MSM_BUS_NOC,
+	.rpm_enabled = 1,
+};
+
+struct msm_bus_fabric_registration msm_bus_8974_periph_noc_pdata = {
+	.id = MSM_BUS_FAB_PERIPH_NOC,
+	.name = "msm_periph_noc",
+	.info = periph_noc_info,
+	.len = ARRAY_SIZE(periph_noc_info),
+	.ahb = 0,
+	.fabclk[DUAL_CTX] = "bus_clk",
+	.fabclk[ACTIVE_CTX] = "bus_a_clk",
+	.nmasters = 12,
+	.nslaves = 16,
+	.ntieredslaves = 0,
+	.board_algo = &msm_bus_board_algo,
+	.hw_sel = MSM_BUS_NOC,
+	.rpm_enabled = 1,
+};
+
+struct msm_bus_fabric_registration msm_bus_8974_config_noc_pdata = {
+	.id = MSM_BUS_FAB_CONFIG_NOC,
+	.name = "msm_config_noc",
+	.info = config_noc_info,
+	.len = ARRAY_SIZE(config_noc_info),
+	.ahb = 0,
+	.fabclk[DUAL_CTX] = "bus_clk",
+	.fabclk[ACTIVE_CTX] = "bus_a_clk",
+	.nmasters = 8,
+	.nslaves = 30,
+	.ntieredslaves = 0,
+	.board_algo = &msm_bus_board_algo,
+	.hw_sel = MSM_BUS_NOC,
+	.rpm_enabled = 1,
+};
+
+struct msm_bus_fabric_registration msm_bus_8974_ocmem_vnoc_pdata = {
+	.id = MSM_BUS_FAB_OCMEM_VNOC,
+	.name = "msm_ocmem_vnoc",
+	.info = ocmem_vnoc_info,
+	.len = ARRAY_SIZE(ocmem_vnoc_info),
+	.ahb = 0,
+	.nmasters = 5,
+	.nslaves = 4,
+	.ntieredslaves = 0,
+	.board_algo = &msm_bus_board_algo,
+	.hw_sel = MSM_BUS_NOC,
+	.virt = 1,
+	.rpm_enabled = 1,
+};
diff --git a/arch/arm/mach-msm/msm_bus/msm_bus_core.h b/arch/arm/mach-msm/msm_bus/msm_bus_core.h
index a9d6e4f..264afbd 100644
--- a/arch/arm/mach-msm/msm_bus/msm_bus_core.h
+++ b/arch/arm/mach-msm/msm_bus/msm_bus_core.h
@@ -70,7 +70,7 @@
 	int ahb;
 	int hw_sel;
 	const char *slaveclk[NUM_CTX];
-	const char *memclk;
+	const char *memclk[NUM_CTX];
 	unsigned int buswidth;
 	unsigned int ws;
 	unsigned int mode;
@@ -116,7 +116,7 @@
 	struct path_node *pnode;
 	int commit_index;
 	struct nodeclk nodeclk[NUM_CTX];
-	struct nodeclk memclk;
+	struct nodeclk memclk[NUM_CTX];
 	void *hw_data;
 };
 
diff --git a/arch/arm/mach-msm/msm_bus/msm_bus_fabric.c b/arch/arm/mach-msm/msm_bus/msm_bus_fabric.c
index 3671916..e035e35 100644
--- a/arch/arm/mach-msm/msm_bus/msm_bus_fabric.c
+++ b/arch/arm/mach-msm/msm_bus/msm_bus_fabric.c
@@ -131,7 +131,7 @@
 		fabric->pdata->id, fabric->pdata->len);
 	fabric->hw_data = fabric->fabdev.hw_algo.allocate_hw_data(pdev,
 		fabric->pdata);
-	if (ZERO_OR_NULL_PTR(fabric->hw_data)) {
+	if (ZERO_OR_NULL_PTR(fabric->hw_data) && fabric->pdata->ahb == 0) {
 		MSM_BUS_ERR("Couldn't allocate hw_data for fab: %d\n",
 			fabric->fabdev.id);
 		goto error;
@@ -163,17 +163,18 @@
 				info->nodeclk[ctx].enable = false;
 				info->nodeclk[ctx].dirty = false;
 			}
-		}
-		if (info->node_info->memclk) {
-			info->memclk.clk = clk_get_sys("msm_bus",
-					info->node_info->memclk);
-			if (IS_ERR(info->memclk.clk)) {
-				MSM_BUS_ERR("Couldn't get clk %s\n",
-					info->node_info->memclk);
-				err = -EINVAL;
+
+			if (info->node_info->memclk[ctx]) {
+				info->memclk[ctx].clk = clk_get_sys("msm_bus",
+						info->node_info->memclk[ctx]);
+				if (IS_ERR(info->memclk[ctx].clk)) {
+					MSM_BUS_ERR("Couldn't get clk %s\n",
+						info->node_info->memclk[ctx]);
+					err = -EINVAL;
+				}
+				info->memclk[ctx].enable = false;
+				info->memclk[ctx].dirty = false;
 			}
-			info->memclk.enable = false;
-			info->memclk.dirty = false;
 		}
 
 		ret = info->node_info->gateway ?
@@ -316,13 +317,13 @@
 				nodeclk->rate = rate;
 			}
 		}
-		if (!status && slave->memclk.clk) {
+		if (!status && slave->memclk[ctx].clk) {
 			rate = *slave->link_info.sel_clk;
-			if (slave->memclk.rate != rate) {
-				slave->memclk.rate = rate;
-				slave->memclk.dirty = true;
+			if (slave->memclk[ctx].rate != rate) {
+				slave->memclk[ctx].rate = rate;
+				slave->memclk[ctx].dirty = true;
 			}
-			slave->memclk.rate = rate;
+			slave->memclk[ctx].rate = rate;
 			fabric->clk_dirty = true;
 		}
 	}
@@ -337,8 +338,8 @@
 	struct msm_bus_fabric *fabric = to_msm_bus_fabric(fabdev);
 	void *sel_cdata;
 
-	/* Temporarily stub out arbitration settings for copper */
-	if (machine_is_copper())
+	/* Temporarily stub out arbitration settings for msm8974 */
+	if (machine_is_msm8974())
 		return;
 
 	sel_cdata = fabric->cdata[ctx];
@@ -361,10 +362,19 @@
 static int msm_bus_fabric_clk_set(int enable, struct msm_bus_inode_info *info)
 {
 	int i, status = 0;
-	for (i = 0; i < NUM_CTX; i++)
+	long rounded_rate;
+
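+	/* Nonzero requested rates are rounded with clk_round_rate() before being set */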
+	for (i = 0; i < NUM_CTX; i++) {
 		if (info->nodeclk[i].dirty) {
-			status = clk_set_rate(info->nodeclk[i].clk, info->
-				nodeclk[i].rate);
+			if (info->nodeclk[i].rate != 0) {
+				rounded_rate = clk_round_rate(info->
+					nodeclk[i].clk, info->nodeclk[i].rate);
+				status = clk_set_rate(info->nodeclk[i].clk,
+					rounded_rate);
+				MSM_BUS_DBG("AXI: node: %d set_rate: %ld\n",
+					info->node_info->id, rounded_rate);
+			}
+
 			if (enable && !(info->nodeclk[i].enable)) {
 				clk_prepare_enable(info->nodeclk[i].clk);
 				info->nodeclk[i].dirty = false;
@@ -377,17 +387,26 @@
 			}
 		}
 
-	if (info->memclk.dirty) {
-		status = clk_set_rate(info->memclk.clk, info->memclk.rate);
-		if (enable && !(info->memclk.enable)) {
-			clk_prepare_enable(info->memclk.clk);
-			info->memclk.dirty = false;
-			info->memclk.enable = true;
-		} else if (info->memclk.rate == 0 && (!enable) &&
-			(info->memclk.enable)) {
-			clk_disable_unprepare(info->memclk.clk);
-			info->memclk.dirty = false;
-			info->memclk.enable = false;
+		if (info->memclk[i].dirty) {
+			if (info->nodeclk[i].rate != 0) {
+				rounded_rate = clk_round_rate(info->
+					memclk[i].clk, info->memclk[i].rate);
+				status = clk_set_rate(info->memclk[i].clk,
+					rounded_rate);
+				MSM_BUS_DBG("AXI: node: %d set_rate: %ld\n",
+					info->node_info->id, rounded_rate);
+			}
+
+			if (enable && !(info->memclk[i].enable)) {
+				clk_prepare_enable(info->memclk[i].clk);
+				info->memclk[i].dirty = false;
+				info->memclk[i].enable = true;
+			} else if (info->memclk[i].rate == 0 && (!enable) &&
+				(info->memclk[i].enable)) {
+				clk_disable_unprepare(info->memclk[i].clk);
+				info->memclk[i].dirty = false;
+				info->memclk[i].enable = false;
+			}
 		}
 	}
 
diff --git a/arch/arm/mach-msm/msm_dsps.c b/arch/arm/mach-msm/msm_dsps.c
index eda22e1..200d717 100644
--- a/arch/arm/mach-msm/msm_dsps.c
+++ b/arch/arm/mach-msm/msm_dsps.c
@@ -45,9 +45,8 @@
 #include "timer.h"
 
 #define DRV_NAME	"msm_dsps"
-#define DRV_VERSION	"4.01"
+#define DRV_VERSION	"4.02"
 
-#define PPSS_PAUSE_REG	0x1804
 
 #define PPSS_TIMER0_32KHZ_REG	0x1004
 #define PPSS_TIMER0_20MHZ_REG	0x0804
@@ -137,23 +136,29 @@
 
 /**
  *  Suspend DSPS CPU.
+ *
+ * Only call if dsps_pwr_ctl_en is false.
+ * If dsps_pwr_ctl_en is true, then DSPS will control its own power state.
  */
 static void dsps_suspend(void)
 {
 	pr_debug("%s.\n", __func__);
 
-	writel_relaxed(1, drv->ppss_base + PPSS_PAUSE_REG);
+	writel_relaxed(1, drv->ppss_base + drv->pdata->ppss_pause_reg);
 	mb(); /* Make sure the write is committed before the ioctl returns. */
 }
 
 /**
  *  Resume DSPS CPU.
+ *
+ * Only call if dsps_pwr_ctl_en is false.
+ * If dsps_pwr_ctl_en is true, then DSPS will control its own power state.
  */
 static void dsps_resume(void)
 {
 	pr_debug("%s.\n", __func__);
 
-	writel_relaxed(0, drv->ppss_base + PPSS_PAUSE_REG);
+	writel_relaxed(0, drv->ppss_base + drv->pdata->ppss_pause_reg);
 	mb(); /* Make sure the write is committed before the ioctl returns. */
 }
 
@@ -425,8 +430,10 @@
 
 	switch (cmd) {
 	case DSPS_IOCTL_ON:
-		ret = dsps_power_on_handler();
-		dsps_resume();
+		if (!drv->pdata->dsps_pwr_ctl_en) {
+			ret = dsps_power_on_handler();
+			dsps_resume();
+		}
 		break;
 	case DSPS_IOCTL_OFF:
 		if (!drv->pdata->dsps_pwr_ctl_en) {
@@ -567,7 +574,7 @@
 
 	drv->smem_ramdump_segments[0].address = drv->pdata->smem_start;
 	drv->smem_ramdump_segments[0].size =  drv->pdata->smem_size;
-	drv->smem_ramdump_dev = create_ramdump_device("smem");
+	drv->smem_ramdump_dev = create_ramdump_device("smem-dsps");
 	if (!drv->smem_ramdump_dev) {
 		pr_err("%s: create_ramdump_device(\"smem\") fail\n",
 		       __func__);
@@ -634,7 +641,8 @@
 			return ret;
 		}
 
-		dsps_resume();
+		if (!drv->pdata->dsps_pwr_ctl_en)
+			dsps_resume();
 	}
 	drv->ref_count++;
 
@@ -761,7 +769,6 @@
 {
 	pr_debug("%s\n", __func__);
 	disable_irq_nosync(drv->wdog_irq);
-	dsps_suspend();
 	pil_force_shutdown(drv->pdata->pil_name);
 	dsps_power_off_handler();
 	return 0;
@@ -779,7 +786,6 @@
 	pil_force_boot(drv->pdata->pil_name);
 	atomic_set(&drv->crash_in_progress, 0);
 	enable_irq(drv->wdog_irq);
-	dsps_resume();
 	return 0;
 }
 
diff --git a/arch/arm/mach-msm/msm_rq_stats.c b/arch/arm/mach-msm/msm_rq_stats.c
index 738819f..2ea7ed3 100644
--- a/arch/arm/mach-msm/msm_rq_stats.c
+++ b/arch/arm/mach-msm/msm_rq_stats.c
@@ -44,7 +44,7 @@
 	sysfs_notify(rq_info.kobj, NULL, "def_timer_ms");
 }
 
-static ssize_t show_run_queue_avg(struct kobject *kobj,
+static ssize_t run_queue_avg_show(struct kobject *kobj,
 		struct kobj_attribute *attr, char *buf)
 {
 	unsigned int val = 0;
@@ -59,6 +59,8 @@
 	return snprintf(buf, PAGE_SIZE, "%d.%d\n", val/10, val%10);
 }
 
+static struct kobj_attribute run_queue_avg_attr = __ATTR_RO(run_queue_avg);
+
 static ssize_t show_run_queue_poll_ms(struct kobject *kobj,
 				      struct kobj_attribute *attr, char *buf)
 {
@@ -93,6 +95,10 @@
 	return count;
 }
 
+static struct kobj_attribute run_queue_poll_ms_attr =
+	__ATTR(run_queue_poll_ms, S_IWUSR | S_IRUSR, show_run_queue_poll_ms,
+			store_run_queue_poll_ms);
+
 static ssize_t show_def_timer_ms(struct kobject *kobj,
 		struct kobj_attribute *attr, char *buf)
 {
@@ -111,67 +117,33 @@
 	return count;
 }
 
-#define MSM_RQ_STATS_RO_ATTRIB(att) ({ \
-		struct attribute *attrib = NULL; \
-		struct kobj_attribute *ptr = NULL; \
-		ptr = kzalloc(sizeof(struct kobj_attribute), GFP_KERNEL); \
-		if (ptr) { \
-			ptr->attr.name = #att; \
-			ptr->attr.mode = S_IRUGO; \
-			ptr->show = show_##att; \
-			ptr->store = NULL; \
-			attrib = &ptr->attr; \
-		} \
-		attrib; })
+static struct kobj_attribute def_timer_ms_attr =
+	__ATTR(def_timer_ms, S_IWUSR | S_IRUSR, show_def_timer_ms,
+			store_def_timer_ms);
 
-#define MSM_RQ_STATS_RW_ATTRIB(att) ({ \
-		struct attribute *attrib = NULL; \
-		struct kobj_attribute *ptr = NULL; \
-		ptr = kzalloc(sizeof(struct kobj_attribute), GFP_KERNEL); \
-		if (ptr) { \
-			ptr->attr.name = #att; \
-			ptr->attr.mode = S_IWUSR|S_IRUSR; \
-			ptr->show = show_##att; \
-			ptr->store = store_##att; \
-			attrib = &ptr->attr; \
-		} \
-		attrib; })
+static struct attribute *rq_attrs[] = {
+	&def_timer_ms_attr.attr,
+	&run_queue_avg_attr.attr,
+	&run_queue_poll_ms_attr.attr,
+	NULL,
+};
+
+static struct attribute_group rq_attr_group = {
+	.attrs = rq_attrs,
+};
 
 static int init_rq_attribs(void)
 {
-	int i;
-	int err = 0;
-	const int attr_count = 4;
-
-	struct attribute **attribs =
-		kzalloc(sizeof(struct attribute *) * attr_count, GFP_KERNEL);
-
-	if (!attribs)
-		goto rel;
+	int err;
 
 	rq_info.rq_avg = 0;
-
-	attribs[0] = MSM_RQ_STATS_RW_ATTRIB(def_timer_ms);
-	attribs[1] = MSM_RQ_STATS_RO_ATTRIB(run_queue_avg);
-	attribs[2] = MSM_RQ_STATS_RW_ATTRIB(run_queue_poll_ms);
-	attribs[3] = NULL;
-
-	for (i = 0; i < attr_count - 1 ; i++) {
-		if (!attribs[i])
-			goto rel2;
-	}
-
-	rq_info.attr_group = kzalloc(sizeof(struct attribute_group),
-						GFP_KERNEL);
-	if (!rq_info.attr_group)
-		goto rel3;
-	rq_info.attr_group->attrs = attribs;
+	rq_info.attr_group = &rq_attr_group;
 
 	/* Create /sys/devices/system/cpu/cpu0/rq-stats/... */
 	rq_info.kobj = kobject_create_and_add("rq-stats",
-	&get_cpu_device(0)->kobj);
+			&get_cpu_device(0)->kobj);
 	if (!rq_info.kobj)
-		goto rel3;
+		return -ENOMEM;
 
 	err = sysfs_create_group(rq_info.kobj, rq_info.attr_group);
 	if (err)
@@ -179,19 +151,7 @@
 	else
 		kobject_uevent(rq_info.kobj, KOBJ_ADD);
 
-	if (!err)
-		return err;
-
-rel3:
-	kfree(rq_info.attr_group);
-	kfree(rq_info.kobj);
-rel2:
-	for (i = 0; i < attr_count - 1; i++)
-		kfree(attribs[i]);
-rel:
-	kfree(attribs);
-
-	return -ENOMEM;
+	return err;
 }
 
 static int __init msm_rq_stats_init(void)
diff --git a/arch/arm/mach-msm/msm_watchdog_v2.c b/arch/arm/mach-msm/msm_watchdog_v2.c
new file mode 100644
index 0000000..a5f8bcc
--- /dev/null
+++ b/arch/arm/mach-msm/msm_watchdog_v2.c
@@ -0,0 +1,400 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/workqueue.h>
+#include <linux/slab.h>
+#include <linux/jiffies.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+#include <linux/of.h>
+#include <linux/cpu.h>
+#include <linux/platform_device.h>
+
+#define MODULE_NAME "msm_watchdog"
+#define WDT0_ACCSCSSNBARK_INT 0
+#define TCSR_WDT_CFG	0x30
+#define WDT0_RST	0x04
+#define WDT0_EN		0x08
+#define WDT0_STS	0x0C
+#define WDT0_BARK_TIME	0x10
+#define WDT0_BITE_TIME	0x14
+
+#define MASK_SIZE	32
+
+struct msm_watchdog_data {
+	unsigned int __iomem phys_base;
+	size_t size;
+	void __iomem *base;
+	struct device *dev;
+	unsigned int pet_time;
+	unsigned int bark_time;
+	unsigned int bark_irq;
+	unsigned int bite_irq;
+	unsigned int do_ipi_ping;
+	unsigned long long last_pet;
+	unsigned min_slack_ticks;
+	unsigned long long min_slack_ns;
+	cpumask_t alive_mask;
+	struct work_struct init_dogwork_struct;
+	struct delayed_work dogwork_struct;
+	struct notifier_block panic_blk;
+};
+
+/*
+ * On the kernel command line specify
+ * msm_watchdog.enable=1 to enable the watchdog.
+ * The watchdog is enabled by default.
+ */
+static int enable = 1;
+module_param(enable, int, 0);
+
+/*
+ * On the kernel command line specify
+ * msm_watchdog.WDT_HZ=<clock val in HZ> to set Watchdog
+ * ticks. By default it is set to 32765.
+ */
+static long WDT_HZ = 32765;
+module_param(WDT_HZ, long, 0);
+
+/*
+ * If the watchdog is enabled at bootup (enable=1),
+ * the runtime_disable sysfs node at
+ * /sys/module/msm_watchdog/parameters/runtime_disable
+ * can be used to deactivate the watchdog.
+ * This is a one-time setting. The watchdog
+ * cannot be re-enabled once it is disabled.
+ */
+static int runtime_disable;
+static int wdog_enable_set(const char *val, struct kernel_param *kp);
+module_param_call(runtime_disable, wdog_enable_set, param_get_int,
+			&runtime_disable, 0644);
+
+static void pet_watchdog_work(struct work_struct *work);
+static void init_watchdog_work(struct work_struct *work);
+
+static void dump_cpu_alive_mask(struct msm_watchdog_data *wdog_dd)
+{
+	static char alive_mask_buf[MASK_SIZE];
+	size_t count = cpulist_scnprintf(alive_mask_buf, MASK_SIZE,
+						&wdog_dd->alive_mask);
+	alive_mask_buf[count++] = '\n';
+	alive_mask_buf[count] = '\0';
+	printk(KERN_INFO "cpu alive mask from last pet\n%s", alive_mask_buf);
+}
+
+static int msm_watchdog_suspend(struct device *dev)
+{
+	struct msm_watchdog_data *wdog_dd =
+			(struct msm_watchdog_data *)dev_get_drvdata(dev);
+	if (!enable)
+		return 0;
+	__raw_writel(1, wdog_dd->base + WDT0_RST);
+	__raw_writel(0, wdog_dd->base + WDT0_EN);
+	mb();
+	return 0;
+}
+
+static int msm_watchdog_resume(struct device *dev)
+{
+	struct msm_watchdog_data *wdog_dd =
+			(struct msm_watchdog_data *)dev_get_drvdata(dev);
+	if (!enable)
+		return 0;
+	__raw_writel(1, wdog_dd->base + WDT0_EN);
+	__raw_writel(1, wdog_dd->base + WDT0_RST);
+	mb();
+	return 0;
+}
+
+static int panic_wdog_handler(struct notifier_block *this,
+			      unsigned long event, void *ptr)
+{
+	struct msm_watchdog_data *wdog_dd = container_of(this,
+				struct msm_watchdog_data, panic_blk);
+	if (panic_timeout == 0) {
+		__raw_writel(0, wdog_dd->base + WDT0_EN);
+		mb();
+	} else {
+		__raw_writel(WDT_HZ * (panic_timeout + 4),
+				wdog_dd->base + WDT0_BARK_TIME);
+		__raw_writel(WDT_HZ * (panic_timeout + 4),
+				wdog_dd->base + WDT0_BITE_TIME);
+		__raw_writel(1, wdog_dd->base + WDT0_RST);
+	}
+	return NOTIFY_DONE;
+}
+/*
+ * TODO: implement enable/disable.
+ */
+static int wdog_enable_set(const char *val, struct kernel_param *kp)
+{
+	return 0;
+}
+
+
+static void pet_watchdog(struct msm_watchdog_data *wdog_dd)
+{
+	int slack;
+	unsigned long long time_ns;
+	unsigned long long slack_ns;
+	unsigned long long bark_time_ns = wdog_dd->bark_time * 1000000ULL;
+
+	slack = __raw_readl(wdog_dd->base + WDT0_STS) >> 3;
+	slack = ((wdog_dd->bark_time*WDT_HZ)/1000) - slack;
+	if (slack < wdog_dd->min_slack_ticks)
+		wdog_dd->min_slack_ticks = slack;
+	__raw_writel(1, wdog_dd->base + WDT0_RST);
+	time_ns = sched_clock();
+	slack_ns = (wdog_dd->last_pet + bark_time_ns) - time_ns;
+	if (slack_ns < wdog_dd->min_slack_ns)
+		wdog_dd->min_slack_ns = slack_ns;
+	wdog_dd->last_pet = time_ns;
+}
+
+static void keep_alive_response(void *info)
+{
+	int cpu = smp_processor_id();
+	struct msm_watchdog_data *wdog_dd = (struct msm_watchdog_data *)info;
+	cpumask_set_cpu(cpu, &wdog_dd->alive_mask);
+	smp_mb();
+}
+
+/*
+ * If this function does not return, it implies one of the
+ * other CPUs is not responsive.
+ */
+static void ping_other_cpus(struct msm_watchdog_data *wdog_dd)
+{
+	int cpu;
+	cpumask_clear(&wdog_dd->alive_mask);
+	smp_mb();
+	for_each_cpu(cpu, cpu_online_mask)
+		smp_call_function_single(cpu, keep_alive_response, wdog_dd, 1);
+}
+
+static void pet_watchdog_work(struct work_struct *work)
+{
+	unsigned long delay_time;
+	struct delayed_work *delayed_work = to_delayed_work(work);
+	struct msm_watchdog_data *wdog_dd = container_of(delayed_work,
+						struct msm_watchdog_data,
+							dogwork_struct);
+	delay_time = msecs_to_jiffies(wdog_dd->pet_time);
+	if (wdog_dd->do_ipi_ping)
+		ping_other_cpus(wdog_dd);
+	pet_watchdog(wdog_dd);
+	if (wdog_dd->do_ipi_ping)
+		dump_cpu_alive_mask(wdog_dd);
+	if (enable)
+		schedule_delayed_work(&wdog_dd->dogwork_struct,
+							delay_time);
+}
+
+static int msm_watchdog_remove(struct platform_device *pdev)
+{
+	struct msm_watchdog_data *wdog_dd =
+			(struct msm_watchdog_data *)platform_get_drvdata(pdev);
+	if (enable) {
+		__raw_writel(0, wdog_dd->base + WDT0_EN);
+		mb();
+		enable = 0;
+		/*
+		 * TODO: Not sure if we need to call into TZ to disable
+		 * secure wdog.
+		 */
+		/* In case we got suspended mid-exit */
+		__raw_writel(0, wdog_dd->base + WDT0_EN);
+	}
+	printk(KERN_INFO "MSM Watchdog Exit - Deactivated\n");
+	kzfree(wdog_dd);
+	return 0;
+}
+
+static irqreturn_t wdog_bark_handler(int irq, void *dev_id)
+{
+	struct msm_watchdog_data *wdog_dd = (struct msm_watchdog_data *)dev_id;
+	unsigned long nanosec_rem;
+	unsigned long long t = sched_clock();
+
+	nanosec_rem = do_div(t, 1000000000);
+	printk(KERN_INFO "Watchdog bark! Now = %lu.%06lu\n", (unsigned long) t,
+		nanosec_rem / 1000);
+
+	nanosec_rem = do_div(wdog_dd->last_pet, 1000000000);
+	printk(KERN_INFO "Watchdog last pet at %lu.%06lu\n", (unsigned long)
+		wdog_dd->last_pet, nanosec_rem / 1000);
+	if (wdog_dd->do_ipi_ping)
+		dump_cpu_alive_mask(wdog_dd);
+	panic("Apps watchdog bark received!");
+	return IRQ_HANDLED;
+}
+
+static void init_watchdog_work(struct work_struct *work)
+{
+	struct msm_watchdog_data *wdog_dd = container_of(work,
+						struct msm_watchdog_data,
+							init_dogwork_struct);
+	unsigned long delay_time;
+	u64 timeout;
+	delay_time = msecs_to_jiffies(wdog_dd->pet_time);
+	wdog_dd->min_slack_ticks = UINT_MAX;
+	wdog_dd->min_slack_ns = ULLONG_MAX;
+	timeout = (wdog_dd->bark_time * WDT_HZ)/1000;
+	__raw_writel(timeout, wdog_dd->base + WDT0_BARK_TIME);
+	__raw_writel(timeout + 3*WDT_HZ, wdog_dd->base + WDT0_BITE_TIME);
+
+	wdog_dd->panic_blk.notifier_call = panic_wdog_handler;
+	atomic_notifier_chain_register(&panic_notifier_list,
+				       &wdog_dd->panic_blk);
+	schedule_delayed_work(&wdog_dd->dogwork_struct, delay_time);
+
+	__raw_writel(1, wdog_dd->base + WDT0_EN);
+	__raw_writel(1, wdog_dd->base + WDT0_RST);
+	wdog_dd->last_pet = sched_clock();
+	printk(KERN_INFO "MSM Watchdog Initialized\n");
+	return;
+}
+
+static struct of_device_id msm_wdog_match_table[] = {
+	{ .compatible = "qcom,msm-watchdog" },
+	{}
+};
+
+static void __devinit dump_pdata(struct msm_watchdog_data *pdata)
+{
+	dev_dbg(pdata->dev, "wdog bark_time %d", pdata->bark_time);
+	dev_dbg(pdata->dev, "wdog pet_time %d", pdata->pet_time);
+	dev_dbg(pdata->dev, "wdog perform ipi ping %d", pdata->do_ipi_ping);
+	dev_dbg(pdata->dev, "wdog base address is 0x%x\n", (unsigned int)
+								pdata->base);
+}
+
+static int __devinit msm_wdog_dt_to_pdata(struct platform_device *pdev,
+					struct msm_watchdog_data *pdata)
+{
+	struct device_node *node = pdev->dev.of_node;
+	struct resource *wdog_resource;
+	int ret;
+
+	wdog_resource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	pdata->size = resource_size(wdog_resource);
+	pdata->phys_base = wdog_resource->start;
+	if (unlikely(!(devm_request_region(&pdev->dev, pdata->phys_base,
+					pdata->size, "msm-watchdog")))) {
+		dev_err(&pdev->dev, "%s cannot reserve watchdog region\n",
+								__func__);
+		return -ENXIO;
+	}
+	pdata->base  = devm_ioremap(&pdev->dev, pdata->phys_base,
+							pdata->size);
+	if (!pdata->base) {
+		dev_err(&pdev->dev, "%s cannot map wdog register space\n",
+				__func__);
+		return -ENXIO;
+	}
+
+	pdata->bark_irq = platform_get_irq(pdev, 0);
+	pdata->bite_irq = platform_get_irq(pdev, 1);
+	ret = of_property_read_u32(node, "qcom,bark-time", &pdata->bark_time);
+	if (ret) {
+		dev_err(&pdev->dev, "reading bark time failed\n");
+		return -ENXIO;
+	}
+	ret = of_property_read_u32(node, "qcom,pet-time", &pdata->pet_time);
+	if (ret) {
+		dev_err(&pdev->dev, "reading pet time failed\n");
+		return -ENXIO;
+	}
+	ret = of_property_read_u32(node, "qcom,ipi-ping", &pdata->do_ipi_ping);
+	if (ret) {
+		dev_err(&pdev->dev, "reading do ipi failed\n");
+		return -ENXIO;
+	}
+	if (!pdata->bark_time) {
+		dev_err(&pdev->dev, "%s watchdog bark time not setup\n",
+								__func__);
+		return -ENXIO;
+	}
+	if (!pdata->pet_time) {
+		dev_err(&pdev->dev, "%s watchdog pet time not setup\n",
+								__func__);
+		return -ENXIO;
+	}
+	if (pdata->do_ipi_ping > 1) {
+		dev_err(&pdev->dev, "%s invalid watchdog ipi value\n",
+								__func__);
+		return -ENXIO;
+	}
+	dump_pdata(pdata);
+	return 0;
+}
+
+static int __devinit msm_watchdog_probe(struct platform_device *pdev)
+{
+	int ret;
+	struct msm_watchdog_data *wdog_dd;
+
+	if (!pdev->dev.of_node || !enable)
+		return -ENODEV;
+	wdog_dd = kzalloc(sizeof(struct msm_watchdog_data), GFP_KERNEL);
+	if (!wdog_dd)
+		return -EIO;
+	ret = msm_wdog_dt_to_pdata(pdev, wdog_dd);
+	if (ret)
+		goto err;
+	wdog_dd->dev = &pdev->dev;
+	platform_set_drvdata(pdev, wdog_dd);
+	ret = devm_request_irq(&pdev->dev, wdog_dd->bark_irq, wdog_bark_handler,
+				IRQF_TRIGGER_RISING, "apps_wdog_bark", wdog_dd);
+	if (ret) {
+		dev_err(&pdev->dev, "failed to request bark irq\n");
+		ret = -ENXIO;
+		goto err;
+	}
+	cpumask_clear(&wdog_dd->alive_mask);
+	INIT_WORK(&wdog_dd->init_dogwork_struct, init_watchdog_work);
+	INIT_DELAYED_WORK(&wdog_dd->dogwork_struct, pet_watchdog_work);
+	schedule_work_on(0, &wdog_dd->init_dogwork_struct);
+	return 0;
+err:
+	kzfree(wdog_dd);
+	return ret;
+}
+
+static const struct dev_pm_ops msm_watchdog_dev_pm_ops = {
+	.suspend_noirq = msm_watchdog_suspend,
+	.resume_noirq = msm_watchdog_resume,
+};
+
+static struct platform_driver msm_watchdog_driver = {
+	.probe = msm_watchdog_probe,
+	.remove = msm_watchdog_remove,
+	.driver = {
+		.name = MODULE_NAME,
+		.owner = THIS_MODULE,
+		.pm = &msm_watchdog_dev_pm_ops,
+		.of_match_table = msm_wdog_match_table,
+	},
+};
+
+static int __devinit init_watchdog(void)
+{
+	return platform_driver_register(&msm_watchdog_driver);
+}
+
+late_initcall(init_watchdog);
+MODULE_DESCRIPTION("MSM Watchdog Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/arch/arm/mach-msm/msm_xo.c b/arch/arm/mach-msm/msm_xo.c
index 407231a..fb01427 100644
--- a/arch/arm/mach-msm/msm_xo.c
+++ b/arch/arm/mach-msm/msm_xo.c
@@ -234,7 +234,8 @@
 	struct msm_xo *xo = xo_voter->xo;
 	int is_d0 = xo == &msm_xo_sources[MSM_XO_TCXO_D0];
 	int needs_workaround = cpu_is_msm8960() || cpu_is_apq8064() ||
-			       cpu_is_msm8930() || cpu_is_msm9615();
+			       cpu_is_msm8930() || cpu_is_msm9615() ||
+			       cpu_is_msm8627();
 
 	if (xo_voter->mode == mode)
 		return 0;
diff --git a/arch/arm/mach-msm/pcie.c b/arch/arm/mach-msm/pcie.c
index f0809d3..5818bef 100644
--- a/arch/arm/mach-msm/pcie.c
+++ b/arch/arm/mach-msm/pcie.c
@@ -639,14 +639,14 @@
 subsys_initcall(msm_pcie_init);
 
 /* RC does not represent the right class; set it to PCI_CLASS_BRIDGE_PCI */
-static void __devinit msm_pcie_fixup_header(struct pci_dev *dev)
+static void __devinit msm_pcie_fixup_early(struct pci_dev *dev)
 {
 	PCIE_DBG("hdr_type %d\n", dev->hdr_type);
 	if (dev->hdr_type == 1)
 		dev->class = (dev->class & 0xff) | (PCI_CLASS_BRIDGE_PCI << 8);
 }
-DECLARE_PCI_FIXUP_HEADER(PCIE_VENDOR_ID_RCP, PCIE_DEVICE_ID_RCP,
-			 msm_pcie_fixup_header);
+DECLARE_PCI_FIXUP_EARLY(PCIE_VENDOR_ID_RCP, PCIE_DEVICE_ID_RCP,
+			msm_pcie_fixup_early);
 
 /*
  * actual physical (BAR) address of the device resources starts from 0x10xxxxxx;
diff --git a/arch/arm/mach-msm/perf_event_msm_krait_l2.c b/arch/arm/mach-msm/perf_event_msm_krait_l2.c
index d82f4dd..3635572 100644
--- a/arch/arm/mach-msm/perf_event_msm_krait_l2.c
+++ b/arch/arm/mach-msm/perf_event_msm_krait_l2.c
@@ -14,17 +14,24 @@
 #include <linux/irq.h>
 #include <asm/pmu.h>
 #include <linux/platform_device.h>
+#include <linux/spinlock.h>
 
 #include <mach/msm-krait-l2-accessors.h>
 
 #define MAX_L2_PERIOD	((1ULL << 32) - 1)
-#define MAX_KRAIT_L2_CTRS 5
+#define MAX_KRAIT_L2_CTRS 10
+
+#define PMCR_NUM_EV_SHIFT 11
+#define PMCR_NUM_EV_MASK 0x1f
+
+#define L2_EVT_MASK 0xfffff
+
+#define L2_SLAVE_EV_PREFIX 4
 
 #define L2PMCCNTR 0x409
 #define L2PMCCNTCR 0x408
 #define L2PMCCNTSR 0x40A
 #define L2CYCLE_CTR_BIT 31
-#define L2CYCLE_CTR_EVENT_IDX 4
 #define L2CYCLE_CTR_RAW_CODE 0xfe
 
 #define L2PMOVSR	0x406
@@ -48,16 +55,65 @@
 
 /* event format is -e rsRCCG; see get_event_desc() */
 
-#define EVENT_REG_MASK		0xf000
-#define EVENT_GROUPSEL_MASK	0x000f
-#define	EVENT_GROUPCODE_MASK	0x0ff0
+#define EVENT_PREFIX_MASK	0xf0000
+#define EVENT_REG_MASK		0x0f000
+#define EVENT_GROUPSEL_MASK	0x0000f
+#define	EVENT_GROUPCODE_MASK	0x00ff0
+
+#define EVENT_PREFIX_SHIFT	16
 #define EVENT_REG_SHIFT		12
 #define EVENT_GROUPCODE_SHIFT	4
 
 #define	RESRX_VALUE_EN	0x80000000
 
+/*
+ * The L2 PMU is shared between all CPUs, so protect
+ * its bitmap access.
+ */
+struct pmu_constraints {
+	u64 pmu_bitmap;
+	raw_spinlock_t lock;
+} l2_pmu_constraints = {
+	.pmu_bitmap = 0,
+	.lock = __RAW_SPIN_LOCK_UNLOCKED(l2_pmu_constraints.lock),
+};
+
+/* NRCCG format for perf RAW codes. */
+PMU_FORMAT_ATTR(l2_prefix, "config:16-19");
+PMU_FORMAT_ATTR(l2_reg,	"config:12-15");
+PMU_FORMAT_ATTR(l2_code, "config:4-11");
+PMU_FORMAT_ATTR(l2_grp,	"config:0-3");
+
+static struct attribute *msm_l2_ev_formats[] = {
+	&format_attr_l2_prefix.attr,
+	&format_attr_l2_reg.attr,
+	&format_attr_l2_code.attr,
+	&format_attr_l2_grp.attr,
+	NULL,
+};
+
+/*
+ * Format group is essential to access PMUs from userspace
+ * via their .name field.
+ */
+static struct attribute_group msm_l2_pmu_format_group = {
+	.name = "format",
+	.attrs = msm_l2_ev_formats,
+};
+
+static const struct attribute_group *msm_l2_pmu_attr_grps[] = {
+	&msm_l2_pmu_format_group,
+	NULL,
+};
+
 static u32 l2_orig_filter_prefix = 0x000f0030;
 
+/* L2 slave port traffic filtering */
+static u32 l2_slv_filter_prefix = 0x000f0010;
+
+static int total_l2_ctrs;
+static int l2_cycle_ctr_idx;
+
 static u32 pmu_type;
 
 static struct arm_pmu krait_l2_pmu;
@@ -128,25 +184,31 @@
 	set_l2_indirect_reg(group_reg, resr_val);
 }
 
-static void set_evfilter_task_mode(int ctr)
+static void set_evfilter_task_mode(int ctr, unsigned int is_slv)
 {
 	u32 filter_reg = (ctr * 16) + IA_L2PMXEVFILTER_BASE;
 	u32 filter_val = l2_orig_filter_prefix | 1 << smp_processor_id();
 
+	if (is_slv)
+		filter_val = l2_slv_filter_prefix;
+
 	set_l2_indirect_reg(filter_reg, filter_val);
 }
 
-static void set_evfilter_sys_mode(int ctr)
+static void set_evfilter_sys_mode(int ctr, unsigned int is_slv)
 {
 	u32 filter_reg = (ctr * 16) + IA_L2PMXEVFILTER_BASE;
 	u32 filter_val = l2_orig_filter_prefix | 0xf;
 
+	if (is_slv)
+		filter_val = l2_slv_filter_prefix;
+
 	set_l2_indirect_reg(filter_reg, filter_val);
 }
 
 static void enable_intenset(u32 idx)
 {
-	if (idx == L2CYCLE_CTR_EVENT_IDX)
+	if (idx == l2_cycle_ctr_idx)
 		set_l2_indirect_reg(L2PMINTENSET, 1 << L2CYCLE_CTR_BIT);
 	else
 		set_l2_indirect_reg(L2PMINTENSET, 1 << idx);
@@ -154,7 +216,7 @@
 
 static void disable_intenclr(u32 idx)
 {
-	if (idx == L2CYCLE_CTR_EVENT_IDX)
+	if (idx == l2_cycle_ctr_idx)
 		set_l2_indirect_reg(L2PMINTENCLR, 1 << L2CYCLE_CTR_BIT);
 	else
 		set_l2_indirect_reg(L2PMINTENCLR, 1 << idx);
@@ -162,7 +224,7 @@
 
 static void enable_counter(u32 idx)
 {
-	if (idx == L2CYCLE_CTR_EVENT_IDX)
+	if (idx == l2_cycle_ctr_idx)
 		set_l2_indirect_reg(L2PMCNTENSET, 1 << L2CYCLE_CTR_BIT);
 	else
 		set_l2_indirect_reg(L2PMCNTENSET, 1 << idx);
@@ -170,7 +232,7 @@
 
 static void disable_counter(u32 idx)
 {
-	if (idx == L2CYCLE_CTR_EVENT_IDX)
+	if (idx == l2_cycle_ctr_idx)
 		set_l2_indirect_reg(L2PMCNTENCLR, 1 << L2CYCLE_CTR_BIT);
 	else
 		set_l2_indirect_reg(L2PMCNTENCLR, 1 << idx);
@@ -181,7 +243,7 @@
 	u32 val;
 	u32 counter_reg = (idx * 16) + IA_L2PMXEVCNTR_BASE;
 
-	if (idx == L2CYCLE_CTR_EVENT_IDX)
+	if (idx == l2_cycle_ctr_idx)
 		val = get_l2_indirect_reg(L2PMCCNTR);
 	else
 		val = get_l2_indirect_reg(counter_reg);
@@ -193,7 +255,7 @@
 {
 	u32 counter_reg = (idx * 16) + IA_L2PMXEVCNTR_BASE;
 
-	if (idx == L2CYCLE_CTR_EVENT_IDX)
+	if (idx == l2_cycle_ctr_idx)
 		set_l2_indirect_reg(L2PMCCNTR, val);
 	else
 		set_l2_indirect_reg(counter_reg, val);
@@ -212,12 +274,21 @@
 {
 	struct event_desc evdesc;
 	unsigned long iflags;
+	unsigned int is_slv = 0;
+	unsigned int evt_prefix;
 
 	raw_spin_lock_irqsave(&krait_l2_pmu_hw_events.pmu_lock, iflags);
 
 	if (hwc->config_base == L2CYCLE_CTR_RAW_CODE)
 		goto out;
 
+	/* Check if user requested any special origin filtering. */
+	evt_prefix = (hwc->config_base &
+			EVENT_PREFIX_MASK) >> EVENT_PREFIX_SHIFT;
+
+	if (evt_prefix == L2_SLAVE_EV_PREFIX)
+		is_slv = 1;
+
 	set_evcntcr(idx);
 
 	memset(&evdesc, 0, sizeof(evdesc));
@@ -230,9 +301,9 @@
 		  evdesc.event_group_code);
 
 	if (cpu < 0)
-		set_evfilter_task_mode(idx);
+		set_evfilter_task_mode(idx, is_slv);
 	else
-		set_evfilter_sys_mode(idx);
+		set_evfilter_sys_mode(idx, is_slv);
 
 out:
 	enable_intenset(idx);
@@ -264,11 +335,11 @@
 	int ctr = 0;
 
 	if (hwc->config_base == L2CYCLE_CTR_RAW_CODE) {
-		if (!test_and_set_bit(L2CYCLE_CTR_EVENT_IDX, cpuc->used_mask))
-			return L2CYCLE_CTR_EVENT_IDX;
+		if (!test_and_set_bit(l2_cycle_ctr_idx, cpuc->used_mask))
+			return l2_cycle_ctr_idx;
 	}
 
-	for (ctr = 0; ctr < MAX_KRAIT_L2_CTRS - 1; ctr++) {
+	for (ctr = 0; ctr < total_l2_ctrs - 1; ctr++) {
 		if (!test_and_set_bit(ctr, cpuc->used_mask))
 			return ctr;
 	}
@@ -323,7 +394,7 @@
 		bitp = __ffs(pmovsr);
 
 		if (bitp == L2CYCLE_CTR_BIT)
-			idx = L2CYCLE_CTR_EVENT_IDX;
+			idx = l2_cycle_ctr_idx;
 		else
 			idx = bitp;
 
@@ -358,7 +429,7 @@
 static int krait_l2_map_event(struct perf_event *event)
 {
 	if (pmu_type > 0 && pmu_type == event->attr.type)
-		return event->attr.config & 0xfffff;
+		return event->attr.config & L2_EVT_MASK;
 	else
 		return -ENOENT;
 }
@@ -378,6 +449,63 @@
 		free_irq(irq, NULL);
 }
 
+static int msm_l2_test_set_ev_constraint(struct perf_event *event)
+{
+	u32 evt_type = event->attr.config & L2_EVT_MASK;
+	u8 reg   = (evt_type & 0x0F000) >> 12;
+	u8 group =  evt_type & 0x0000F;
+	unsigned long flags;
+	u32 err = 0;
+	u64 bitmap_t;
+
+	raw_spin_lock_irqsave(&l2_pmu_constraints.lock, flags);
+
+	bitmap_t = 1ULL << ((reg * 4) + group);
+
+	if (!(l2_pmu_constraints.pmu_bitmap & bitmap_t)) {
+		l2_pmu_constraints.pmu_bitmap |= bitmap_t;
+		goto out;
+	}
+
+	/* Bit is already set. Constraint failed. */
+	err = -EPERM;
+out:
+	raw_spin_unlock_irqrestore(&l2_pmu_constraints.lock, flags);
+	return err;
+}
+
+static int msm_l2_clear_ev_constraint(struct perf_event *event)
+{
+	u32 evt_type = event->attr.config & L2_EVT_MASK;
+	u8 reg   = (evt_type & 0x0F000) >> 12;
+	u8 group =  evt_type & 0x0000F;
+	unsigned long flags;
+	u64 bitmap_t;
+
+	raw_spin_lock_irqsave(&l2_pmu_constraints.lock, flags);
+
+	bitmap_t = 1ULL << ((reg * 4) + group);
+
+	/* Clear constraint bit. */
+	l2_pmu_constraints.pmu_bitmap &= ~bitmap_t;
+
+	raw_spin_unlock_irqrestore(&l2_pmu_constraints.lock, flags);
+	return 1;
+}
+
+int get_num_events(void)
+{
+	int val;
+
+	val = get_l2_indirect_reg(L2PMCR);
+
+	/*
+	 * Read bits 15:11 of the L2PMCR and add 1
+	 * for the cycle counter.
+	 */
+	return ((val >> PMCR_NUM_EV_SHIFT) & PMCR_NUM_EV_MASK) + 1;
+}
+
 static struct arm_pmu krait_l2_pmu = {
 	.id		=	ARM_PERF_PMU_ID_KRAIT_L2,
 	.type		=	ARM_PMU_DEVICE_L2CC,
@@ -393,16 +521,18 @@
 	.read_counter	=	krait_l2_read_counter,
 	.write_counter	=	krait_l2_write_counter,
 	.map_event	=	krait_l2_map_event,
-	.max_period	=	(1LLU << 32) - 1,
+	.max_period	=	MAX_L2_PERIOD,
 	.get_hw_events	=	krait_l2_get_hw_events,
-	.num_events	=	MAX_KRAIT_L2_CTRS,
+	.test_set_event_constraints	= msm_l2_test_set_ev_constraint,
+	.clear_event_constraints	= msm_l2_clear_ev_constraint,
+	.pmu.attr_groups		= msm_l2_pmu_attr_grps,
 };
 
 static int __devinit krait_l2_pmu_device_probe(struct platform_device *pdev)
 {
 	krait_l2_pmu.plat_device = pdev;
 
-	if (!armpmu_register(&krait_l2_pmu, "krait-l2", -1))
+	if (!armpmu_register(&krait_l2_pmu, "kraitl2", -1))
 		pmu_type = krait_l2_pmu.pmu.type;
 
 	return 0;
@@ -420,6 +550,21 @@
 	/* Reset all ctrs */
 	set_l2_indirect_reg(L2PMCR, L2PMCR_RESET_ALL);
 
+	/* Get num of counters in the L2cc PMU. */
+	total_l2_ctrs = get_num_events();
+	krait_l2_pmu.num_events	= total_l2_ctrs;
+
+	pr_info("Detected %d counters on the L2CC PMU.\n",
+			total_l2_ctrs);
+
+	/*
+	 * The L2 cycle counter index in the used_mask
+	 * bit stream is always after the other counters.
+	 * Counter indexes begin from 0 to keep it consistent
+	 * with the h/w.
+	 */
+	l2_cycle_ctr_idx = total_l2_ctrs - 1;
+
 	/* Avoid spurious interrupt if any */
 	get_reset_pmovsr();
 
diff --git a/arch/arm/mach-msm/perf_event_msm_l2.c b/arch/arm/mach-msm/perf_event_msm_l2.c
index 3310d92..aae2552 100644
--- a/arch/arm/mach-msm/perf_event_msm_l2.c
+++ b/arch/arm/mach-msm/perf_event_msm_l2.c
@@ -13,20 +13,77 @@
 #include <linux/irq.h>
 #include <asm/pmu.h>
 #include <linux/platform_device.h>
+#include <linux/spinlock.h>
 
 
-#define MAX_SCORPION_L2_CTRS 5
+#define MAX_SCORPION_L2_CTRS 10
+
 #define SCORPION_L2CYCLE_CTR_BIT 31
-#define SCORPION_L2CYCLE_CTR_EVENT_IDX 4
 #define SCORPION_L2CYCLE_CTR_RAW_CODE 0xfe
 #define SCORPIONL2_PMNC_E       (1 << 0)	/* Enable all counters */
 #define SCORPION_L2_EVT_PREFIX 3
 #define SCORPION_MAX_L2_REG 4
 
+#define L2_EVT_MASK 0xfffff
+#define L2_EVT_PREFIX_MASK 0xf0000
+#define L2_EVT_PREFIX_SHIFT 16
+#define L2_SLAVE_EVT_PREFIX 4
+
+#define PMCR_NUM_EV_SHIFT 11
+#define PMCR_NUM_EV_MASK 0x1f
+
+/*
+ * The L2 PMU is shared between all CPUs, so protect
+ * its bitmap access.
+ */
+struct pmu_constraints {
+	u64 pmu_bitmap;
+	raw_spinlock_t lock;
+} l2_pmu_constraints = {
+	.pmu_bitmap = 0,
+	.lock = __RAW_SPIN_LOCK_UNLOCKED(l2_pmu_constraints.lock),
+};
+
+/* NRCCG format for perf RAW codes. */
+PMU_FORMAT_ATTR(l2_prefix,	"config:16-19");
+PMU_FORMAT_ATTR(l2_reg,		"config:12-15");
+PMU_FORMAT_ATTR(l2_code,	"config:4-11");
+PMU_FORMAT_ATTR(l2_grp,		"config:0-3");
+
+static struct attribute *msm_l2_ev_formats[] = {
+	&format_attr_l2_prefix.attr,
+	&format_attr_l2_reg.attr,
+	&format_attr_l2_code.attr,
+	&format_attr_l2_grp.attr,
+	NULL,
+};
+
+/*
+ * Format group is essential to access PMUs from userspace
+ * via their .name field.
+ */
+static struct attribute_group msm_l2_pmu_format_group = {
+	.name = "format",
+	.attrs = msm_l2_ev_formats,
+};
+
+static const struct attribute_group *msm_l2_pmu_attr_grps[] = {
+	&msm_l2_pmu_format_group,
+	NULL,
+};
+
+static u32 total_l2_ctrs;
+static u32 l2_cycle_ctr_idx;
+
 static u32 pmu_type;
 
 static struct arm_pmu scorpion_l2_pmu;
 
+static u32 l2_orig_filter_prefix = 0x000f0030;
+
+/* L2 slave port traffic filtering */
+static u32 l2_slv_filter_prefix = 0x000f0010;
+
 static struct perf_event *l2_events[MAX_SCORPION_L2_CTRS];
 static unsigned long l2_used_mask[BITS_TO_LONGS(MAX_SCORPION_L2_CTRS)];
 
@@ -376,7 +433,8 @@
 	u8 group;
 
 	prefix = (evt_type & 0xF0000) >> 16;
-	if (prefix == SCORPION_L2_EVT_PREFIX) {
+	if (prefix == SCORPION_L2_EVT_PREFIX ||
+			prefix == L2_SLAVE_EVT_PREFIX) {
 		reg   = (evt_type & 0x0F000) >> 12;
 		code  = (evt_type & 0x00FF0) >> 4;
 		group =  evt_type & 0x0000F;
@@ -433,23 +491,29 @@
 	asm volatile ("mcr p15, 3, %0, c15, c6, 7" : : "r" (val));
 }
 
-static void scorpion_l2_set_evfilter_task_mode(void)
+static void scorpion_l2_set_evfilter_task_mode(unsigned int is_slv)
 {
-	u32 filter_val = 0x000f0030 | 1 << smp_processor_id();
+	u32 filter_val = l2_orig_filter_prefix | 1 << smp_processor_id();
+
+	if (is_slv)
+		filter_val = l2_slv_filter_prefix;
 
 	asm volatile ("mcr p15, 3, %0, c15, c6, 3" : : "r" (filter_val));
 }
 
-static void scorpion_l2_set_evfilter_sys_mode(void)
+static void scorpion_l2_set_evfilter_sys_mode(unsigned int is_slv)
 {
-	u32 filter_val = 0x000f003f;
+	u32 filter_val = l2_orig_filter_prefix | 0xf;
+
+	if (is_slv)
+		filter_val = l2_slv_filter_prefix;
 
 	asm volatile ("mcr p15, 3, %0, c15, c6, 3" : : "r" (filter_val));
 }
 
 static void scorpion_l2_enable_intenset(u32 idx)
 {
-	if (idx == SCORPION_L2CYCLE_CTR_EVENT_IDX) {
+	if (idx == l2_cycle_ctr_idx) {
 		asm volatile ("mcr p15, 3, %0, c15, c5, 1" : : "r"
 			      (1 << SCORPION_L2CYCLE_CTR_BIT));
 	} else {
@@ -459,7 +523,7 @@
 
 static void scorpion_l2_disable_intenclr(u32 idx)
 {
-	if (idx == SCORPION_L2CYCLE_CTR_EVENT_IDX) {
+	if (idx == l2_cycle_ctr_idx) {
 		asm volatile ("mcr p15, 3, %0, c15, c5, 0" : : "r"
 			      (1 << SCORPION_L2CYCLE_CTR_BIT));
 	} else {
@@ -469,7 +533,7 @@
 
 static void scorpion_l2_enable_counter(u32 idx)
 {
-	if (idx == SCORPION_L2CYCLE_CTR_EVENT_IDX) {
+	if (idx == l2_cycle_ctr_idx) {
 		asm volatile ("mcr p15, 3, %0, c15, c4, 3" : : "r"
 			      (1 << SCORPION_L2CYCLE_CTR_BIT));
 	} else {
@@ -479,7 +543,7 @@
 
 static void scorpion_l2_disable_counter(u32 idx)
 {
-	if (idx == SCORPION_L2CYCLE_CTR_EVENT_IDX) {
+	if (idx == l2_cycle_ctr_idx) {
 		asm volatile ("mcr p15, 3, %0, c15, c4, 2" : : "r"
 			      (1 << SCORPION_L2CYCLE_CTR_BIT));
 	} else {
@@ -492,7 +556,7 @@
 	u32 val;
 	unsigned long iflags;
 
-	if (idx == SCORPION_L2CYCLE_CTR_EVENT_IDX) {
+	if (idx == l2_cycle_ctr_idx) {
 		asm volatile ("mrc p15, 3, %0, c15, c4, 5" : "=r" (val));
 	} else {
 		raw_spin_lock_irqsave(&scorpion_l2_pmu_hw_events.pmu_lock,
@@ -512,7 +576,7 @@
 {
 	unsigned long iflags;
 
-	if (idx == SCORPION_L2CYCLE_CTR_EVENT_IDX) {
+	if (idx == l2_cycle_ctr_idx) {
 		asm volatile ("mcr p15, 3, %0, c15, c4, 5" : : "r" (val));
 	} else {
 		raw_spin_lock_irqsave(&scorpion_l2_pmu_hw_events.pmu_lock,
@@ -542,12 +606,21 @@
 	int evtype = hwc->config_base;
 	int ev_typer;
 	unsigned long iflags;
+	unsigned int is_slv = 0;
+	unsigned int evt_prefix;
 
 	raw_spin_lock_irqsave(&scorpion_l2_pmu_hw_events.pmu_lock, iflags);
 
 	if (hwc->config_base == SCORPION_L2CYCLE_CTR_RAW_CODE)
 		goto out;
 
+	/* Check if user requested any special origin filtering. */
+	evt_prefix = (hwc->config_base &
+			L2_EVT_PREFIX_MASK) >> L2_EVT_PREFIX_SHIFT;
+
+	if (evt_prefix == L2_SLAVE_EVT_PREFIX)
+		is_slv = 1;
+
 	memset(&evtinfo, 0, sizeof(evtinfo));
 
 	ev_typer = get_scorpion_l2_evtinfo(evtype, &evtinfo);
@@ -557,9 +630,9 @@
 	scorpion_l2_set_evcntcr();
 
 	if (cpu < 0)
-		scorpion_l2_set_evfilter_task_mode();
+		scorpion_l2_set_evfilter_task_mode(is_slv);
 	else
-		scorpion_l2_set_evfilter_sys_mode();
+		scorpion_l2_set_evfilter_sys_mode(is_slv);
 
 	scorpion_l2_evt_setup(evtinfo.grp, evtinfo.val);
 
@@ -594,12 +667,12 @@
 	int ctr = 0;
 
 	if (hwc->config_base == SCORPION_L2CYCLE_CTR_RAW_CODE) {
-		if (!test_and_set_bit(SCORPION_L2CYCLE_CTR_EVENT_IDX,
+		if (!test_and_set_bit(l2_cycle_ctr_idx,
 					cpuc->used_mask))
-			return SCORPION_L2CYCLE_CTR_EVENT_IDX;
+			return l2_cycle_ctr_idx;
 	}
 
-	for (ctr = 0; ctr < MAX_SCORPION_L2_CTRS - 1; ctr++) {
+	for (ctr = 0; ctr < total_l2_ctrs - 1; ctr++) {
 		if (!test_and_set_bit(ctr, cpuc->used_mask))
 			return ctr;
 	}
@@ -658,7 +731,7 @@
 		bitp = __ffs(pmovsr);
 
 		if (bitp == SCORPION_L2CYCLE_CTR_BIT)
-			idx = SCORPION_L2CYCLE_CTR_EVENT_IDX;
+			idx = l2_cycle_ctr_idx;
 		else
 			idx = bitp;
 
@@ -693,7 +766,7 @@
 static int scorpion_l2_map_event(struct perf_event *event)
 {
 	if (pmu_type > 0 && pmu_type == event->attr.type)
-		return event->attr.config & 0xfffff;
+		return event->attr.config & L2_EVT_MASK;
 	else
 		return -ENOENT;
 }
@@ -713,6 +786,71 @@
 		free_irq(irq, NULL);
 }
 
+static int msm_l2_test_set_ev_constraint(struct perf_event *event)
+{
+	u32 evt_type = event->attr.config & L2_EVT_MASK;
+	u8 prefix = (evt_type & 0xF0000) >> 16;
+	u8 reg   = (evt_type & 0x0F000) >> 12;
+	u8 group =  evt_type & 0x0000F;
+	unsigned long flags;
+	u32 err = 0;
+	u64 bitmap_t;
+
+	if (!prefix)
+		return 0;
+
+	raw_spin_lock_irqsave(&l2_pmu_constraints.lock, flags);
+
+	bitmap_t = 1ULL << ((reg * 4) + group);
+
+	if (!(l2_pmu_constraints.pmu_bitmap & bitmap_t)) {
+		l2_pmu_constraints.pmu_bitmap |= bitmap_t;
+		goto out;
+	}
+
+	/* Bit is already set. Constraint failed. */
+	err = -EPERM;
+
+out:
+	raw_spin_unlock_irqrestore(&l2_pmu_constraints.lock, flags);
+	return err;
+}
+
+static int msm_l2_clear_ev_constraint(struct perf_event *event)
+{
+	u32 evt_type = event->attr.config & L2_EVT_MASK;
+	u8 prefix = (evt_type & 0xF0000) >> 16;
+	u8 reg   = (evt_type & 0x0F000) >> 12;
+	u8 group =  evt_type & 0x0000F;
+	unsigned long flags;
+	u64 bitmap_t;
+
+	if (!prefix)
+		return 0;
+
+	raw_spin_lock_irqsave(&l2_pmu_constraints.lock, flags);
+
+	bitmap_t = 1ULL << ((reg * 4) + group);
+
+	/* Clear constraint bit. */
+	l2_pmu_constraints.pmu_bitmap &= ~bitmap_t;
+
+	raw_spin_unlock_irqrestore(&l2_pmu_constraints.lock, flags);
+	return 1;
+}
+
+static int get_num_events(void)
+{
+	int val;
+
+	val = scorpion_l2_pmnc_read();
+	/*
+	 * Read bits 15:11 of the L2PMCR and add 1
+	 * for the cycle counter.
+	 */
+	return ((val >> PMCR_NUM_EV_SHIFT) & PMCR_NUM_EV_MASK) + 1;
+}
+
 static struct arm_pmu scorpion_l2_pmu = {
 	.id		=	ARM_PERF_PMU_ID_SCORPIONMP_L2,
 	.type		=	ARM_PMU_DEVICE_L2CC,
@@ -730,14 +868,16 @@
 	.map_event	=	scorpion_l2_map_event,
 	.max_period	=	(1LLU << 32) - 1,
 	.get_hw_events	=	scorpion_l2_get_hw_events,
-	.num_events	=	MAX_SCORPION_L2_CTRS,
+	.test_set_event_constraints	= msm_l2_test_set_ev_constraint,
+	.clear_event_constraints	= msm_l2_clear_ev_constraint,
+	.pmu.attr_groups		= msm_l2_pmu_attr_grps,
 };
 
 static int __devinit scorpion_l2_pmu_device_probe(struct platform_device *pdev)
 {
 	scorpion_l2_pmu.plat_device = pdev;
 
-	if (!armpmu_register(&scorpion_l2_pmu, "scorpion-l2", -1))
+	if (!armpmu_register(&scorpion_l2_pmu, "scorpionl2", -1))
 		pmu_type = scorpion_l2_pmu.pmu.type;
 
 	return 0;
@@ -755,6 +895,20 @@
 	/* Avoid spurious interrupt if any */
 	scorpion_l2_get_reset_pmovsr();
 
+	total_l2_ctrs = get_num_events();
+	scorpion_l2_pmu.num_events = total_l2_ctrs;
+
+	pr_info("Detected %d counters on the L2CC PMU.\n",
+			total_l2_ctrs);
+
+	/*
+	 * The L2 cycle counter index in the used_mask
+	 * bit stream is always after the other counters.
+	 * Counter indexes begin from 0 to keep it consistent
+	 * with the h/w.
+	 */
+	l2_cycle_ctr_idx = total_l2_ctrs - 1;
+
 	return platform_driver_register(&scorpion_l2_pmu_driver);
 }
 device_initcall(register_scorpion_l2_pmu_driver);
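The constraint bitmap and the slave-port prefix in both L2 PMU drivers above follow the
same raw event layout exposed through the format group (config:16-19/12-15/4-11/0-3).
A standalone sketch of how such a raw code is decomposed and mapped onto the constraint
bitmap checked by msm_l2_test_set_ev_constraint(); the value 0x40011 is purely illustrative:

	#include <stdio.h>
	#include <stdint.h>

	/* Standalone sketch, not part of the patch; 0x40011 is an arbitrary
	 * illustration of a "-e rsRCCG" raw code with the slave-port prefix. */
	int main(void)
	{
		uint32_t config = 0x40011;
		uint32_t evt    = config & 0xfffff;		/* L2_EVT_MASK */

		uint32_t prefix = (evt & 0xf0000) >> 16;	/* 4 selects slave-port filtering */
		uint32_t reg    = (evt & 0x0f000) >> 12;	/* resource register */
		uint32_t code   = (evt & 0x00ff0) >> 4;		/* group code */
		uint32_t group  =  evt & 0x0000f;		/* group select */

		/* Each (reg, group) pair owns one bit in the constraint bitmap; a second
		 * event claiming the same bit is rejected by the test_set callback. */
		uint64_t bit = 1ULL << (reg * 4 + group);

		printf("prefix=%u reg=%u code=0x%02x group=%u constraint bit=0x%llx\n",
		       prefix, reg, code, group, (unsigned long long)bit);
		return 0;
	}
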
diff --git a/arch/arm/mach-msm/peripheral-loader.c b/arch/arm/mach-msm/peripheral-loader.c
index bfbf4bc..3f6eb95 100644
--- a/arch/arm/mach-msm/peripheral-loader.c
+++ b/arch/arm/mach-msm/peripheral-loader.c
@@ -114,11 +114,15 @@
 
 static int pil_proxy_vote(struct pil_device *pil)
 {
+	int ret = 0;
+
 	if (pil->desc->ops->proxy_vote) {
 		wake_lock(&pil->wlock);
-		return pil->desc->ops->proxy_vote(pil->desc);
+		ret = pil->desc->ops->proxy_vote(pil->desc);
+		if (ret)
+			wake_unlock(&pil->wlock);
 	}
-	return 0;
+	return ret;
 }
 
 static void pil_proxy_unvote(struct pil_device *pil, unsigned long timeout)
@@ -226,7 +230,7 @@
 
 static int segment_is_loadable(const struct elf32_phdr *p)
 {
-	return (p->p_type & PT_LOAD) && !segment_is_hash(p->p_flags);
+	return (p->p_type == PT_LOAD) && !segment_is_hash(p->p_flags);
 }
 
 /* Synchronize request_firmware() with suspend */
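
A small standalone illustration of why the hunk above changes the loadable-segment test
from a bitwise AND to an equality check: PT_LOAD is the value 1, not a flag bit, so the
old expression also matched any odd segment type such as PT_INTERP (3):

	#include <stdio.h>

	#define PT_LOAD   1
	#define PT_INTERP 3	/* an odd, non-loadable segment type */

	int main(void)
	{
		unsigned int p_type = PT_INTERP;

		/* Old test: true for any odd segment type, not just PT_LOAD. */
		printf("bitwise test:  %d\n", (p_type & PT_LOAD) != 0);
		/* Fixed test: only genuine PT_LOAD segments qualify. */
		printf("equality test: %d\n", p_type == PT_LOAD);
		return 0;
	}
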
diff --git a/arch/arm/mach-msm/pil-gss.c b/arch/arm/mach-msm/pil-gss.c
index dc7baa1..73248db 100644
--- a/arch/arm/mach-msm/pil-gss.c
+++ b/arch/arm/mach-msm/pil-gss.c
@@ -356,6 +356,8 @@
 		desc->ops = &pil_gss_ops;
 		dev_info(&pdev->dev, "using non-secure boot\n");
 	}
+	/* Force into low power mode because hardware doesn't do this */
+	desc->ops->shutdown(desc);
 
 	drv->pil = msm_pil_register(desc);
 	if (IS_ERR(drv->pil)) {
diff --git a/arch/arm/mach-msm/pil-venus.c b/arch/arm/mach-msm/pil-venus.c
new file mode 100644
index 0000000..6a0aeaa
--- /dev/null
+++ b/arch/arm/mach-msm/pil-venus.c
@@ -0,0 +1,464 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/elf.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/iommu.h>
+#include <linux/iopoll.h>
+#include <linux/of.h>
+#include <linux/regulator/consumer.h>
+
+#include <asm/page.h>
+#include <asm/sizes.h>
+
+#include <mach/iommu.h>
+#include <mach/iommu_domains.h>
+
+#include "peripheral-loader.h"
+#include "scm-pas.h"
+
+/* VENUS WRAPPER registers */
+#define VENUS_WRAPPER_CLOCK_CONFIG			0x4
+#define VENUS_WRAPPER_VBIF_SS_SEC_CPA_START_ADDR	0x1018
+#define VENUS_WRAPPER_VBIF_SS_SEC_CPA_END_ADDR		0x101C
+#define VENUS_WRAPPER_VBIF_SS_SEC_FW_START_ADDR		0x1020
+#define VENUS_WRAPPER_VBIF_SS_SEC_FW_END_ADDR		0x1024
+#define VENUS_WRAPPER_CPU_CLOCK_CONFIG			0x2000
+#define VENUS_WRAPPER_SW_RESET				0x3000
+
+/* VENUS VBIF registers */
+#define VENUS_VBIF_AXI_HALT_CTRL0			0x0
+#define VENUS_VBIF_AXI_HALT_CTRL0_HALT_REQ		BIT(0)
+
+#define VENUS_VBIF_AXI_HALT_CTRL1			0x4
+#define VENUS_VBIF_AXI_HALT_CTRL1_HALT_ACK		BIT(0)
+#define VENUS_VBIF_AXI_HALT_ACK_TIMEOUT_US		500000
+
+/* PIL proxy vote timeout */
+#define VENUS_PROXY_TIMEOUT				10000
+
+/* Poll interval in uS */
+#define POLL_INTERVAL_US				50
+
+static const char * const clk_names[] = {
+	"core_clk",
+	"iface_clk",
+	"bus_clk",
+	"mem_clk",
+};
+
+struct venus_data {
+	void __iomem *venus_wrapper_base;
+	void __iomem *venus_vbif_base;
+	struct pil_device *pil;
+	struct regulator *gdsc;
+	phys_addr_t start_addr;
+	struct clk *clks[ARRAY_SIZE(clk_names)];
+	struct device *iommu_fw_ctx;
+	struct iommu_domain *iommu_fw_domain;
+	int venus_domain_num;
+	bool is_booted;
+	u32 fw_sz;
+	u32 fw_min_paddr;
+	u32 fw_max_paddr;
+};
+
+static int venus_register_domain(u32 fw_max_sz)
+{
+	struct msm_iova_partition venus_fw_partition = {
+		.start = 0,
+		.size = fw_max_sz,
+	};
+	struct msm_iova_layout venus_fw_layout = {
+		.partitions = &venus_fw_partition,
+		.npartitions = 1,
+		.client_name = "pil_venus",
+		.domain_flags = 0,
+	};
+
+	return msm_register_domain(&venus_fw_layout);
+}
+
+/* Get venus clocks and set rates for rate-settable clocks */
+static int venus_clock_setup(struct device *dev)
+{
+	struct venus_data *drv = dev_get_drvdata(dev);
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(drv->clks); i++) {
+		drv->clks[i] = devm_clk_get(dev, clk_names[i]);
+		if (IS_ERR(drv->clks[i])) {
+			dev_err(dev, "failed to get %s\n",
+				clk_names[i]);
+			return PTR_ERR(drv->clks[i]);
+		}
+		/* Make sure rate-settable clocks' rates are set */
+		if (clk_get_rate(drv->clks[i]) == 0)
+			clk_set_rate(drv->clks[i],
+				     clk_round_rate(drv->clks[i], 0));
+	}
+
+	return 0;
+}
+
+static int venus_clock_prepare_enable(struct device *dev)
+{
+	struct venus_data *drv = dev_get_drvdata(dev);
+	int rc, i;
+
+	for (i = 0; i < ARRAY_SIZE(drv->clks); i++) {
+		rc = clk_prepare_enable(drv->clks[i]);
+		if (rc) {
+			dev_err(dev, "failed to enable %s\n",
+				clk_names[i]);
+			for (i--; i >= 0; i--)
+				clk_disable_unprepare(drv->clks[i]);
+			return rc;
+		}
+	}
+
+	return 0;
+}
+
+static void venus_clock_disable_unprepare(struct device *dev)
+{
+	struct venus_data *drv = dev_get_drvdata(dev);
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(drv->clks); i++)
+		clk_disable_unprepare(drv->clks[i]);
+}
+
+static int pil_venus_make_proxy_vote(struct pil_desc *pil)
+{
+	struct venus_data *drv = dev_get_drvdata(pil->dev);
+	int rc;
+
+	/*
+	 * Clocks need to be proxy voted so that control of the clocks
+	 * can be handed over from the PIL driver to the Venus driver.
+	 * The GDSC must be turned on before the clocks can be enabled,
+	 * so enable the GDSC here.
+	 */
+	rc = regulator_enable(drv->gdsc);
+	if (rc) {
+		dev_err(pil->dev, "GDSC enable failed\n");
+		return rc;
+	}
+
+	rc = venus_clock_prepare_enable(pil->dev);
+	if (rc)
+		regulator_disable(drv->gdsc);
+
+	return rc;
+}
+
+static void pil_venus_remove_proxy_vote(struct pil_desc *pil)
+{
+	struct venus_data *drv = dev_get_drvdata(pil->dev);
+
+	venus_clock_disable_unprepare(pil->dev);
+
+	/* Disable GDSC */
+	regulator_disable(drv->gdsc);
+}
+
+static int pil_venus_init_image(struct pil_desc *pil, const u8 *metadata,
+		size_t size)
+{
+	const struct elf32_hdr *ehdr = (struct elf32_hdr *)metadata;
+	struct venus_data *drv = dev_get_drvdata(pil->dev);
+
+	drv->start_addr = ehdr->e_entry;
+
+	if (drv->start_addr < drv->fw_min_paddr ||
+	    drv->start_addr >= drv->fw_max_paddr) {
+		dev_err(pil->dev, "fw start addr is not within valid range\n");
+		return -EINVAL;
+	}
+
+	drv->fw_sz = drv->fw_max_paddr - drv->start_addr;
+
+	return 0;
+}
+
+static int pil_venus_reset(struct pil_desc *pil)
+{
+	int rc;
+	struct venus_data *drv = dev_get_drvdata(pil->dev);
+	void __iomem *wrapper_base = drv->venus_wrapper_base;
+	phys_addr_t pa = drv->start_addr;
+	unsigned long iova;
+
+	/*
+	 * The GDSC needs to remain on until Venus is shut down, so enable
+	 * the GDSC here again to make sure it remains on beyond the
+	 * expiry of the proxy vote timer.
+	 */
+	rc = regulator_enable(drv->gdsc);
+	if (rc) {
+		dev_err(pil->dev, "GDSC enable failed\n");
+		return rc;
+	}
+
+	/* Program CPA start and end address */
+	writel_relaxed(0, wrapper_base +
+			VENUS_WRAPPER_VBIF_SS_SEC_CPA_START_ADDR);
+	writel_relaxed(drv->fw_sz, wrapper_base +
+			VENUS_WRAPPER_VBIF_SS_SEC_CPA_END_ADDR);
+
+	/* Program FW start and end address */
+	writel_relaxed(0, wrapper_base +
+			VENUS_WRAPPER_VBIF_SS_SEC_FW_START_ADDR);
+	writel_relaxed(drv->fw_sz, wrapper_base +
+			VENUS_WRAPPER_VBIF_SS_SEC_FW_END_ADDR);
+
+	rc = iommu_attach_device(drv->iommu_fw_domain, drv->iommu_fw_ctx);
+	if (rc) {
+		dev_err(pil->dev, "venus fw iommu attach failed\n");
+		goto err_iommu_attach;
+	}
+
+	/* Enable all Venus internal clocks */
+	writel_relaxed(0, wrapper_base + VENUS_WRAPPER_CLOCK_CONFIG);
+	writel_relaxed(0, wrapper_base + VENUS_WRAPPER_CPU_CLOCK_CONFIG);
+
+	/* Make sure clocks are enabled */
+	mb();
+
+	/*
+	 * Need to wait 10 cycles of internal clocks before bringing ARM9
+	 * out of reset.
+	 */
+	udelay(1);
+
+	/* Map virtual addr space 0 - fw_sz to firmware physical addr space */
+	rc = msm_iommu_map_contig_buffer(pa, drv->venus_domain_num, 0,
+					 drv->fw_sz, SZ_4K, 0, &iova);
+
+	if (rc || (iova != 0)) {
+		dev_err(pil->dev, "Failed to setup IOMMU\n");
+		goto err_iommu_map;
+	}
+
+	/* Bring Arm9 out of reset */
+	writel_relaxed(0, wrapper_base + VENUS_WRAPPER_SW_RESET);
+
+	drv->is_booted = 1;
+
+	return 0;
+
+err_iommu_map:
+	iommu_detach_device(drv->iommu_fw_domain, drv->iommu_fw_ctx);
+
+err_iommu_attach:
+	regulator_disable(drv->gdsc);
+
+	return rc;
+}
+
+static int pil_venus_shutdown(struct pil_desc *pil)
+{
+	struct venus_data *drv = dev_get_drvdata(pil->dev);
+	void __iomem *vbif_base = drv->venus_vbif_base;
+	void __iomem *wrapper_base = drv->venus_wrapper_base;
+	u32 reg;
+	int rc;
+
+	if (!drv->is_booted)
+		return 0;
+
+	venus_clock_prepare_enable(pil->dev);
+
+	/* Halt AXI and AXI OCMEM VBIF Access */
+	reg = readl_relaxed(vbif_base + VENUS_VBIF_AXI_HALT_CTRL0);
+	reg |= VENUS_VBIF_AXI_HALT_CTRL0_HALT_REQ;
+	writel_relaxed(reg, vbif_base + VENUS_VBIF_AXI_HALT_CTRL0);
+
+	/* Request for AXI bus port halt */
+	rc = readl_poll_timeout(vbif_base + VENUS_VBIF_AXI_HALT_CTRL1,
+			reg, reg & VENUS_VBIF_AXI_HALT_CTRL1_HALT_ACK,
+			POLL_INTERVAL_US,
+			VENUS_VBIF_AXI_HALT_ACK_TIMEOUT_US);
+	if (rc)
+		dev_err(pil->dev, "Port halt timeout\n");
+
+	/* Assert the reset to ARM9 */
+	reg = readl_relaxed(wrapper_base + VENUS_WRAPPER_SW_RESET);
+	reg |= BIT(4);
+	writel_relaxed(reg, wrapper_base + VENUS_WRAPPER_SW_RESET);
+
+	/* Make sure reset is asserted before the mapping is removed */
+	mb();
+
+	msm_iommu_unmap_contig_buffer(0, drv->venus_domain_num,
+				      0, drv->fw_sz);
+
+	iommu_detach_device(drv->iommu_fw_domain, drv->iommu_fw_ctx);
+
+	venus_clock_disable_unprepare(pil->dev);
+
+	regulator_disable(drv->gdsc);
+
+	drv->is_booted = 0;
+
+	return 0;
+}
+
+static struct pil_reset_ops pil_venus_ops = {
+	.init_image = pil_venus_init_image,
+	.auth_and_reset = pil_venus_reset,
+	.shutdown = pil_venus_shutdown,
+	.proxy_vote = pil_venus_make_proxy_vote,
+	.proxy_unvote = pil_venus_remove_proxy_vote,
+};
+
+static int __devinit pil_venus_probe(struct platform_device *pdev)
+{
+	struct venus_data *drv;
+	struct resource *res;
+	struct pil_desc *desc;
+	int rc;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res)
+		return -EINVAL;
+
+	drv = devm_kzalloc(&pdev->dev, sizeof(*drv), GFP_KERNEL);
+	if (!drv)
+		return -ENOMEM;
+	platform_set_drvdata(pdev, drv);
+
+	drv->venus_wrapper_base = devm_ioremap(&pdev->dev, res->start,
+					resource_size(res));
+	if (!drv->venus_wrapper_base)
+		return -ENOMEM;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+	if (!res)
+		return -EINVAL;
+
+	drv->venus_vbif_base = devm_ioremap(&pdev->dev, res->start,
+					resource_size(res));
+	if (!drv->venus_vbif_base)
+		return -ENOMEM;
+
+	drv->gdsc = devm_regulator_get(&pdev->dev, "vdd");
+	if (IS_ERR(drv->gdsc)) {
+		dev_err(&pdev->dev, "Failed to get Venus GDSC\n");
+		return -ENODEV;
+	}
+
+	rc = venus_clock_setup(&pdev->dev);
+	if (rc)
+		return rc;
+
+	drv->iommu_fw_ctx  = msm_iommu_get_ctx("venus_fw");
+	if (!drv->iommu_fw_ctx) {
+		dev_err(&pdev->dev, "No iommu fw context found\n");
+		return -ENODEV;
+	}
+
+	/* Get fw address boundaries */
+	rc = of_property_read_u32(pdev->dev.of_node,
+				  "qcom,firmware-max-paddr",
+				  &drv->fw_max_paddr);
+	if (rc) {
+		dev_err(&pdev->dev, "Failed to get fw max paddr\n");
+		return rc;
+	}
+
+	rc = of_property_read_u32(pdev->dev.of_node,
+				  "qcom,firmware-min-paddr",
+				  &drv->fw_min_paddr);
+	if (rc) {
+		dev_err(&pdev->dev, "Failed to get fw min paddr\n");
+		return rc;
+	}
+
+	if (drv->fw_max_paddr <= drv->fw_min_paddr) {
+		dev_err(&pdev->dev, "Invalid fw max paddr or min paddr\n");
+		return -EINVAL;
+	}
+
+	drv->venus_domain_num =
+		venus_register_domain(drv->fw_max_paddr - drv->fw_min_paddr);
+	if (drv->venus_domain_num < 0) {
+		dev_err(&pdev->dev, "Venus fw iommu domain register failed\n");
+		return -ENODEV;
+	}
+
+	drv->iommu_fw_domain = msm_get_iommu_domain(drv->venus_domain_num);
+	if (!drv->iommu_fw_domain) {
+		dev_err(&pdev->dev, "No iommu fw domain found\n");
+		return -ENODEV;
+	}
+
+	desc = devm_kzalloc(&pdev->dev, sizeof(*desc), GFP_KERNEL);
+	if (!desc)
+		return -ENOMEM;
+
+	rc = of_property_read_string(pdev->dev.of_node, "qcom,firmware-name",
+				      &desc->name);
+	if (rc)
+		return rc;
+
+	desc->dev = &pdev->dev;
+	desc->owner = THIS_MODULE;
+	desc->proxy_timeout = VENUS_PROXY_TIMEOUT;
+
+	/* TODO: need to add secure boot when the support is available */
+	desc->ops = &pil_venus_ops;
+	dev_info(&pdev->dev, "using non-secure boot\n");
+
+	drv->pil = msm_pil_register(desc);
+	if (IS_ERR(drv->pil))
+		return PTR_ERR(drv->pil);
+
+	return 0;
+}
+
+static int __devexit pil_venus_remove(struct platform_device *pdev)
+{
+	struct venus_data *drv = platform_get_drvdata(pdev);
+	msm_pil_unregister(drv->pil);
+
+	return 0;
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id msm_pil_venus_match[] = {
+	{.compatible = "qcom,pil-venus"},
+	{}
+};
+#endif
+
+static struct platform_driver pil_venus_driver = {
+	.probe = pil_venus_probe,
+	.remove = __devexit_p(pil_venus_remove),
+	.driver = {
+		.name = "pil_venus",
+		.owner = THIS_MODULE,
+		.of_match_table = of_match_ptr(msm_pil_venus_match),
+	},
+};
+
+module_platform_driver(pil_venus_driver);
+
+MODULE_DESCRIPTION("Support for booting VENUS processors");
+MODULE_LICENSE("GPL v2");
diff --git a/arch/arm/mach-msm/platsmp-8625.c b/arch/arm/mach-msm/platsmp-8625.c
index b31d94b..700f966 100644
--- a/arch/arm/mach-msm/platsmp-8625.c
+++ b/arch/arm/mach-msm/platsmp-8625.c
@@ -18,6 +18,7 @@
 #include <linux/smp.h>
 #include <linux/io.h>
 #include <linux/interrupt.h>
+#include <linux/irq.h>
 
 #include <asm/cacheflush.h>
 #include <asm/hardware/gic.h>
diff --git a/arch/arm/mach-msm/platsmp.c b/arch/arm/mach-msm/platsmp.c
index 49e63aa..428b998 100644
--- a/arch/arm/mach-msm/platsmp.c
+++ b/arch/arm/mach-msm/platsmp.c
@@ -91,7 +91,7 @@
 	if (machine_is_apq8064_sim())
 		writel_relaxed(0xf0000, base_ptr+0x04);
 
-	if (machine_is_copper_sim()) {
+	if (machine_is_msm8974_sim()) {
 		writel_relaxed(0x800, base_ptr+0x04);
 		writel_relaxed(0x3FFF, base_ptr+0x14);
 	}
@@ -139,10 +139,11 @@
 	    machine_is_apq8064_sim())
 		return krait_release_secondary_sim(0x02088000, cpu);
 
-	if (machine_is_copper_sim())
+	if (machine_is_msm8974_sim())
 		return krait_release_secondary_sim(0xf9088000, cpu);
 
-	if (cpu_is_msm8960() || cpu_is_msm8930() || cpu_is_apq8064())
+	if (cpu_is_msm8960() || cpu_is_msm8930() || cpu_is_apq8064() ||
+	    cpu_is_msm8627())
 		return krait_release_secondary(0x02088000, cpu);
 
 	WARN(1, "unknown CPU case in release_secondary\n");
diff --git a/arch/arm/mach-msm/pm-8x60.c b/arch/arm/mach-msm/pm-8x60.c
index 14e6f67..595484e 100644
--- a/arch/arm/mach-msm/pm-8x60.c
+++ b/arch/arm/mach-msm/pm-8x60.c
@@ -568,7 +568,7 @@
 
 static void msm_pm_qtimer_available(void)
 {
-	if (machine_is_copper())
+	if (machine_is_msm8974())
 		msm_pm_use_qtimer = true;
 }
 
@@ -1040,8 +1040,6 @@
 	msm_pm_mode_sysfs_add();
 	msm_pm_add_stats(enable_stats, ARRAY_SIZE(enable_stats));
 
-	msm_spm_allow_x_cpu_set_vdd(false);
-
 	suspend_set_ops(&msm_pm_ops);
 	msm_pm_qtimer_available();
 	msm_cpuidle_init();
diff --git a/arch/arm/mach-msm/qdsp5/adsp.c b/arch/arm/mach-msm/qdsp5/adsp.c
index b485058..eee7e37 100644
--- a/arch/arm/mach-msm/qdsp5/adsp.c
+++ b/arch/arm/mach-msm/qdsp5/adsp.c
@@ -1262,7 +1262,7 @@
 			clk_set_rate(mod->clk, adsp_info.module[i].clk_rate);
 		mod->verify_cmd = adsp_info.module[i].verify_cmd;
 		mod->patch_event = adsp_info.module[i].patch_event;
-		INIT_HLIST_HEAD(&mod->pmem_regions);
+		INIT_HLIST_HEAD(&mod->ion_regions);
 		mod->pdev.name = adsp_info.module[i].pdev_name;
 		mod->pdev.id = -1;
 		adsp_info.id_to_module[i] = mod;
diff --git a/arch/arm/mach-msm/qdsp5/adsp.h b/arch/arm/mach-msm/qdsp5/adsp.h
index 0f16111..06e2f22 100644
--- a/arch/arm/mach-msm/qdsp5/adsp.h
+++ b/arch/arm/mach-msm/qdsp5/adsp.h
@@ -20,12 +20,16 @@
 
 #include <linux/types.h>
 #include <linux/msm_adsp.h>
+#include <linux/ion.h>
 #include <mach/msm_rpcrouter.h>
 #include <mach/msm_adsp.h>
 
 int adsp_pmem_fixup(struct msm_adsp_module *module, void **addr,
 		    unsigned long len);
-int adsp_pmem_fixup_kvaddr(struct msm_adsp_module *module, void **addr,
+int adsp_ion_do_cache_op(struct msm_adsp_module *module, void *addr,
+			void *paddr, unsigned long len,
+			unsigned long offset, int cmd);
+int adsp_ion_fixup_kvaddr(struct msm_adsp_module *module, void **addr,
 			   unsigned long *kvaddr, unsigned long len,
 			   struct file **filp, unsigned long *offset);
 int adsp_pmem_paddr_fixup(struct msm_adsp_module *module, void **addr);
@@ -276,8 +280,8 @@
 	struct clk *clk;
 	int open_count;
 
-	struct mutex pmem_regions_lock;
-	struct hlist_head pmem_regions;
+	struct mutex ion_regions_lock;
+	struct hlist_head ion_regions;
 	int (*verify_cmd) (struct msm_adsp_module*, unsigned int, void *,
 			   size_t);
 	int (*patch_event) (struct msm_adsp_module*, struct adsp_event *);
diff --git a/arch/arm/mach-msm/qdsp5/adsp_driver.c b/arch/arm/mach-msm/qdsp5/adsp_driver.c
index 6860d84..9d261ae 100644
--- a/arch/arm/mach-msm/qdsp5/adsp_driver.c
+++ b/arch/arm/mach-msm/qdsp5/adsp_driver.c
@@ -1,7 +1,7 @@
 /* arch/arm/mach-msm/qdsp5/adsp_driver.c
  *
  * Copyright (C) 2008 Google, Inc.
- * Copyright (c) 2009, Code Aurora Forum. All rights reserved.
+ * Copyright (c) 2009, 2012 Code Aurora Forum. All rights reserved.
  * Author: Iliyan Malchev <ibm@android.com>
  *
  * This software is licensed under the terms of the GNU General Public
@@ -23,25 +23,27 @@
 #include <linux/uaccess.h>
 #include <linux/slab.h>
 #include <linux/module.h>
-
 #include "adsp.h"
-
 #include <linux/msm_adsp.h>
 #include <linux/android_pmem.h>
 #include <mach/debug_mm.h>
 
-struct adsp_pmem_info {
+struct adsp_ion_info {
 	int fd;
 	void *vaddr;
 };
 
-struct adsp_pmem_region {
+struct adsp_ion_region {
 	struct hlist_node list;
 	void *vaddr;
 	unsigned long paddr;
 	unsigned long kvaddr;
 	unsigned long len;
+	unsigned long ion_flag;
 	struct file *file;
+	struct ion_handle *handle;
+	struct ion_client *client;
+	int fd;
 };
 
 struct adsp_device {
@@ -90,14 +92,14 @@
 	res;							\
 })
 
-static int adsp_pmem_check(struct msm_adsp_module *module,
+static int adsp_ion_check(struct msm_adsp_module *module,
 		void *vaddr, unsigned long len)
 {
-	struct adsp_pmem_region *region_elt;
+	struct adsp_ion_region *region_elt;
 	struct hlist_node *node;
-	struct adsp_pmem_region t = { .vaddr = vaddr, .len = len };
+	struct adsp_ion_region t = { .vaddr = vaddr, .len = len };
 
-	hlist_for_each_entry(region_elt, node, &module->pmem_regions, list) {
+	hlist_for_each_entry(region_elt, node, &module->ion_regions, list) {
 		if (CONTAINS(region_elt, &t) || CONTAINS(&t, region_elt) ||
 		    OVERLAPS(region_elt, &t)) {
 			MM_ERR("module %s:"
@@ -116,58 +118,109 @@
 	return 0;
 }
 
-static int adsp_pmem_add(struct msm_adsp_module *module,
-			 struct adsp_pmem_info *info)
+static int get_ion_region_info(int fd, struct adsp_ion_region *region)
 {
-	unsigned long paddr, kvaddr, len;
-	struct file *file;
-	struct adsp_pmem_region *region;
+	unsigned long ionflag;
+	void *temp_ptr;
 	int rc = -EINVAL;
 
-	mutex_lock(&module->pmem_regions_lock);
-	region = kmalloc(sizeof(*region), GFP_KERNEL);
+	region->client = msm_ion_client_create(UINT_MAX, "Video_Client");
+	if (IS_ERR_OR_NULL(region->client)) {
+		pr_err("Unable to create ION client\n");
+		goto client_error;
+	}
+	region->handle = ion_import_dma_buf(region->client, fd);
+	if (IS_ERR_OR_NULL(region->handle)) {
+		pr_err("%s: could not get handle of the given fd\n", __func__);
+		goto import_error;
+	}
+	rc = ion_handle_get_flags(region->client, region->handle, &ionflag);
+	if (rc) {
+		pr_err("%s: could not get flags for the handle\n", __func__);
+		goto flag_error;
+	}
+	temp_ptr = ion_map_kernel(region->client, region->handle, ionflag);
+	if (IS_ERR_OR_NULL(temp_ptr)) {
+		pr_err("%s: could not get virtual address\n", __func__);
+		goto map_error;
+	}
+	region->kvaddr = (unsigned long) temp_ptr;
+	region->ion_flag = (unsigned long) ionflag;
+
+	rc = ion_phys(region->client, region->handle, &region->paddr,
+					(size_t *)(&region->len));
+	if (rc) {
+		pr_err("%s: could not get physical address\n", __func__);
+		goto ion_error;
+	}
+	return rc;
+ion_error:
+	ion_unmap_kernel(region->client, region->handle);
+map_error:
+	ion_free(region->client, region->handle);
+flag_error:
+import_error:
+	ion_client_destroy(region->client);
+client_error:
+	return -EINVAL;
+}
+
+static void free_ion_region(struct ion_client *client,
+			struct ion_handle *handle)
+{
+	ion_unmap_kernel(client, handle);
+	ion_free(client, handle);
+	ion_client_destroy(client);
+}
+
+static int adsp_ion_add(struct msm_adsp_module *module,
+			 struct adsp_ion_info *info)
+{
+	struct adsp_ion_region *region;
+	int rc = -EINVAL;
+	mutex_lock(&module->ion_regions_lock);
+	region = kmalloc(sizeof(struct adsp_ion_region), GFP_KERNEL);
 	if (!region) {
 		rc = -ENOMEM;
 		goto end;
 	}
 	INIT_HLIST_NODE(&region->list);
-	if (get_pmem_file(info->fd, &paddr, &kvaddr, &len, &file)) {
+	if (get_ion_region_info(info->fd, region)) {
 		kfree(region);
 		goto end;
 	}
 
-	rc = adsp_pmem_check(module, info->vaddr, len);
+	rc = adsp_ion_check(module, info->vaddr, region->len);
 	if (rc < 0) {
-		put_pmem_file(file);
+		free_ion_region(region->client, region->handle);
 		kfree(region);
 		goto end;
 	}
-
 	region->vaddr = info->vaddr;
-	region->paddr = paddr;
-	region->kvaddr = kvaddr;
-	region->len = len;
-	region->file = file;
-
-	hlist_add_head(&region->list, &module->pmem_regions);
+	region->fd = info->fd;
+	region->file = NULL;
+	MM_INFO("adsp_ion_add: module %s: fd %d, vaddr 0x%x, len %d\n",
+			module->name, region->fd, (unsigned int)region->vaddr,
+			(int)region->len);
+	hlist_add_head(&region->list, &module->ion_regions);
 end:
-	mutex_unlock(&module->pmem_regions_lock);
+	mutex_unlock(&module->ion_regions_lock);
 	return rc;
 }
 
-static int adsp_pmem_lookup_vaddr(struct msm_adsp_module *module, void **addr,
-		     unsigned long len, struct adsp_pmem_region **region)
+static int adsp_ion_lookup_vaddr(struct msm_adsp_module *module, void **addr,
+		     unsigned long len, struct adsp_ion_region **region)
 {
 	struct hlist_node *node;
 	void *vaddr = *addr;
-	struct adsp_pmem_region *region_elt;
+	struct adsp_ion_region *region_elt;
 
 	int match_count = 0;
 
 	*region = NULL;
 
 	/* returns physical address or zero */
-	hlist_for_each_entry(region_elt, node, &module->pmem_regions, list) {
+	hlist_for_each_entry(region_elt, node, &module->ion_regions, list) {
 		if (vaddr >= region_elt->vaddr &&
 		    vaddr < region_elt->vaddr + region_elt->len &&
 		    vaddr + len <= region_elt->vaddr + region_elt->len) {
@@ -186,7 +239,7 @@
 			"multiple hits for vaddr %p, len %ld\n",
 			module->name, vaddr, len);
 		hlist_for_each_entry(region_elt, node,
-				&module->pmem_regions, list) {
+				&module->ion_regions, list) {
 			if (vaddr >= region_elt->vaddr &&
 			    vaddr < region_elt->vaddr + region_elt->len &&
 			    vaddr + len <= region_elt->vaddr + region_elt->len)
@@ -200,16 +253,37 @@
 	return *region ? 0 : -1;
 }
 
-int adsp_pmem_fixup_kvaddr(struct msm_adsp_module *module, void **addr,
+int adsp_ion_do_cache_op(struct msm_adsp_module *module,
+				void *addr, void *paddr, unsigned long len,
+				unsigned long offset, int cmd)
+{
+	struct adsp_ion_region   *region;
+	void *vaddr = addr;
+	int ret;
+	ret = adsp_ion_lookup_vaddr(module, &vaddr, len, &region);
+	if (ret) {
+		MM_ERR("not patching %s (paddr & kvaddr)," \
+			" lookup (%p, %ld) failed\n",
+			module->name, vaddr, len);
+		return ret;
+	}
+	if ((region->ion_flag == CACHED) && region->handle) {
+		len = ((((len) + 31) & (~31)) + 32);
+		ret = msm_ion_do_cache_op(region->client, region->handle,
+				(void *)paddr, len, cmd);
+	}
+	return ret;
+}
+int adsp_ion_fixup_kvaddr(struct msm_adsp_module *module, void **addr,
 			   unsigned long *kvaddr, unsigned long len,
 			   struct file **filp, unsigned long *offset)
 {
-	struct adsp_pmem_region *region;
+	struct adsp_ion_region *region;
 	void *vaddr = *addr;
 	unsigned long *paddr = (unsigned long *)addr;
 	int ret;
 
-	ret = adsp_pmem_lookup_vaddr(module, addr, len, &region);
+	ret = adsp_ion_lookup_vaddr(module, addr, len, &region);
 	if (ret) {
 		MM_ERR("not patching %s (paddr & kvaddr),"
 			" lookup (%p, %ld) failed\n",
@@ -228,12 +302,12 @@
 int adsp_pmem_fixup(struct msm_adsp_module *module, void **addr,
 		    unsigned long len)
 {
-	struct adsp_pmem_region *region;
+	struct adsp_ion_region *region;
 	void *vaddr = *addr;
 	unsigned long *paddr = (unsigned long *)addr;
 	int ret;
 
-	ret = adsp_pmem_lookup_vaddr(module, addr, len, &region);
+	ret = adsp_ion_lookup_vaddr(module, addr, len, &region);
 	if (ret) {
 		MM_ERR("not patching %s, lookup (%p, %ld) failed\n",
 			module->name, vaddr, len);
@@ -281,7 +355,7 @@
 		goto end;
 	}
 
-	mutex_lock(&adev->module->pmem_regions_lock);
+	mutex_lock(&adev->module->ion_regions_lock);
 	if (adsp_verify_cmd(adev->module, cmd.queue, cmd_data, cmd.len)) {
 		MM_ERR("module %s: verify failed.\n", adev->module->name);
 		rc = -EINVAL;
@@ -291,7 +365,7 @@
 	wmb();
 	rc = msm_adsp_write(adev->module, cmd.queue, cmd_data, cmd.len);
 end:
-	mutex_unlock(&adev->module->pmem_regions_lock);
+	mutex_unlock(&adev->module->ion_regions_lock);
 
 	if (cmd.len > 256)
 		kfree(cmd_data);
@@ -309,14 +383,14 @@
 	return yes || adev->abort;
 }
 
-static int adsp_pmem_lookup_paddr(struct msm_adsp_module *module, void **addr,
-		     struct adsp_pmem_region **region)
+static int adsp_ion_lookup_paddr(struct msm_adsp_module *module, void **addr,
+		     struct adsp_ion_region **region)
 {
 	struct hlist_node *node;
 	unsigned long paddr = (unsigned long)(*addr);
-	struct adsp_pmem_region *region_elt;
+	struct adsp_ion_region *region_elt;
 
-	hlist_for_each_entry(region_elt, node, &module->pmem_regions, list) {
+	hlist_for_each_entry(region_elt, node, &module->ion_regions, list) {
 		if (paddr >= region_elt->paddr &&
 		    paddr < region_elt->paddr + region_elt->len) {
 			*region = region_elt;
@@ -328,12 +402,12 @@
 
 int adsp_pmem_paddr_fixup(struct msm_adsp_module *module, void **addr)
 {
-	struct adsp_pmem_region *region;
+	struct adsp_ion_region *region;
 	unsigned long paddr = (unsigned long)(*addr);
 	unsigned long *vaddr = (unsigned long *)addr;
 	int ret;
 
-	ret = adsp_pmem_lookup_paddr(module, addr, &region);
+	ret = adsp_ion_lookup_paddr(module, addr, &region);
 	if (ret) {
 		MM_ERR("not patching %s, paddr %p lookup failed\n",
 			module->name, vaddr);
@@ -429,20 +503,23 @@
 	return rc;
 }
 
-static int adsp_pmem_del(struct msm_adsp_module *module)
+static int adsp_ion_del(struct msm_adsp_module *module)
 {
 	struct hlist_node *node, *tmp;
-	struct adsp_pmem_region *region;
+	struct adsp_ion_region *region;
 
-	mutex_lock(&module->pmem_regions_lock);
-	hlist_for_each_safe(node, tmp, &module->pmem_regions) {
-		region = hlist_entry(node, struct adsp_pmem_region, list);
+	mutex_lock(&module->ion_regions_lock);
+	hlist_for_each_safe(node, tmp, &module->ion_regions) {
+		region = hlist_entry(node, struct adsp_ion_region, list);
 		hlist_del(node);
-		put_pmem_file(region->file);
+		MM_INFO("adsp_ion_del: module %s: fd %d, vaddr 0x%x, len %d\n",
+			module->name, region->fd, (unsigned int)region->vaddr,
+			(int)region->len);
+		free_ion_region(region->client, region->handle);
 		kfree(region);
 	}
-	mutex_unlock(&module->pmem_regions_lock);
-	BUG_ON(!hlist_empty(&module->pmem_regions));
+	mutex_unlock(&module->ion_regions_lock);
+	BUG_ON(!hlist_empty(&module->ion_regions));
 
 	return 0;
 }
@@ -479,10 +556,10 @@
 	}
 
 	case ADSP_IOCTL_REGISTER_PMEM: {
-		struct adsp_pmem_info info;
+		struct adsp_ion_info info;
 		if (copy_from_user(&info, (void *) arg, sizeof(info)))
 			return -EFAULT;
-		return adsp_pmem_add(adev->module, &info);
+		return adsp_ion_add(adev->module, &info);
 	}
 
 	case ADSP_IOCTL_ABORT_EVENT_READ:
@@ -491,7 +568,7 @@
 		break;
 
 	case ADSP_IOCTL_UNREGISTER_PMEM:
-		return adsp_pmem_del(adev->module);
+		return adsp_ion_del(adev->module);
 
 	default:
 		break;
@@ -510,7 +587,7 @@
 	/* clear module before putting it to avoid race with open() */
 	adev->module = NULL;
 
-	rc = adsp_pmem_del(module);
+	rc = adsp_ion_del(module);
 
 	msm_adsp_put(module);
 	return rc;
@@ -581,8 +658,8 @@
 	MM_INFO("opened module '%s' adev %p\n", adev->name, adev);
 	filp->private_data = adev;
 	adev->abort = 0;
-	INIT_HLIST_HEAD(&adev->module->pmem_regions);
-	mutex_init(&adev->module->pmem_regions_lock);
+	INIT_HLIST_HEAD(&adev->module->ion_regions);
+	mutex_init(&adev->module->ion_regions_lock);
 
 	return 0;
 }
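
The hunks above replace each PMEM region with an ION-backed region that owns its own client and handle. A minimal sketch of that acquire/release pairing follows, using only the ION calls already present in this diff; the "example_" names and the client string are hypothetical, and the headers are assumed to match what adsp.h pulls in above.

/*
 * Illustrative sketch only (not part of the patch): the sequence
 * get_ion_region_info() and free_ion_region() implement for one
 * userspace buffer fd.
 */
#include <linux/err.h>
#include <linux/ion.h>
#include <linux/kernel.h>

static int example_map_ion_fd(int fd, struct ion_client **client,
			      struct ion_handle **handle, void **kvaddr,
			      ion_phys_addr_t *paddr, size_t *len)
{
	unsigned long flags;

	*client = msm_ion_client_create(UINT_MAX, "example_adsp_client");
	if (IS_ERR_OR_NULL(*client))
		return -EINVAL;
	/* take a reference on the buffer behind the userspace fd */
	*handle = ion_import_dma_buf(*client, fd);
	if (IS_ERR_OR_NULL(*handle))
		goto destroy_client;
	if (ion_handle_get_flags(*client, *handle, &flags))
		goto free_handle;
	/* kernel-side mapping, kept until the region is deleted */
	*kvaddr = ion_map_kernel(*client, *handle, flags);
	if (IS_ERR_OR_NULL(*kvaddr))
		goto free_handle;
	/* physical address/length handed to the aDSP */
	if (ion_phys(*client, *handle, paddr, len))
		goto unmap;
	return 0;

unmap:
	ion_unmap_kernel(*client, *handle);
free_handle:
	ion_free(*client, *handle);
destroy_client:
	ion_client_destroy(*client);
	return -EINVAL;
}

free_ion_region() performs the same teardown (unmap, free, destroy) in reverse acquisition order, which is what lets adsp_ion_del() release every registered region independently.
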
diff --git a/arch/arm/mach-msm/qdsp5/adsp_video_verify_cmd.c b/arch/arm/mach-msm/qdsp5/adsp_video_verify_cmd.c
index 492fa0e..af259b5 100644
--- a/arch/arm/mach-msm/qdsp5/adsp_video_verify_cmd.c
+++ b/arch/arm/mach-msm/qdsp5/adsp_video_verify_cmd.c
@@ -3,7 +3,7 @@
  * Verificion code for aDSP VDEC packets from userspace.
  *
  * Copyright (C) 2008 Google, Inc.
- * Copyright (c) 2008-2010, Code Aurora Forum. All rights reserved.
+ * Copyright (c) 2008-2010, 2012 Code Aurora Forum. All rights reserved.
  *
  * This software is licensed under the terms of the GNU General Public
  * License version 2, as published by the Free Software Foundation, and
@@ -55,7 +55,7 @@
 	MM_DBG("virt %x %x\n", (unsigned int)phys_addr,
 			(unsigned int)phys_size);
 	if (phys_addr) {
-		if (adsp_pmem_fixup_kvaddr(module, &phys_addr,
+		if (adsp_ion_fixup_kvaddr(module, &phys_addr,
 			 &kvaddr, phys_size, filp, offset)) {
 			MM_ERR("ah%x al%x sh%x sl%x addr %x size %x\n",
 					*high, *low, size_high,
@@ -77,6 +77,7 @@
 static int verify_vdec_pkt_cmd(struct msm_adsp_module *module,
 			       void *cmd_data, size_t cmd_size)
 {
+	void *phys_addr;
 	unsigned short cmd_id = ((unsigned short *)cmd_data)[0];
 	viddec_cmd_subframe_pkt *pkt;
 	unsigned long subframe_pkt_addr;
@@ -89,7 +90,6 @@
 	unsigned short frame_buffer_size_high, frame_buffer_size_low;
 	struct file *filp = NULL;
 	unsigned long offset = 0;
-	struct pmem_addr pmem_addr;
 	unsigned long Codec_Id = 0;
 
 	MM_DBG("cmd_size %d cmd_id %d cmd_data %x\n", cmd_size, cmd_id,
@@ -102,6 +102,8 @@
 		return -1;
 
 	pkt = (viddec_cmd_subframe_pkt *)cmd_data;
+	phys_addr = high_low_short_to_ptr(pkt->subframe_packet_high,
+				pkt->subframe_packet_low);
 
 	if (pmem_fixup_high_low(&(pkt->subframe_packet_high),
 				&(pkt->subframe_packet_low),
@@ -114,16 +116,12 @@
 		return -1;
 	Codec_Id = pkt->codec_selection_word;
 	/*Invalidate cache before accessing the cached pmem buffer*/
-	if (filp) {
-		pmem_addr.vaddr = subframe_pkt_addr;
-		pmem_addr.length = (((subframe_pkt_size*2) + 31) & (~31)) + 32;
-		pmem_addr.offset = offset;
-		if (pmem_cache_maint (filp, PMEM_INV_CACHES,  &pmem_addr)) {
-			MM_ERR("Cache operation failed for phys addr high %x"
-				" addr low %x\n", pkt->subframe_packet_high,
-				pkt->subframe_packet_low);
-			return -EINVAL;
-		}
+	if (adsp_ion_do_cache_op(module, phys_addr, (void *)subframe_pkt_addr,
+		subframe_pkt_size*2, offset, ION_IOC_INV_CACHES)){
+		MM_ERR("Cache operation failed for" \
+			" phys addr high %x addr low %x\n",
+			pkt->subframe_packet_high, pkt->subframe_packet_low);
+		return -EINVAL;
 	}
 	/* deref those ptrs and check if they are a frame header packet */
 	frame_header_pkt = (unsigned short *)subframe_pkt_addr;
@@ -241,19 +239,14 @@
 			frame_buffer_low += 2;
 		}
 	}
-	/*Flush the cached pmem subframe packet before sending to DSP*/
-	if (filp) {
-		pmem_addr.vaddr = subframe_pkt_addr;
-		pmem_addr.length = MAX_FLUSH_SIZE;
-		pmem_addr.offset = offset;
-		if (pmem_cache_maint(filp, PMEM_CLEAN_CACHES, &pmem_addr)) {
-			MM_ERR("Cache operation failed for phys addr high %x"
-				" addr low %x\n", pkt->subframe_packet_high,
-				pkt->subframe_packet_low);
-			return -1;
-		}
+	/*Flush the cached mem subframe packet before sending to DSP*/
+	if (adsp_ion_do_cache_op(module,  phys_addr, (void *)subframe_pkt_addr,
+		MAX_FLUSH_SIZE, offset, ION_IOC_CLEAN_CACHES)){
+		MM_ERR("Cache operation failed for" \
+			" phys addr high %x addr low %x\n",
+			pkt->subframe_packet_high, pkt->subframe_packet_low);
+		return -EINVAL;
 	}
-
 	return 0;
 }
 
diff --git a/arch/arm/mach-msm/qdsp5/adsp_videoenc_verify_cmd.c b/arch/arm/mach-msm/qdsp5/adsp_videoenc_verify_cmd.c
index 936b7af..290a14c 100644
--- a/arch/arm/mach-msm/qdsp5/adsp_videoenc_verify_cmd.c
+++ b/arch/arm/mach-msm/qdsp5/adsp_videoenc_verify_cmd.c
@@ -3,7 +3,7 @@
  * Verificion code for aDSP VENC packets from userspace.
  *
  * Copyright (C) 2008 Google, Inc.
- * Copyright (c) 2008-2009, Code Aurora Forum. All rights reserved.
+ * Copyright (c) 2008-2009, 2012 Code Aurora Forum. All rights reserved.
  *
  * This software is licensed under the terms of the GNU General Public
  * License version 2, as published by the Free Software Foundation, and
@@ -53,7 +53,7 @@
 	phys_size = (unsigned long)high_low_short_to_ptr(size_high, size_low);
 	MM_DBG("virt %x %x\n", (unsigned int)phys_addr,
 			(unsigned int)phys_size);
-	if (adsp_pmem_fixup_kvaddr(module, &phys_addr, &kvaddr, phys_size,
+	if (adsp_ion_fixup_kvaddr(module, &phys_addr, &kvaddr, phys_size,
 				NULL, NULL)) {
 		MM_ERR("ah%x al%x sh%x sl%x addr %x size %x\n",
 			*high, *low, size_high,
diff --git a/arch/arm/mach-msm/qdsp5/audio_aac.c b/arch/arm/mach-msm/qdsp5/audio_aac.c
index 725819f..15e0590 100644
--- a/arch/arm/mach-msm/qdsp5/audio_aac.c
+++ b/arch/arm/mach-msm/qdsp5/audio_aac.c
@@ -31,10 +31,10 @@
 #include <linux/delay.h>
 #include <linux/list.h>
 #include <linux/earlysuspend.h>
-#include <linux/android_pmem.h>
 #include <linux/slab.h>
 #include <linux/msm_audio_aac.h>
 #include <linux/memory_alloc.h>
+#include <linux/ion.h>
 
 #include <mach/msm_adsp.h>
 #include <mach/iommu.h>
@@ -181,6 +181,9 @@
 	int eq_needs_commit;
 	audpp_cmd_cfg_object_params_eqalizer eq;
 	audpp_cmd_cfg_object_params_volume vol_pan;
+	struct ion_client *client;
+	struct ion_handle *input_buff_handle;
+	struct ion_handle *output_buff_handle;
 };
 
 static int auddec_dsp_config(struct audio *audio, int enable);
@@ -1508,10 +1511,11 @@
 	audio->event_abort = 1;
 	wake_up(&audio->event_wait);
 	audaac_reset_event_queue(audio);
-	iounmap(audio->map_v_write);
-	free_contiguous_memory_by_paddr(audio->phys);
-	iounmap(audio->map_v_read);
-	free_contiguous_memory_by_paddr(audio->read_phys);
+	ion_unmap_kernel(audio->client, audio->output_buff_handle);
+	ion_free(audio->client, audio->output_buff_handle);
+	ion_unmap_kernel(audio->client, audio->input_buff_handle);
+	ion_free(audio->client, audio->input_buff_handle);
+	ion_client_destroy(audio->client);
 	mutex_unlock(&audio->lock);
 #ifdef CONFIG_DEBUG_FS
 	if (audio->dentry)
@@ -1653,8 +1657,13 @@
 {
 	struct audio *audio = NULL;
 	int rc, dec_attrb, decid, index, offset = 0;
-	unsigned pmem_sz = DMASZ;
+	unsigned mem_sz = DMASZ;
 	struct audaac_event *e_node = NULL;
+	int len = 0;
+	unsigned long ionflag = 0;
+	ion_phys_addr_t addr = 0;
+	struct ion_handle *handle = NULL;
+	struct ion_client *client = NULL;
 #ifdef CONFIG_DEBUG_FS
 	/* 4 bytes represents decoder number, 1 byte for terminate string */
 	char name[sizeof "msm_aac_" + 5];
@@ -1696,61 +1705,92 @@
 	}
 	audio->dec_id = decid & MSM_AUD_DECODER_MASK;
 
-	while (pmem_sz >= DMASZ_MIN) {
-		MM_DBG("pmemsz = %d\n", pmem_sz);
-		audio->phys = allocate_contiguous_ebi_nomap(pmem_sz, SZ_4K);
-		if (audio->phys) {
-			audio->map_v_write = ioremap(audio->phys, pmem_sz);
-			if (IS_ERR(audio->map_v_write)) {
-				MM_ERR("could not map write buffers, \
-						freeing instance 0x%08x\n",
-						(int)audio);
-				rc = -ENOMEM;
-				free_contiguous_memory_by_paddr(audio->phys);
-				audpp_adec_free(audio->dec_id);
-				kfree(audio);
-				goto done;
-			}
-			audio->data = audio->map_v_write;
-			MM_DBG("write buf: phy addr 0x%08x kernel addr \
-				0x%08x\n", audio->phys, (int)audio->data);
-			break;
-		} else if (pmem_sz == DMASZ_MIN) {
-			MM_ERR("could not allocate write buffers, freeing \
-					instance 0x%08x\n", (int)audio);
-			rc = -ENOMEM;
-			audpp_adec_free(audio->dec_id);
-			kfree(audio);
-			goto done;
-		} else
-			pmem_sz >>= 1;
-	}
-	audio->out_dma_sz = pmem_sz;
-
-	audio->read_phys = allocate_contiguous_ebi_nomap(PCM_BUFSZ_MIN *
-						PCM_BUF_MAX_COUNT, SZ_4K);
-	if (!audio->read_phys) {
-		MM_ERR("could not allocate read buffers, freeing instance \
-				0x%08x\n", (int)audio);
+	client = msm_ion_client_create(UINT_MAX, "Audio_AAC_client");
+	if (IS_ERR_OR_NULL(client)) {
+		MM_ERR("Unable to create ION client\n");
 		rc = -ENOMEM;
-		iounmap(audio->map_v_write);
-		free_contiguous_memory_by_paddr(audio->phys);
-		audpp_adec_free(audio->dec_id);
-		kfree(audio);
-		goto done;
+		goto client_create_error;
 	}
-	audio->map_v_read = ioremap(audio->read_phys,
-					PCM_BUFSZ_MIN * PCM_BUF_MAX_COUNT);
+	audio->client = client;
+
+	MM_DBG("allocating mem sz = %d\n", mem_sz);
+	handle = ion_alloc(client, mem_sz, SZ_4K,
+		ION_HEAP(ION_AUDIO_HEAP_ID));
+	if (IS_ERR_OR_NULL(handle)) {
+		MM_ERR("Unable to allocate O/P buffers\n");
+		rc = -ENOMEM;
+		goto output_buff_alloc_error;
+	}
+
+	audio->output_buff_handle = handle;
+
+	rc = ion_phys(client, handle, &addr, &len);
+	if (rc) {
+		MM_ERR("O/P buffers:Invalid phy: %x sz: %x\n",
+			(unsigned int) addr, (unsigned int) len);
+		goto output_buff_get_phys_error;
+	} else {
+		MM_INFO("O/P buffers:valid phy: %x sz: %x\n",
+			(unsigned int) addr, (unsigned int) len);
+	}
+	audio->phys = (int32_t)addr;
+
+	rc = ion_handle_get_flags(client, handle, &ionflag);
+	if (rc) {
+		MM_ERR("could not get flags for the handle\n");
+		goto output_buff_get_flags_error;
+	}
+
+	audio->map_v_write = ion_map_kernel(client, handle, ionflag);
+	if (IS_ERR(audio->map_v_write)) {
+		MM_ERR("could not map write buffers, freeing instance 0x%08x\n",
+				(int)audio);
+		rc = -ENOMEM;
+		goto output_buff_map_error;
+	}
+	audio->data = audio->map_v_write;
+	MM_DBG("write buf: phy addr 0x%08x kernel addr 0x%08x\n",
+		audio->phys, (int)audio->data);
+
+	audio->out_dma_sz = mem_sz;
+
+	mem_sz = (PCM_BUFSZ_MIN * PCM_BUF_MAX_COUNT);
+	MM_DBG("allocating mem sz = %d\n", mem_sz);
+	handle = ion_alloc(client, mem_sz,
+			SZ_4K, ION_HEAP(ION_AUDIO_HEAP_ID));
+	if (IS_ERR_OR_NULL(handle)) {
+		MM_ERR("Unable to allocate I/P buffers\n");
+		rc = -ENOMEM;
+		goto input_buff_alloc_error;
+	}
+
+	audio->input_buff_handle = handle;
+
+	rc = ion_phys(client , handle, &addr, &len);
+	if (rc) {
+		MM_ERR("I/P buffers:Invalid phy: %x sz: %x\n",
+			(unsigned int) addr, (unsigned int) len);
+		goto input_buff_get_phys_error;
+	} else {
+		MM_INFO("I/P buffers:valid phy: %x sz: %x\n",
+			(unsigned int) addr, (unsigned int) len);
+	}
+	audio->read_phys = (int32_t)addr;
+
+	rc = ion_handle_get_flags(client,
+		handle, &ionflag);
+	if (rc) {
+		MM_ERR("could not get flags for the handle\n");
+		goto input_buff_get_flags_error;
+	}
+
+	audio->map_v_read = ion_map_kernel(client,
+		handle, ionflag);
 	if (IS_ERR(audio->map_v_read)) {
 		MM_ERR("could not map read buffers, freeing instance \
 				0x%08x\n", (int)audio);
 		rc = -ENOMEM;
-		iounmap(audio->map_v_write);
-		free_contiguous_memory_by_paddr(audio->phys);
-		free_contiguous_memory_by_paddr(audio->read_phys);
-		audpp_adec_free(audio->dec_id);
-		kfree(audio);
-		goto done;
+		goto input_buff_map_error;
 	}
 	audio->read_data = audio->map_v_read;
 	MM_DBG("read buf: phy addr 0x%08x kernel addr 0x%08x\n",
@@ -1873,10 +1913,20 @@
 done:
 	return rc;
 err:
-	iounmap(audio->map_v_write);
-	free_contiguous_memory_by_paddr(audio->phys);
-	iounmap(audio->map_v_read);
-	free_contiguous_memory_by_paddr(audio->read_phys);
+	ion_unmap_kernel(client, audio->input_buff_handle);
+input_buff_map_error:
+input_buff_get_flags_error:
+input_buff_get_phys_error:
+	ion_free(client, audio->input_buff_handle);
+input_buff_alloc_error:
+	ion_unmap_kernel(client, audio->output_buff_handle);
+output_buff_map_error:
+output_buff_get_phys_error:
+output_buff_get_flags_error:
+	ion_free(client, audio->output_buff_handle);
+output_buff_alloc_error:
+	ion_client_destroy(client);
+client_create_error:
 	audpp_adec_free(audio->dec_id);
 	kfree(audio);
 	return rc;
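
Each audio_*.c open() below repeats the same ION allocation sequence that audio_aac.c uses above: create a client, ion_alloc() from the audio heap, resolve the physical address with ion_phys(), read the flags, then ion_map_kernel(). A condensed sketch of that shared pattern; the helper itself and its parameter layout are hypothetical, only calls already used in this patch appear, and SZ_4K / ION_AUDIO_HEAP_ID are assumed to come from the headers the drivers already include.

/*
 * Illustrative sketch only (not part of the patch): the DMA-buffer
 * setup each audio_*.c open() in this series repeats.
 */
#include <linux/err.h>
#include <linux/ion.h>

static void *example_alloc_audio_buf(struct ion_client *client, size_t size,
				     struct ion_handle **handle,
				     ion_phys_addr_t *phys)
{
	unsigned long flags;
	size_t len;
	void *vaddr;

	*handle = ion_alloc(client, size, SZ_4K, ION_HEAP(ION_AUDIO_HEAP_ID));
	if (IS_ERR_OR_NULL(*handle))
		return NULL;
	/* physical address programmed into the DSP command packets */
	if (ion_phys(client, *handle, phys, &len))
		goto err_free;
	if (ion_handle_get_flags(client, *handle, &flags))
		goto err_free;
	/* CPU-side mapping used by the driver's copy paths */
	vaddr = ion_map_kernel(client, *handle, flags);
	if (IS_ERR_OR_NULL(vaddr))
		goto err_free;
	return vaddr;

err_free:
	ion_free(client, *handle);
	*handle = NULL;
	return NULL;
}

On failure the handle is released and NULL is returned, leaving the caller to destroy its client, which mirrors the goto unwinding ladders in the drivers below.
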
diff --git a/arch/arm/mach-msm/qdsp5/audio_aac_in.c b/arch/arm/mach-msm/qdsp5/audio_aac_in.c
index 79a828a..99e10a4 100644
--- a/arch/arm/mach-msm/qdsp5/audio_aac_in.c
+++ b/arch/arm/mach-msm/qdsp5/audio_aac_in.c
@@ -32,8 +32,8 @@
 #include <linux/dma-mapping.h>
 #include <linux/delay.h>
 #include <linux/msm_audio_aac.h>
-#include <linux/android_pmem.h>
 #include <linux/memory_alloc.h>
+#include <linux/ion.h>
 
 #include "audmgr.h"
 
@@ -147,6 +147,9 @@
 	int enabled;
 	int running;
 	int stopped; /* set when stopped, cleared on flush */
+	struct ion_client *client;
+	struct ion_handle *input_buff_handle;
+	struct ion_handle *output_buff_handle;
 };
 
 struct audio_frame {
@@ -1248,16 +1251,17 @@
 
 	if ((audio->mode == MSM_AUD_ENC_MODE_NONTUNNEL) && \
 	   (audio->out_data)) {
-		iounmap(audio->map_v_write);
-		free_contiguous_memory_by_paddr(audio->out_phys);
+		ion_unmap_kernel(audio->client, audio->input_buff_handle);
+		ion_free(audio->client, audio->input_buff_handle);
 		audio->out_data = NULL;
 	}
 
 	if (audio->data) {
-		iounmap(audio->map_v_read);
-		free_contiguous_memory_by_paddr(audio->phys);
+		ion_unmap_kernel(audio->client, audio->output_buff_handle);
+		ion_free(audio->client, audio->output_buff_handle);
 		audio->data = NULL;
 	}
+	ion_client_destroy(audio->client);
 	mutex_unlock(&audio->lock);
 	return 0;
 }
@@ -1270,6 +1274,11 @@
 	int rc;
 	int encid;
 	int dma_size = 0;
+	int len = 0;
+	unsigned long ionflag = 0;
+	ion_phys_addr_t addr = 0;
+	struct ion_handle *handle = NULL;
+	struct ion_client *client = NULL;
 
 	mutex_lock(&audio->lock);
 	if (audio->opened) {
@@ -1352,52 +1361,103 @@
 	audaac_in_flush(audio);
 	audaac_out_flush(audio);
 
-	audio->phys = allocate_contiguous_ebi_nomap(dma_size, SZ_4K);
-	if (audio->phys) {
-		audio->map_v_read = ioremap(
-					audio->phys, dma_size);
-		if (IS_ERR(audio->map_v_read)) {
-			MM_ERR("could not map DMA buffers\n");
-			rc = -ENOMEM;
-			free_contiguous_memory_by_paddr(audio->phys);
-			goto evt_error;
-		}
-		audio->data = audio->map_v_read;
-	} else {
-		MM_ERR("could not allocate read buffers\n");
+
+	client = msm_ion_client_create(UINT_MAX, "Audio_AAC_in_client");
+	if (IS_ERR_OR_NULL(client)) {
+		MM_ERR("Unable to create ION client\n");
 		rc = -ENOMEM;
-		goto evt_error;
+		goto client_create_error;
 	}
-	MM_DBG("Memory addr = 0x%8x  phy addr = 0x%8x\n",\
-		(int) audio->data, (int) audio->phys);
+	audio->client = client;
+
+	MM_DBG("allocating mem sz = %d\n", dma_size);
+	handle = ion_alloc(client, dma_size, SZ_4K,
+		ION_HEAP(ION_AUDIO_HEAP_ID));
+	if (IS_ERR_OR_NULL(handle)) {
+		MM_ERR("Unable to allocate O/P buffers\n");
+		rc = -ENOMEM;
+		goto output_buff_alloc_error;
+	}
+
+	audio->output_buff_handle = handle;
+
+	rc = ion_phys(client , handle, &addr, &len);
+	if (rc) {
+		MM_ERR("O/P buffers:Invalid phy: %x sz: %x\n",
+			(unsigned int) addr, (unsigned int) len);
+		rc = -ENOMEM;
+		goto output_buff_get_phys_error;
+	} else {
+		MM_INFO("O/P buffers:valid phy: %x sz: %x\n",
+			(unsigned int) addr, (unsigned int) len);
+	}
+	audio->phys = (int32_t)addr;
+
+	rc = ion_handle_get_flags(client, handle, &ionflag);
+	if (rc) {
+		MM_ERR("could not get flags for the handle\n");
+		rc = -ENOMEM;
+		goto output_buff_get_flags_error;
+	}
+
+	audio->map_v_read = ion_map_kernel(client, handle, ionflag);
+	if (IS_ERR(audio->map_v_read)) {
+		MM_ERR("could not map read buffers, freeing instance 0x%08x\n",
+				(int)audio);
+		rc = -ENOMEM;
+		goto output_buff_map_error;
+	}
+	audio->data = audio->map_v_read;
+	MM_DBG("read buf: phy addr 0x%08x kernel addr 0x%08x\n",
+		audio->phys, (int)audio->data);
 
 	audio->out_data = NULL;
 	if (audio->mode == MSM_AUD_ENC_MODE_NONTUNNEL) {
-		audio->out_phys = allocate_contiguous_ebi_nomap(BUFFER_SIZE,
-								SZ_4K);
-		if (!audio->out_phys) {
-			MM_ERR("could not allocate write buffers\n");
+
+		MM_DBG("allocating BUFFER_SIZE  %d\n", BUFFER_SIZE);
+		handle = ion_alloc(client, BUFFER_SIZE,
+				SZ_4K, ION_HEAP(ION_AUDIO_HEAP_ID));
+		if (IS_ERR_OR_NULL(handle)) {
+			MM_ERR("Unable to allocate I/P buffers\n");
 			rc = -ENOMEM;
-			iounmap(audio->map_v_read);
-			free_contiguous_memory_by_paddr(audio->phys);
-			goto evt_error;
-		} else {
-			audio->map_v_write = ioremap(
-					audio->out_phys, BUFFER_SIZE);
-			if (IS_ERR(audio->map_v_write)) {
-				MM_ERR("could not map write phys address\n");
-				rc = -ENOMEM;
-				iounmap(audio->map_v_read);
-				free_contiguous_memory_by_paddr(audio->phys);
-				free_contiguous_memory_by_paddr(\
-						audio->out_phys);
-				goto evt_error;
-			}
-			audio->out_data = audio->map_v_write;
-			MM_DBG("wr buf: phy addr 0x%08x kernel addr 0x%08x\n",
-					audio->out_phys, (int)audio->out_data);
+			goto input_buff_alloc_error;
 		}
 
+		audio->input_buff_handle = handle;
+
+		rc = ion_phys(client , handle, &addr, &len);
+		if (rc) {
+			MM_ERR("I/P buffers:Invalid phy: %x sz: %x\n",
+				(unsigned int) addr, (unsigned int) len);
+			rc = -ENOMEM;
+			goto input_buff_get_phys_error;
+		} else {
+			MM_INFO("Got valid phy: %x sz: %x\n",
+				(unsigned int) addr,
+				(unsigned int) len);
+		}
+		audio->out_phys = (int32_t)addr;
+
+		rc = ion_handle_get_flags(client,
+			handle, &ionflag);
+		if (rc) {
+			MM_ERR("could not get flags for the handle\n");
+			rc = -ENOMEM;
+			goto input_buff_get_flags_error;
+		}
+
+		audio->map_v_write = ion_map_kernel(client,
+			handle, ionflag);
+		if (IS_ERR(audio->map_v_write)) {
+			MM_ERR("could not map write buffers\n");
+			rc = -ENOMEM;
+			goto input_buff_map_error;
+		}
+		audio->out_data = audio->map_v_write;
+		MM_DBG("write buf: phy addr 0x%08x kernel addr 0x%08x\n",
+					(unsigned int)addr,
+					(unsigned int)audio->out_data);
+
 		/* Initialize buffer */
 		audio->out[0].data = audio->out_data + 0;
 		audio->out[0].addr = audio->out_phys + 0;
@@ -1419,7 +1479,19 @@
 done:
 	mutex_unlock(&audio->lock);
 	return rc;
-evt_error:
+input_buff_map_error:
+input_buff_get_flags_error:
+input_buff_get_phys_error:
+	ion_free(client, audio->input_buff_handle);
+input_buff_alloc_error:
+	ion_unmap_kernel(client, audio->output_buff_handle);
+output_buff_map_error:
+output_buff_get_phys_error:
+output_buff_get_flags_error:
+	ion_free(client, audio->output_buff_handle);
+output_buff_alloc_error:
+	ion_client_destroy(client);
+client_create_error:
 	msm_adsp_put(audio->audrec);
 	if (audio->mode == MSM_AUD_ENC_MODE_TUNNEL)
 		msm_adsp_put(audio->audpre);
diff --git a/arch/arm/mach-msm/qdsp5/audio_amrnb.c b/arch/arm/mach-msm/qdsp5/audio_amrnb.c
index 1a1002e..45fa045 100644
--- a/arch/arm/mach-msm/qdsp5/audio_amrnb.c
+++ b/arch/arm/mach-msm/qdsp5/audio_amrnb.c
@@ -39,10 +39,10 @@
 #include <linux/delay.h>
 #include <linux/list.h>
 #include <linux/earlysuspend.h>
-#include <linux/android_pmem.h>
 #include <linux/slab.h>
 #include <linux/msm_audio.h>
 #include <linux/memory_alloc.h>
+#include <linux/ion.h>
 
 #include <mach/msm_adsp.h>
 #include <mach/iommu.h>
@@ -179,6 +179,9 @@
 	int eq_needs_commit;
 	audpp_cmd_cfg_object_params_eqalizer eq;
 	audpp_cmd_cfg_object_params_volume vol_pan;
+	struct ion_client *client;
+	struct ion_handle *input_buff_handle;
+	struct ion_handle *output_buff_handle;
 };
 
 struct audpp_cmd_cfg_adec_params_amrnb {
@@ -784,6 +787,10 @@
 	uint16_t enable_mask;
 	int enable;
 	int prev_state;
+	unsigned long ionflag = 0;
+	ion_phys_addr_t addr = 0;
+	struct ion_handle *handle = NULL;
+	int len = 0;
 
 	MM_DBG("cmd = %d\n", cmd);
 
@@ -970,23 +977,53 @@
 			MM_DBG("allocate PCM buf %d\n",
 					config.buffer_count *
 					config.buffer_size);
-			audio->read_phys = allocate_contiguous_ebi_nomap(
-						config.buffer_size *
-						config.buffer_count,
-						SZ_4K);
-			if (!audio->read_phys) {
+				handle = ion_alloc(audio->client,
+					(config.buffer_size *
+					config.buffer_count),
+					SZ_4K, ION_HEAP(ION_AUDIO_HEAP_ID));
+				if (IS_ERR_OR_NULL(handle)) {
+					MM_ERR("Unable to alloc I/P buffs\n");
+					audio->input_buff_handle = NULL;
 					rc = -ENOMEM;
 					break;
-			}
-			audio->map_v_read = ioremap(
-						audio->read_phys,
-						config.buffer_size *
-						config.buffer_count);
+				}
+
+				audio->input_buff_handle = handle;
+
+				rc = ion_phys(audio->client ,
+					handle, &addr, &len);
+				if (rc) {
+					MM_ERR("Invalid phy: %x sz: %x\n",
+						(unsigned int) addr,
+						(unsigned int) len);
+					ion_free(audio->client, handle);
+					audio->input_buff_handle = NULL;
+					rc = -ENOMEM;
+					break;
+				} else {
+					MM_INFO("Got valid phy: %x sz: %x\n",
+						(unsigned int) addr,
+						(unsigned int) len);
+				}
+				audio->read_phys = (int32_t)addr;
+
+				rc = ion_handle_get_flags(audio->client,
+					handle, &ionflag);
+				if (rc) {
+					MM_ERR("could not get flags\n");
+					ion_free(audio->client, handle);
+					audio->input_buff_handle = NULL;
+					rc = -ENOMEM;
+					break;
+				}
+				audio->map_v_read = ion_map_kernel(
+					audio->client,
+					handle, ionflag);
 			if (IS_ERR(audio->map_v_read)) {
 				MM_ERR("failed to map read buf\n");
+				ion_free(audio->client, handle);
+				audio->input_buff_handle = NULL;
 				rc = -ENOMEM;
-				free_contiguous_memory_by_paddr(
-							audio->read_phys);
 			} else {
 				uint8_t index;
 				uint32_t offset = 0;
@@ -1295,12 +1332,13 @@
 	audio->event_abort = 1;
 	wake_up(&audio->event_wait);
 	audamrnb_reset_event_queue(audio);
-	iounmap(audio->map_v_write);
-	free_contiguous_memory_by_paddr(audio->phys);
-	if (audio->read_data) {
-		iounmap(audio->map_v_read);
-		free_contiguous_memory_by_paddr(audio->read_phys);
+	ion_unmap_kernel(audio->client, audio->output_buff_handle);
+	ion_free(audio->client, audio->output_buff_handle);
+	if (audio->input_buff_handle != NULL) {
+		ion_unmap_kernel(audio->client, audio->input_buff_handle);
+		ion_free(audio->client, audio->input_buff_handle);
 	}
+	ion_client_destroy(audio->client);
 	mutex_unlock(&audio->lock);
 #ifdef CONFIG_DEBUG_FS
 	if (audio->dentry)
@@ -1439,6 +1477,12 @@
 	struct audio *audio = NULL;
 	int rc, dec_attrb, decid, i;
 	struct audamrnb_event *e_node = NULL;
+	unsigned mem_sz = DMASZ;
+	unsigned long ionflag = 0;
+	ion_phys_addr_t addr = 0;
+	struct ion_handle *handle = NULL;
+	struct ion_client *client = NULL;
+	int len = 0;
 #ifdef CONFIG_DEBUG_FS
 	/* 4 bytes represents decoder number, 1 byte for terminate string */
 	char name[sizeof "msm_amrnb_" + 5];
@@ -1482,30 +1526,50 @@
 
 	audio->dec_id = decid & MSM_AUD_DECODER_MASK;
 
-	audio->phys = allocate_contiguous_ebi_nomap(DMASZ, SZ_4K);
-	if (!audio->phys) {
-		MM_ERR("could not allocate write buffers, freeing instance \
-				0x%08x\n", (int)audio);
+	client = msm_ion_client_create(UINT_MAX, "Audio_AMR_NB_Client");
+	if (IS_ERR_OR_NULL(client)) {
+		pr_err("Unable to create ION client\n");
 		rc = -ENOMEM;
-		audpp_adec_free(audio->dec_id);
-		kfree(audio);
-		goto done;
-	} else {
-		audio->map_v_write = ioremap(
-					audio->phys, DMASZ);
-		if (IS_ERR(audio->map_v_write)) {
-			MM_ERR("could not map write buffers, freeing \
-					instance 0x%08x freeing\n", (int)audio);
-			rc = -ENOMEM;
-			free_contiguous_memory_by_paddr(audio->phys);
-			audpp_adec_free(audio->dec_id);
-			kfree(audio);
-			goto done;
-		}
-		audio->data = audio->map_v_write;
-		MM_DBG("write buf: phy addr 0x%08x kernel addr \
-				0x%08x\n", audio->phys, (int)audio->data);
+		goto client_create_error;
 	}
+	audio->client = client;
+
+	handle = ion_alloc(client, mem_sz, SZ_4K,
+		ION_HEAP(ION_AUDIO_HEAP_ID));
+	if (IS_ERR_OR_NULL(handle)) {
+		MM_ERR("Unable to allocate O/P buffers\n");
+		rc = -ENOMEM;
+		goto output_buff_alloc_error;
+	}
+	audio->output_buff_handle = handle;
+
+	rc = ion_phys(client, handle, &addr, &len);
+	if (rc) {
+		MM_ERR("O/P buffers:Invalid phy: %x sz: %x\n",
+			(unsigned int) addr, (unsigned int) len);
+		goto output_buff_get_phys_error;
+	} else {
+		MM_INFO("O/P buffers:valid phy: %x sz: %x\n",
+			(unsigned int) addr, (unsigned int) len);
+	}
+	audio->phys = (int32_t)addr;
+
+
+	rc = ion_handle_get_flags(client, handle, &ionflag);
+	if (rc) {
+		MM_ERR("could not get flags for the handle\n");
+		goto output_buff_get_flags_error;
+	}
+
+	audio->map_v_write = ion_map_kernel(client, handle, ionflag);
+	if (IS_ERR(audio->map_v_write)) {
+		MM_ERR("could not map write buffers\n");
+		rc = -ENOMEM;
+		goto output_buff_map_error;
+	}
+	audio->data = audio->map_v_write;
+	MM_DBG("write buf: phy addr 0x%08x kernel addr 0x%08x\n",
+		audio->phys, (int)audio->data);
 
 	if (audio->pcm_feedback == TUNNEL_MODE_PLAYBACK) {
 		rc = audmgr_open(&audio->audmgr);
@@ -1536,6 +1600,8 @@
 		goto err;
 	}
 
+	audio->input_buff_handle = NULL;
+
 	mutex_init(&audio->lock);
 	mutex_init(&audio->write_lock);
 	mutex_init(&audio->read_lock);
@@ -1590,8 +1656,14 @@
 done:
 	return rc;
 err:
-	iounmap(audio->map_v_write);
-	free_contiguous_memory_by_paddr(audio->phys);
+	ion_unmap_kernel(client, audio->output_buff_handle);
+output_buff_map_error:
+output_buff_get_phys_error:
+output_buff_get_flags_error:
+	ion_free(client, audio->output_buff_handle);
+output_buff_alloc_error:
+	ion_client_destroy(client);
+client_create_error:
 	audpp_adec_free(audio->dec_id);
 	kfree(audio);
 	return rc;
diff --git a/arch/arm/mach-msm/qdsp5/audio_amrnb_in.c b/arch/arm/mach-msm/qdsp5/audio_amrnb_in.c
index ac2b5ca..39578c1 100644
--- a/arch/arm/mach-msm/qdsp5/audio_amrnb_in.c
+++ b/arch/arm/mach-msm/qdsp5/audio_amrnb_in.c
@@ -36,8 +36,8 @@
 #include <linux/debugfs.h>
 #include <linux/delay.h>
 #include <linux/msm_audio_amrnb.h>
-#include <linux/android_pmem.h>
 #include <linux/memory_alloc.h>
+#include <linux/ion.h>
 
 #include "audmgr.h"
 
@@ -151,6 +151,9 @@
 	uint8_t enabled;
 	uint8_t running;
 	uint8_t stopped; /* set when stopped, cleared on flush */
+	struct ion_client *client;
+	struct ion_handle *input_buff_handle;
+
 };
 
 struct audio_frame {
@@ -1207,8 +1210,8 @@
 	audio->opened = 0;
 	if ((audio->mode == MSM_AUD_ENC_MODE_NONTUNNEL) && \
 	   (audio->out_data)) {
-		iounmap(audio->map_v_write);
-		free_contiguous_memory_by_paddr(audio->out_phys);
+		ion_unmap_kernel(audio->client, audio->input_buff_handle);
+		ion_free(audio->client, audio->input_buff_handle);
 		audio->out_data = NULL;
 	}
 	if (audio->data) {
@@ -1221,6 +1224,7 @@
 			dma_size, audio->data, audio->phys);
 		audio->data = NULL;
 	}
+	ion_client_destroy(audio->client);
 	mutex_unlock(&audio->lock);
 	return 0;
 }
@@ -1233,6 +1237,11 @@
 	int32_t rc;
 	int encid;
 	int32_t dma_size = 0;
+	int len = 0;
+	unsigned long ionflag = 0;
+	ion_phys_addr_t addr = 0;
+	struct ion_handle *handle = NULL;
+	struct ion_client *client = NULL;
 
 	mutex_lock(&audio->lock);
 	if (audio->opened) {
@@ -1320,34 +1329,60 @@
 		goto evt_error;
 	}
 
+	client = msm_ion_client_create(UINT_MAX, "Audio_AMRNB_in_client");
+	if (IS_ERR_OR_NULL(client)) {
+		MM_ERR("Unable to create ION client\n");
+		rc = -ENOMEM;
+		goto client_create_error;
+	}
+	audio->client = client;
+
 	audio->out_data = NULL;
 	if (audio->mode == MSM_AUD_ENC_MODE_NONTUNNEL) {
-		audio->out_phys = allocate_contiguous_ebi_nomap(BUFFER_SIZE,
-								SZ_4K);
-		if (!audio->out_phys) {
-			MM_ERR("could not allocate write buffers\n");
+		MM_DBG("allocating BUFFER_SIZE  %d\n", BUFFER_SIZE);
+		handle = ion_alloc(client, BUFFER_SIZE,
+				SZ_4K, ION_HEAP(ION_AUDIO_HEAP_ID));
+		if (IS_ERR_OR_NULL(handle)) {
+			MM_ERR("Unable to allocate write buffers\n");
 			rc = -ENOMEM;
-			dma_free_coherent(NULL,
-				dma_size, audio->data, audio->phys);
-			goto evt_error;
-		} else {
-			audio->map_v_write = ioremap(
-					audio->out_phys, BUFFER_SIZE);
-			if (IS_ERR(audio->map_v_write)) {
-				MM_ERR("could not map write phys address\n");
-				rc = -ENOMEM;
-				dma_free_coherent(NULL,
-					dma_size, audio->data, audio->phys);
-				free_contiguous_memory_by_paddr(\
-						audio->out_phys);
-				goto evt_error;
-			}
-			audio->out_data = audio->map_v_write;
-			MM_DBG("wr buf: phy addr 0x%08x kernel addr 0x%08x\n",
-					audio->out_phys,
-					(uint32_t)audio->out_data);
+			goto input_buff_alloc_error;
 		}
 
+		audio->input_buff_handle = handle;
+
+		rc = ion_phys(client , handle, &addr, &len);
+		if (rc) {
+			MM_ERR("I/P buffers:Invalid phy: %x sz: %x\n",
+				(unsigned int) addr, (unsigned int) len);
+			rc = -ENOMEM;
+			goto input_buff_get_phys_error;
+		} else {
+			MM_INFO("Got valid phy: %x sz: %x\n",
+				(unsigned int) addr,
+				(unsigned int) len);
+		}
+		audio->out_phys = (int32_t)addr;
+
+		rc = ion_handle_get_flags(client,
+			handle, &ionflag);
+		if (rc) {
+			MM_ERR("could not get flags for the handle\n");
+			rc = -ENOMEM;
+			goto input_buff_get_flags_error;
+		}
+
+		audio->map_v_write = ion_map_kernel(client,
+			handle, ionflag);
+		if (IS_ERR(audio->map_v_write)) {
+			MM_ERR("could not map write buffers\n");
+			rc = -ENOMEM;
+			goto input_buff_map_error;
+		}
+		audio->out_data = audio->map_v_write;
+		MM_DBG("write buf: phy addr 0x%08x kernel addr 0x%08x\n",
+					(unsigned int)addr,
+					(unsigned int)audio->out_data);
+
 		/* Initialize buffer */
 		audio->out[0].data = audio->out_data + 0;
 		audio->out[0].addr = audio->out_phys + 0;
@@ -1369,6 +1404,14 @@
 done:
 	mutex_unlock(&audio->lock);
 	return rc;
+input_buff_map_error:
+input_buff_get_phys_error:
+input_buff_get_flags_error:
+	ion_free(client, audio->input_buff_handle);
+input_buff_alloc_error:
+	ion_client_destroy(client);
+client_create_error:
+	dma_free_coherent(NULL, dma_size, audio->data, audio->phys);
 evt_error:
 	msm_adsp_put(audio->audrec);
 	if (audio->mode == MSM_AUD_ENC_MODE_TUNNEL)
diff --git a/arch/arm/mach-msm/qdsp5/audio_amrwb.c b/arch/arm/mach-msm/qdsp5/audio_amrwb.c
index b85b153..66b9354 100644
--- a/arch/arm/mach-msm/qdsp5/audio_amrwb.c
+++ b/arch/arm/mach-msm/qdsp5/audio_amrwb.c
@@ -38,10 +38,10 @@
 #include <linux/delay.h>
 #include <linux/list.h>
 #include <linux/earlysuspend.h>
-#include <linux/android_pmem.h>
 #include <linux/slab.h>
 #include <linux/msm_audio.h>
 #include <linux/memory_alloc.h>
+#include <linux/ion.h>
 
 #include <mach/msm_adsp.h>
 #include <mach/iommu.h>
@@ -180,6 +180,9 @@
 	int eq_needs_commit;
 	audpp_cmd_cfg_object_params_eqalizer eq;
 	audpp_cmd_cfg_object_params_volume vol_pan;
+	struct ion_client *client;
+	struct ion_handle *input_buff_handle;
+	struct ion_handle *output_buff_handle;
 };
 
 static int auddec_dsp_config(struct audio *audio, int enable);
@@ -773,6 +776,10 @@
 	uint16_t enable_mask;
 	int enable;
 	int prev_state;
+	unsigned long ionflag = 0;
+	ion_phys_addr_t addr = 0;
+	struct ion_handle *handle = NULL;
+	int len = 0;
 
 	MM_DBG("cmd = %d\n", cmd);
 
@@ -966,23 +973,53 @@
 		if ((config.pcm_feedback) && (!audio->read_data)) {
 			MM_DBG("allocate PCM buf %d\n", config.buffer_count *
 					config.buffer_size);
-			audio->read_phys = allocate_contiguous_ebi_nomap(
-						config.buffer_size *
-						config.buffer_count,
-						SZ_4K);
-			if (!audio->read_phys) {
+				handle = ion_alloc(audio->client,
+					(config.buffer_size *
+					config.buffer_count),
+					SZ_4K, ION_HEAP(ION_AUDIO_HEAP_ID));
+				if (IS_ERR_OR_NULL(handle)) {
+					MM_ERR("Unable to alloc I/P buffs\n");
+					audio->input_buff_handle = NULL;
 					rc = -ENOMEM;
 					break;
-			}
-			audio->map_v_read = ioremap(
-						audio->read_phys,
-						config.buffer_size *
-						config.buffer_count);
+				}
+
+				audio->input_buff_handle = handle;
+
+				rc = ion_phys(audio->client ,
+					handle, &addr, &len);
+				if (rc) {
+					MM_ERR("Invalid phy: %x sz: %x\n",
+						(unsigned int) addr,
+						(unsigned int) len);
+					ion_free(audio->client, handle);
+					audio->input_buff_handle = NULL;
+					rc = -ENOMEM;
+					break;
+				} else {
+					MM_INFO("Got valid phy: %x sz: %x\n",
+						(unsigned int) addr,
+						(unsigned int) len);
+				}
+				audio->read_phys = (int32_t)addr;
+
+				rc = ion_handle_get_flags(audio->client,
+					handle, &ionflag);
+				if (rc) {
+					MM_ERR("could not get flags\n");
+					ion_free(audio->client, handle);
+					audio->input_buff_handle = NULL;
+					rc = -ENOMEM;
+					break;
+				}
+				audio->map_v_read = ion_map_kernel(
+					audio->client,
+					handle, ionflag);
 			if (IS_ERR(audio->map_v_read)) {
 				MM_ERR("failed to map mem for read buf\n");
+				ion_free(audio->client, handle);
+				audio->input_buff_handle = NULL;
 				rc = -ENOMEM;
-				free_contiguous_memory_by_paddr(
-							audio->read_phys);
 			} else {
 				uint8_t index;
 				uint32_t offset = 0;
@@ -1024,7 +1061,8 @@
 }
 
 /* Only useful in tunnel-mode */
-static int audamrwb_fsync(struct file *file, loff_t a, loff_t b, int datasync)
+static int audamrwb_fsync(struct file *file, loff_t a, loff_t b,
+	int datasync)
 {
 	struct audio *audio = file->private_data;
 	struct buffer *frame;
@@ -1363,12 +1401,13 @@
 	audio->event_abort = 1;
 	wake_up(&audio->event_wait);
 	audamrwb_reset_event_queue(audio);
-	iounmap(audio->map_v_write);
-	free_contiguous_memory_by_paddr(audio->phys);
-	if (audio->read_data) {
-		iounmap(audio->map_v_read);
-		free_contiguous_memory_by_paddr(audio->read_phys);
+	ion_unmap_kernel(audio->client, audio->output_buff_handle);
+	ion_free(audio->client, audio->output_buff_handle);
+	if (audio->input_buff_handle != NULL) {
+		ion_unmap_kernel(audio->client, audio->input_buff_handle);
+		ion_free(audio->client, audio->input_buff_handle);
 	}
+	ion_client_destroy(audio->client);
 	mutex_unlock(&audio->lock);
 #ifdef CONFIG_DEBUG_FS
 	if (audio->dentry)
@@ -1509,8 +1548,14 @@
 static int audamrwb_open(struct inode *inode, struct file *file)
 {
 	struct audio *audio = NULL;
-	int rc, dec_attrb, decid, i;
+	int rc = 0, dec_attrb, decid, i;
 	struct audamrwb_event *e_node = NULL;
+	unsigned mem_sz = DMASZ;
+	unsigned long ionflag = 0;
+	ion_phys_addr_t addr = 0;
+	struct ion_handle *handle = NULL;
+	struct ion_client *client = NULL;
+	int len = 0;
 #ifdef CONFIG_DEBUG_FS
 	/* 4 bytes represents decoder number, 1 byte for terminate string */
 	char name[sizeof "msm_amrwb_" + 5];
@@ -1545,30 +1590,49 @@
 
 	audio->dec_id = decid & MSM_AUD_DECODER_MASK;
 
-	audio->phys = allocate_contiguous_ebi_nomap(DMASZ, SZ_4K);
-	if (!audio->phys) {
-		MM_ERR("could not allocate write buffers, freeing instance \
-				0x%08x\n", (int)audio);
+	client = msm_ion_client_create(UINT_MAX, "Audio_AMR_WB_Client");
+	if (IS_ERR_OR_NULL(client)) {
+		pr_err("Unable to create ION client\n");
 		rc = -ENOMEM;
-		audpp_adec_free(audio->dec_id);
-		kfree(audio);
-		goto done;
-	} else {
-		audio->map_v_write = ioremap(audio->phys, DMASZ);
-
-		if (IS_ERR(audio->map_v_write)) {
-			MM_ERR("could not map write buffers, freeing \
-					instance 0x%08x\n", (int)audio);
-			rc = -ENOMEM;
-			free_contiguous_memory_by_paddr(audio->phys);
-			audpp_adec_free(audio->dec_id);
-			kfree(audio);
-			goto done;
-		}
-		audio->data = audio->map_v_write;
-		MM_DBG("write buf: phy addr 0x%08x kernel addr 0x%08x\n",
-				audio->phys, (int)audio->data);
+		goto client_create_error;
 	}
+	audio->client = client;
+
+	handle = ion_alloc(client, mem_sz, SZ_4K,
+		ION_HEAP(ION_AUDIO_HEAP_ID));
+	if (IS_ERR_OR_NULL(handle)) {
+		MM_ERR("Unable to allocate O/P buffers\n");
+		rc = -ENOMEM;
+		goto output_buff_alloc_error;
+	}
+	audio->output_buff_handle = handle;
+
+	rc = ion_phys(client, handle, &addr, &len);
+	if (rc) {
+		MM_ERR("O/P buffers:Invalid phy: %x sz: %x\n",
+			(unsigned int) addr, (unsigned int) len);
+		goto output_buff_get_phys_error;
+	} else {
+		MM_INFO("O/P buffers:valid phy: %x sz: %x\n",
+			(unsigned int) addr, (unsigned int) len);
+	}
+	audio->phys = (int32_t)addr;
+
+
+	rc = ion_handle_get_flags(client, handle, &ionflag);
+	if (rc) {
+		MM_ERR("could not get flags for the handle\n");
+		goto output_buff_get_flags_error;
+	}
+
+	audio->map_v_write = ion_map_kernel(client, handle, ionflag);
+	if (IS_ERR(audio->map_v_write)) {
+		MM_ERR("could not map write buffers\n");
+		rc = -ENOMEM;
+		goto output_buff_map_error;
+	}
+	audio->data = audio->map_v_write;
+	MM_DBG("write buf: phy addr 0x%08x kernel addr 0x%08x\n",
+		audio->phys, (int)audio->data);
 
 	if (audio->pcm_feedback == TUNNEL_MODE_PLAYBACK) {
 		rc = audmgr_open(&audio->audmgr);
@@ -1599,6 +1663,7 @@
 		goto err;
 	}
 
+	audio->input_buff_handle = NULL;
 	mutex_init(&audio->lock);
 	mutex_init(&audio->write_lock);
 	mutex_init(&audio->read_lock);
@@ -1658,8 +1723,14 @@
 done:
 	return rc;
 err:
-	iounmap(audio->map_v_write);
-	free_contiguous_memory_by_paddr(audio->phys);
+	ion_unmap_kernel(client, audio->output_buff_handle);
+output_buff_map_error:
+output_buff_get_phys_error:
+output_buff_get_flags_error:
+	ion_free(client, audio->output_buff_handle);
+output_buff_alloc_error:
+	ion_client_destroy(client);
+client_create_error:
 	audpp_adec_free(audio->dec_id);
 	kfree(audio);
 	return rc;
diff --git a/arch/arm/mach-msm/qdsp5/audio_evrc.c b/arch/arm/mach-msm/qdsp5/audio_evrc.c
index 489929b..1d4148a 100644
--- a/arch/arm/mach-msm/qdsp5/audio_evrc.c
+++ b/arch/arm/mach-msm/qdsp5/audio_evrc.c
@@ -33,10 +33,10 @@
 #include <linux/delay.h>
 #include <linux/list.h>
 #include <linux/earlysuspend.h>
-#include <linux/android_pmem.h>
 #include <linux/slab.h>
 #include <linux/msm_audio.h>
 #include <linux/memory_alloc.h>
+#include <linux/ion.h>
 
 #include <mach/msm_adsp.h>
 #include <mach/iommu.h>
@@ -175,6 +175,9 @@
 	int eq_needs_commit;
 	audpp_cmd_cfg_object_params_eqalizer eq;
 	audpp_cmd_cfg_object_params_volume vol_pan;
+	struct ion_client *client;
+	struct ion_handle *input_buff_handle;
+	struct ion_handle *output_buff_handle;
 };
 
 static int auddec_dsp_config(struct audio *audio, int enable);
@@ -770,6 +773,10 @@
 	uint16_t enable_mask;
 	int enable;
 	int prev_state;
+	unsigned long ionflag = 0;
+	ion_phys_addr_t addr = 0;
+	struct ion_handle *handle = NULL;
+	int len = 0;
 
 	MM_DBG("cmd = %d\n", cmd);
 
@@ -955,25 +962,54 @@
 				MM_DBG("allocate PCM buf %d\n",
 					config.buffer_count *
 					config.buffer_size);
-				audio->read_phys =
-						allocate_contiguous_ebi_nomap(
-							config.buffer_size *
-							config.buffer_count,
-							SZ_4K);
-				if (!audio->read_phys) {
+				handle = ion_alloc(audio->client,
+					(config.buffer_size *
+					config.buffer_count),
+					SZ_4K, ION_HEAP(ION_AUDIO_HEAP_ID));
+				if (IS_ERR_OR_NULL(handle)) {
+					MM_ERR("Unable to alloc I/P buffs\n");
+					audio->input_buff_handle = NULL;
 					rc = -ENOMEM;
 					break;
 				}
-				audio->map_v_read = ioremap(
-							audio->read_phys,
-							config.buffer_size *
-							config.buffer_count);
+
+				audio->input_buff_handle = handle;
+
+				rc = ion_phys(audio->client ,
+					handle, &addr, &len);
+				if (rc) {
+					MM_ERR("Invalid phy: %x sz: %x\n",
+						(unsigned int) addr,
+						(unsigned int) len);
+					ion_free(audio->client, handle);
+					audio->input_buff_handle = NULL;
+					rc = -ENOMEM;
+					break;
+				} else {
+					MM_INFO("Got valid phy: %x sz: %x\n",
+						(unsigned int) addr,
+						(unsigned int) len);
+				}
+				audio->read_phys = (int32_t)addr;
+
+				rc = ion_handle_get_flags(audio->client,
+					handle, &ionflag);
+				if (rc) {
+					MM_ERR("could not get flags\n");
+					ion_free(audio->client, handle);
+					audio->input_buff_handle = NULL;
+					rc = -ENOMEM;
+					break;
+				}
+				audio->map_v_read = ion_map_kernel(
+					audio->client,
+					handle, ionflag);
 				if (IS_ERR(audio->map_v_read)) {
 					MM_ERR("failed to map mem"
 							" for read buf\n");
+					ion_free(audio->client, handle);
+					audio->input_buff_handle = NULL;
 					rc = -ENOMEM;
-					free_contiguous_memory_by_paddr(
-							audio->read_phys);
 				} else {
 					uint8_t index;
 					uint32_t offset = 0;
@@ -1287,12 +1323,13 @@
 	audio->event_abort = 1;
 	wake_up(&audio->event_wait);
 	audevrc_reset_event_queue(audio);
-	iounmap(audio->map_v_write);
-	free_contiguous_memory_by_paddr(audio->phys);
-	if (audio->read_data) {
-		iounmap(audio->map_v_read);
-		free_contiguous_memory_by_paddr(audio->read_phys);
+	ion_unmap_kernel(audio->client, audio->output_buff_handle);
+	ion_free(audio->client, audio->output_buff_handle);
+	if (audio->input_buff_handle != NULL) {
+		ion_unmap_kernel(audio->client, audio->input_buff_handle);
+		ion_free(audio->client, audio->input_buff_handle);
 	}
+	ion_client_destroy(audio->client);
 	mutex_unlock(&audio->lock);
 #ifdef CONFIG_DEBUG_FS
 	if (audio->dentry)
@@ -1431,6 +1468,13 @@
 	struct audio *audio = NULL;
 	int rc, dec_attrb, decid, i;
 	struct audevrc_event *e_node = NULL;
+	unsigned mem_sz = DMASZ;
+	unsigned long ionflag = 0;
+	ion_phys_addr_t addr = 0;
+	struct ion_handle *handle = NULL;
+	struct ion_client *client = NULL;
+	int len = 0;
+
 #ifdef CONFIG_DEBUG_FS
 	/* 4 bytes represents decoder number, 1 byte for terminate string */
 	char name[sizeof "msm_evrc_" + 5];
@@ -1473,29 +1517,50 @@
 
 	audio->dec_id = decid & MSM_AUD_DECODER_MASK;
 
-	audio->phys = allocate_contiguous_ebi_nomap(DMASZ, SZ_4K);
-	if (!audio->phys) {
-		MM_ERR("could not allocate write buffers, freeing instance \
-				0x%08x\n", (int)audio);
+	client = msm_ion_client_create(UINT_MAX, "Audio_EVRC_Client");
+	if (IS_ERR_OR_NULL(client)) {
+		pr_err("Unable to create ION client\n");
 		rc = -ENOMEM;
-		audpp_adec_free(audio->dec_id);
-		kfree(audio);
-		goto done;
-	} else {
-		audio->map_v_write = ioremap(audio->phys, DMASZ);
-		if (IS_ERR(audio->map_v_write)) {
-			MM_ERR("could not map write buffers, freeing \
-					instance 0x%08x\n", (int)audio);
-			rc = -ENOMEM;
-			free_contiguous_memory_by_paddr(audio->phys);
-			audpp_adec_free(audio->dec_id);
-			kfree(audio);
-			goto done;
-		}
-		audio->data = audio->map_v_write;
-		MM_DBG("write buf: phy addr 0x%08x kernel addr 0x%08x\n",
-				audio->phys, (int)audio->data);
+		goto client_create_error;
 	}
+	audio->client = client;
+
+	handle = ion_alloc(client, mem_sz, SZ_4K,
+		ION_HEAP(ION_AUDIO_HEAP_ID));
+	if (IS_ERR_OR_NULL(handle)) {
+		MM_ERR("Unable to allocate O/P buffers\n");
+		rc = -ENOMEM;
+		goto output_buff_alloc_error;
+	}
+	audio->output_buff_handle = handle;
+
+	rc = ion_phys(client, handle, &addr, &len);
+	if (rc) {
+		MM_ERR("O/P buffers:Invalid phy: %x sz: %x\n",
+			(unsigned int) addr, (unsigned int) len);
+		goto output_buff_get_phys_error;
+	} else {
+		MM_INFO("O/P buffers:valid phy: %x sz: %x\n",
+			(unsigned int) addr, (unsigned int) len);
+	}
+	audio->phys = (int32_t)addr;
+
+
+	rc = ion_handle_get_flags(client, handle, &ionflag);
+	if (rc) {
+		MM_ERR("could not get flags for the handle\n");
+		goto output_buff_get_flags_error;
+	}
+
+	audio->map_v_write = ion_map_kernel(client, handle, ionflag);
+	if (IS_ERR(audio->map_v_write)) {
+		MM_ERR("could not map write buffers\n");
+		rc = -ENOMEM;
+		goto output_buff_map_error;
+	}
+	audio->data = audio->map_v_write;
+	MM_DBG("write buf: phy addr 0x%08x kernel addr 0x%08x\n",
+		audio->phys, (int)audio->data);
 
 	if (audio->pcm_feedback == TUNNEL_MODE_PLAYBACK) {
 		rc = audmgr_open(&audio->audmgr);
@@ -1527,6 +1592,8 @@
 		goto err;
 	}
 
+	audio->input_buff_handle = NULL;
+
 	/* Initialize all locks of audio instance */
 	mutex_init(&audio->lock);
 	mutex_init(&audio->write_lock);
@@ -1582,8 +1649,14 @@
 done:
 	return rc;
 err:
-	iounmap(audio->map_v_write);
-	free_contiguous_memory_by_paddr(audio->phys);
+	ion_unmap_kernel(client, audio->output_buff_handle);
+output_buff_map_error:
+output_buff_get_phys_error:
+output_buff_get_flags_error:
+	ion_free(client, audio->output_buff_handle);
+output_buff_alloc_error:
+	ion_client_destroy(client);
+client_create_error:
 	audpp_adec_free(audio->dec_id);
 	kfree(audio);
 	return rc;
diff --git a/arch/arm/mach-msm/qdsp5/audio_evrc_in.c b/arch/arm/mach-msm/qdsp5/audio_evrc_in.c
index e13b072..e955c4b 100644
--- a/arch/arm/mach-msm/qdsp5/audio_evrc_in.c
+++ b/arch/arm/mach-msm/qdsp5/audio_evrc_in.c
@@ -33,6 +33,7 @@
 
 
 #include <linux/memory_alloc.h>
+#include <linux/ion.h>
 
 #include <asm/atomic.h>
 #include <asm/ioctls.h>
@@ -150,6 +151,9 @@
 	int enabled;
 	int running;
 	int stopped; /* set when stopped, cleared on flush */
+	struct ion_client *client;
+	struct ion_handle *input_buff_handle;
+	struct ion_handle *output_buff_handle;
 };
 
 struct audio_frame {
@@ -1003,7 +1007,8 @@
 	spin_unlock_irqrestore(&audio->dsp_lock, flags);
 }
 
-static int audevrc_in_fsync(struct file *file,loff_t a, loff_t b, int datasync)
+static int audevrc_in_fsync(struct file *file, loff_t a, loff_t b,
+	int datasync)
 
 {
 	struct audio_evrc_in *audio = file->private_data;
@@ -1190,15 +1195,16 @@
 	audio->opened = 0;
 	if ((audio->mode == MSM_AUD_ENC_MODE_NONTUNNEL) && \
 	   (audio->out_data)) {
-		iounmap(audio->map_v_write);
-		free_contiguous_memory_by_paddr(audio->out_phys);
+		ion_unmap_kernel(audio->client, audio->input_buff_handle);
+		ion_free(audio->client, audio->input_buff_handle);
 		audio->out_data = NULL;
 	}
 	if (audio->data) {
-		iounmap(audio->map_v_read);
-		free_contiguous_memory_by_paddr(audio->phys);
+		ion_unmap_kernel(audio->client, audio->output_buff_handle);
+		ion_free(audio->client, audio->output_buff_handle);
 		audio->data = NULL;
 	}
+	ion_client_destroy(audio->client);
 	mutex_unlock(&audio->lock);
 	return 0;
 }
@@ -1211,6 +1217,11 @@
 	int rc;
 	int encid;
 	int dma_size = 0;
+	int len = 0;
+	unsigned long ionflag = 0;
+	ion_phys_addr_t addr = 0;
+	struct ion_handle *handle = NULL;
+	struct ion_client *client = NULL;
 
 	mutex_lock(&audio->lock);
 	if (audio->opened) {
@@ -1290,51 +1301,101 @@
 	audevrc_in_flush(audio);
 	audevrc_out_flush(audio);
 
-	audio->phys = allocate_contiguous_ebi_nomap(dma_size, SZ_4K);
-	if (!audio->phys) {
-		MM_ERR("could not allocate physical read buffers\n");
+	client = msm_ion_client_create(UINT_MAX, "Audio_EVRC_in_client");
+	if (IS_ERR_OR_NULL(client)) {
+		MM_ERR("Unable to create ION client\n");
 		rc = -ENOMEM;
-		goto evt_error;
-	} else {
-		audio->map_v_read = ioremap(audio->phys, dma_size);
-		if (IS_ERR(audio->map_v_read)) {
-			MM_ERR("could not map physical address\n");
-			rc = -ENOMEM;
-			free_contiguous_memory_by_paddr(audio->phys);
-			goto evt_error;
-		}
-		audio->data = audio->map_v_read;
-		MM_DBG("read buf: phy addr 0x%08x kernel addr 0x%08x\n",
-				audio->phys, (int)audio->data);
+		goto client_create_error;
 	}
+	audio->client = client;
+
+	MM_DBG("allocating mem sz = %d\n", dma_size);
+	handle = ion_alloc(client, dma_size, SZ_4K,
+		ION_HEAP(ION_AUDIO_HEAP_ID));
+	if (IS_ERR_OR_NULL(handle)) {
+		MM_ERR("Unable to allocate O/P buffers\n");
+		rc = -ENOMEM;
+		goto output_buff_alloc_error;
+	}
+
+	audio->output_buff_handle = handle;
+
+	rc = ion_phys(client, handle, &addr, &len);
+	if (rc) {
+		MM_ERR("O/P buffers:Invalid phy: %x sz: %x\n",
+			(unsigned int) addr, (unsigned int) len);
+		rc = -ENOMEM;
+		goto output_buff_get_phys_error;
+	} else {
+		MM_INFO("O/P buffers:valid phy: %x sz: %x\n",
+			(unsigned int) addr, (unsigned int) len);
+	}
+	audio->phys = (int32_t)addr;
+
+	rc = ion_handle_get_flags(client, handle, &ionflag);
+	if (rc) {
+		MM_ERR("could not get flags for the handle\n");
+		rc = -ENOMEM;
+		goto output_buff_get_flags_error;
+	}
+
+	audio->map_v_read = ion_map_kernel(client, handle, ionflag);
+	if (IS_ERR(audio->map_v_read)) {
+		MM_ERR("could not map read buffers, freeing instance 0x%08x\n",
+				(int)audio);
+		rc = -ENOMEM;
+		goto output_buff_map_error;
+	}
+	audio->data = audio->map_v_read;
+	MM_DBG("read buf: phy addr 0x%08x kernel addr 0x%08x\n",
+		audio->phys, (int)audio->data);
+
 	audio->out_data = NULL;
 	if (audio->mode == MSM_AUD_ENC_MODE_NONTUNNEL) {
-		audio->out_phys = allocate_contiguous_ebi_nomap(BUFFER_SIZE,
-						SZ_4K);
-		if (!audio->out_phys) {
-			MM_ERR("could not allocate physical write buffers\n");
+		MM_DBG("allocating BUFFER_SIZE  %d\n", BUFFER_SIZE);
+		handle = ion_alloc(client, BUFFER_SIZE,
+				SZ_4K, ION_HEAP(ION_AUDIO_HEAP_ID));
+		if (IS_ERR_OR_NULL(handle)) {
+			MM_ERR("Unable to allocate I/P buffers\n");
 			rc = -ENOMEM;
-			iounmap(audio->map_v_read);
-			free_contiguous_memory_by_paddr(audio->phys);
-			goto evt_error;
-		} else {
-			audio->map_v_write = ioremap(
-						audio->out_phys, BUFFER_SIZE);
-
-			if (IS_ERR(audio->map_v_write)) {
-				MM_ERR("could not map write phys address\n");
-				rc = -ENOMEM;
-				iounmap(audio->map_v_read);
-				free_contiguous_memory_by_paddr(audio->phys);
-				free_contiguous_memory_by_paddr(\
-							audio->out_phys);
-				goto evt_error;
-			}
-			audio->out_data = audio->map_v_write;
-			MM_DBG("wr buf: phy addr 0x%08x kernel addr 0x%08x\n",
-					audio->out_phys, (int)audio->out_data);
+			goto input_buff_alloc_error;
 		}
 
+		audio->input_buff_handle = handle;
+
+		rc = ion_phys(client, handle, &addr, &len);
+		if (rc) {
+			MM_ERR("I/P buffers:Invalid phy: %x sz: %x\n",
+				(unsigned int) addr, (unsigned int) len);
+			rc = -ENOMEM;
+			goto input_buff_alloc_error;
+		} else {
+			MM_INFO("Got valid phy: %x sz: %x\n",
+				(unsigned int) addr,
+				(unsigned int) len);
+		}
+		audio->out_phys = (int32_t)addr;
+
+		rc = ion_handle_get_flags(client,
+			handle, &ionflag);
+		if (rc) {
+			MM_ERR("could not get flags for the handle\n");
+			rc = -ENOMEM;
+			goto input_buff_alloc_error;
+		}
+
+		audio->map_v_write = ion_map_kernel(client,
+			handle, ionflag);
+		if (IS_ERR(audio->map_v_write)) {
+			MM_ERR("could not map write buffers\n");
+			rc = -ENOMEM;
+			goto input_buff_map_error;
+		}
+		audio->out_data = audio->map_v_write;
+		MM_DBG("write buf: phy addr 0x%08x kernel addr 0x%08x\n",
+					(unsigned int)addr,
+					(unsigned int)audio->out_data);
+
 		/* Initialize buffer */
 		audio->out[0].data = audio->out_data + 0;
 		audio->out[0].addr = audio->out_phys + 0;
@@ -1356,7 +1417,17 @@
 done:
 	mutex_unlock(&audio->lock);
 	return rc;
-evt_error:
+input_buff_map_error:
+	ion_free(client, audio->input_buff_handle);
+input_buff_alloc_error:
+	ion_unmap_kernel(client, audio->output_buff_handle);
+output_buff_map_error:
+output_buff_get_phys_error:
+output_buff_get_flags_error:
+	ion_free(client, audio->output_buff_handle);
+output_buff_alloc_error:
+	ion_client_destroy(client);
+client_create_error:
 	msm_adsp_put(audio->audrec);
 	if (audio->mode == MSM_AUD_ENC_MODE_TUNNEL)
 		msm_adsp_put(audio->audpre);
diff --git a/arch/arm/mach-msm/qdsp5/audio_lpa.c b/arch/arm/mach-msm/qdsp5/audio_lpa.c
index 8754337..14a9e57 100644
--- a/arch/arm/mach-msm/qdsp5/audio_lpa.c
+++ b/arch/arm/mach-msm/qdsp5/audio_lpa.c
@@ -37,8 +37,8 @@
 #include <linux/debugfs.h>
 #include <linux/delay.h>
 #include <linux/earlysuspend.h>
+#include <linux/ion.h>
 #include <linux/list.h>
-#include <linux/android_pmem.h>
 #include <linux/slab.h>
 #include <linux/msm_audio.h>
 
@@ -137,9 +137,9 @@
 	union msm_audio_event_payload payload;
 };
 
-struct audlpa_pmem_region {
+struct audlpa_ion_region {
 	struct list_head list;
-	struct file *file;
+	struct ion_handle *handle;
 	int fd;
 	void *vaddr;
 	unsigned long paddr;
@@ -217,9 +217,10 @@
 	struct mutex get_event_lock;
 	int event_abort;
 
-	struct list_head pmem_region_queue;
+	struct list_head ion_region_queue;
 	int buffer_count;
 	int buffer_size;
+	struct ion_client *client;
 };
 
 static int auddec_dsp_config(struct audio *audio, int enable);
@@ -227,7 +228,7 @@
 static void audio_dsp_event(void *private, unsigned id, uint16_t *msg);
 static void audpcm_post_event(struct audio *audio, int type,
 	union msm_audio_event_payload payload);
-static unsigned long audlpa_pmem_fixup(struct audio *audio, void *addr,
+static unsigned long audlpa_ion_fixup(struct audio *audio, void *addr,
 	unsigned long len, int ref_up);
 static void audpcm_async_send_data(struct audio *audio,
 	unsigned needed);
@@ -679,7 +680,7 @@
 
 	if (drv_evt && drv_evt->event_type == AUDIO_EVENT_WRITE_DONE) {
 		mutex_lock(&audio->lock);
-		audlpa_pmem_fixup(audio, drv_evt->payload.aio_buf.buf_addr,
+		audlpa_ion_fixup(audio, drv_evt->payload.aio_buf.buf_addr,
 				  drv_evt->payload.aio_buf.buf_len, 0);
 		mutex_unlock(&audio->lock);
 	}
@@ -689,94 +690,118 @@
 	return rc;
 }
 
-static int audlpa_pmem_check(struct audio *audio,
+static int audlpa_ion_check(struct audio *audio,
 		void *vaddr, unsigned long len)
 {
-	struct audlpa_pmem_region *region_elt;
-	struct audlpa_pmem_region t = { .vaddr = vaddr, .len = len };
+	struct audlpa_ion_region *region_elt;
+	struct audlpa_ion_region t = {.vaddr = vaddr, .len = len };
 
-	list_for_each_entry(region_elt, &audio->pmem_region_queue, list) {
+	list_for_each_entry(region_elt, &audio->ion_region_queue, list) {
 		if (CONTAINS(region_elt, &t) || CONTAINS(&t, region_elt) ||
 		    OVERLAPS(region_elt, &t)) {
-			MM_ERR("region (vaddr %p len %ld)"
+			MM_ERR("[%p]:region (vaddr %p len %ld)"
 				" clashes with registered region"
 				" (vaddr %p paddr %p len %ld)\n",
-				vaddr, len,
+				audio, vaddr, len,
 				region_elt->vaddr,
-				(void *)region_elt->paddr,
-				region_elt->len);
+				(void *)region_elt->paddr, region_elt->len);
 			return -EINVAL;
 		}
 	}
 
 	return 0;
 }
-
-static int audlpa_pmem_add(struct audio *audio,
-	struct msm_audio_pmem_info *info)
+static int audlpa_ion_add(struct audio *audio,
+			struct msm_audio_ion_info *info)
 {
-	unsigned long paddr, kvaddr, len;
-	struct file *file;
-	struct audlpa_pmem_region *region;
+	ion_phys_addr_t paddr;
+	size_t len;
+	unsigned long kvaddr;
+	struct audlpa_ion_region *region;
 	int rc = -EINVAL;
+	struct ion_handle *handle;
+	unsigned long ionflag;
 
-	MM_DBG("\n"); /* Macro prints the file name and function */
+	MM_ERR("\n"); /* Macro prints the file name and function */
 	region = kmalloc(sizeof(*region), GFP_KERNEL);
 
 	if (!region) {
 		rc = -ENOMEM;
 		goto end;
 	}
-
-	if (get_pmem_file(info->fd, &paddr, &kvaddr, &len, &file)) {
-		kfree(region);
-		goto end;
+	handle = ion_import_dma_buf(audio->client, info->fd);
+	if (IS_ERR_OR_NULL(handle)) {
+		pr_err("%s: could not get handle of the given fd\n", __func__);
+		goto import_error;
 	}
-
-	rc = audlpa_pmem_check(audio, info->vaddr, len);
+	rc = ion_handle_get_flags(audio->client, handle, &ionflag);
+	if (rc) {
+		pr_err("%s: could not get flags for the handle\n", __func__);
+		goto flag_error;
+	}
+	kvaddr = (unsigned long)ion_map_kernel(audio->client, handle, ionflag);
+	if (IS_ERR_OR_NULL((void *)kvaddr)) {
+		pr_err("%s: could not get virtual address\n", __func__);
+		goto map_error;
+	}
+	rc = ion_phys(audio->client, handle, &paddr, &len);
+	if (rc) {
+		pr_err("%s: could not get physical address\n", __func__);
+		goto ion_error;
+	}
+	rc = audlpa_ion_check(audio, info->vaddr, len);
 	if (rc < 0) {
-		put_pmem_file(file);
-		kfree(region);
-		goto end;
+		MM_ERR("audlpa_ion_check failed\n");
+		goto ion_error;
 	}
-
+	region->handle = handle;
 	region->vaddr = info->vaddr;
 	region->fd = info->fd;
 	region->paddr = paddr;
 	region->kvaddr = kvaddr;
 	region->len = len;
-	region->file = file;
 	region->ref_cnt = 0;
-	MM_DBG("add region paddr %lx vaddr %p, len %lu\n", region->paddr,
-			region->vaddr, region->len);
-	list_add_tail(&region->list, &audio->pmem_region_queue);
+	MM_DBG("[%p]:add region paddr %lx vaddr %p, len %lu kvaddr %lx\n",
+		audio, region->paddr, region->vaddr,
+		region->len, region->kvaddr);
+	list_add_tail(&region->list, &audio->ion_region_queue);
+
+	return rc;
+
+ion_error:
+	ion_unmap_kernel(audio->client, handle);
+map_error:
+flag_error:
+	ion_free(audio->client, handle);
+import_error:
+	kfree(region);
 end:
 	return rc;
 }
 
-static int audlpa_pmem_remove(struct audio *audio,
-	struct msm_audio_pmem_info *info)
+static int audlpa_ion_remove(struct audio *audio,
+			struct msm_audio_ion_info *info)
 {
-	struct audlpa_pmem_region *region;
+	struct audlpa_ion_region *region;
 	struct list_head *ptr, *next;
 	int rc = -EINVAL;
 
-	MM_DBG("info fd %d vaddr %p\n", info->fd, info->vaddr);
+	list_for_each_safe(ptr, next, &audio->ion_region_queue) {
+		region = list_entry(ptr, struct audlpa_ion_region, list);
 
-	list_for_each_safe(ptr, next, &audio->pmem_region_queue) {
-		region = list_entry(ptr, struct audlpa_pmem_region, list);
-
-		if ((region->fd == info->fd) &&
+		if (region != NULL && (region->fd == info->fd) &&
 		    (region->vaddr == info->vaddr)) {
 			if (region->ref_cnt) {
-				MM_DBG("region %p in use ref_cnt %d\n",
-						region, region->ref_cnt);
+				MM_DBG("%s[%p]:region %p in use ref_cnt %d\n",
+					__func__, audio, region,
+					region->ref_cnt);
 				break;
 			}
 			MM_DBG("remove region fd %d vaddr %p\n",
 				info->fd, info->vaddr);
 			list_del(&region->list);
-			put_pmem_file(region->file);
+			ion_unmap_kernel(audio->client, region->handle);
+			ion_free(audio->client, region->handle);
 			kfree(region);
 			rc = 0;
 			break;
@@ -786,23 +811,20 @@
 	return rc;
 }
 
-static int audlpa_pmem_lookup_vaddr(struct audio *audio, void *addr,
-		     unsigned long len, struct audlpa_pmem_region **region)
+static int audlpa_ion_lookup_vaddr(struct audio *audio, void *addr,
+			unsigned long len, struct audlpa_ion_region **region)
 {
-	struct audlpa_pmem_region *region_elt;
-
+	struct audlpa_ion_region *region_elt;
 	int match_count = 0;
-
 	*region = NULL;
 
 	/* returns physical address or zero */
-	list_for_each_entry(region_elt, &audio->pmem_region_queue,
-		list) {
+	list_for_each_entry(region_elt, &audio->ion_region_queue, list) {
 		if (addr >= region_elt->vaddr &&
 		    addr < region_elt->vaddr + region_elt->len &&
 		    addr + len <= region_elt->vaddr + region_elt->len) {
 			/* offset since we could pass vaddr inside a registerd
-			 * pmem buffer
+			 * ion buffer
 			 */
 
 			match_count++;
@@ -812,31 +834,33 @@
 	}
 
 	if (match_count > 1) {
-		MM_ERR("multiple hits for vaddr %p, len %ld\n", addr, len);
-		list_for_each_entry(region_elt,
-		  &audio->pmem_region_queue, list) {
+		MM_ERR("%s[%p]:multiple hits for vaddr %p, len %ld\n",
+			 __func__, audio, addr, len);
+		list_for_each_entry(region_elt, &audio->ion_region_queue,
+					list) {
 			if (addr >= region_elt->vaddr &&
-			    addr < region_elt->vaddr + region_elt->len &&
-			    addr + len <= region_elt->vaddr + region_elt->len)
-				MM_ERR("\t%p, %ld --> %p\n", region_elt->vaddr,
+			addr < region_elt->vaddr + region_elt->len &&
+			addr + len <= region_elt->vaddr + region_elt->len)
+					MM_ERR("\t%s[%p]:%p, %ld --> %p\n",
+						__func__, audio,
+						region_elt->vaddr,
 						region_elt->len,
 						(void *)region_elt->paddr);
 		}
 	}
-
 	return *region ? 0 : -1;
 }
-
-unsigned long audlpa_pmem_fixup(struct audio *audio, void *addr,
+static unsigned long audlpa_ion_fixup(struct audio *audio, void *addr,
 		    unsigned long len, int ref_up)
 {
-	struct audlpa_pmem_region *region;
+	struct audlpa_ion_region *region;
 	unsigned long paddr;
 	int ret;
 
-	ret = audlpa_pmem_lookup_vaddr(audio, addr, len, &region);
+	ret = audlpa_ion_lookup_vaddr(audio, addr, len, &region);
 	if (ret) {
-		MM_ERR("lookup (%p, %ld) failed\n", addr, len);
+		MM_ERR("%s[%p]:lookup (%p, %ld) failed\n",
+			__func__, audio, addr, len);
 		return 0;
 	}
 	if (ref_up)
@@ -870,10 +894,9 @@
 			buf_node->buf.buf_addr, buf_node->buf.buf_len,
 			buf_node->buf.data_len);
 
-	buf_node->paddr = audlpa_pmem_fixup(
+	buf_node->paddr = audlpa_ion_fixup(
 		audio, buf_node->buf.buf_addr,
 		buf_node->buf.buf_len, 1);
-
 	if (dir) {
 		/* write */
 		if (!buf_node->paddr ||
@@ -1042,23 +1065,23 @@
 		rc = audpp_pause(audio->dec_id, (int) arg);
 		break;
 
-	case AUDIO_REGISTER_PMEM: {
-			struct msm_audio_pmem_info info;
-			MM_DBG("AUDIO_REGISTER_PMEM\n");
+	case AUDIO_REGISTER_ION: {
+			struct msm_audio_ion_info info;
+			MM_DBG("AUDIO_REGISTER_ION\n");
 			if (copy_from_user(&info, (void *) arg, sizeof(info)))
 				rc = -EFAULT;
 			else
-				rc = audlpa_pmem_add(audio, &info);
+				rc = audlpa_ion_add(audio, &info);
 			break;
 		}
 
-	case AUDIO_DEREGISTER_PMEM: {
-			struct msm_audio_pmem_info info;
-			MM_DBG("AUDIO_DEREGISTER_PMEM\n");
+	case AUDIO_DEREGISTER_ION: {
+			struct msm_audio_ion_info info;
+			MM_DBG("AUDIO_DEREGISTER_ION\n");
 			if (copy_from_user(&info, (void *) arg, sizeof(info)))
 				rc = -EFAULT;
 			else
-				rc = audlpa_pmem_remove(audio, &info);
+				rc = audlpa_ion_remove(audio, &info);
 			break;
 		}
 
@@ -1175,7 +1198,8 @@
 	return rc;
 }
 
-int audlpa_fsync(struct file *file, loff_t a, loff_t b, int datasync)
+int audlpa_fsync(struct file *file, loff_t a, loff_t b,
+	int datasync)
 {
 	struct audio *audio = file->private_data;
 
@@ -1185,21 +1209,21 @@
 	return audlpa_async_fsync(audio);
 }
 
-static void audpcm_reset_pmem_region(struct audio *audio)
+static void audpcm_reset_ion_region(struct audio *audio)
 {
-	struct audlpa_pmem_region *region;
+	struct audlpa_ion_region *region;
 	struct list_head *ptr, *next;
 
-	list_for_each_safe(ptr, next, &audio->pmem_region_queue) {
-		region = list_entry(ptr, struct audlpa_pmem_region, list);
+	list_for_each_safe(ptr, next, &audio->ion_region_queue) {
+		region = list_entry(ptr, struct audlpa_ion_region, list);
 		list_del(&region->list);
-		put_pmem_file(region->file);
+		ion_unmap_kernel(audio->client, region->handle);
+		ion_free(audio->client, region->handle);
 		kfree(region);
 	}
 
 	return;
 }
-
 static int audio_release(struct inode *inode, struct file *file)
 {
 	struct audio *audio = file->private_data;
@@ -1210,7 +1234,7 @@
 	if (audio->rmt_resource_released == 0)
 		rmt_put_resource(audio);
 	audpcm_async_flush(audio);
-	audpcm_reset_pmem_region(audio);
+	audpcm_reset_ion_region(audio);
 
 	msm_adsp_put(audio->audplay);
 	audpp_adec_free(audio->dec_id);
@@ -1221,16 +1245,12 @@
 	audio->event_abort = 1;
 	wake_up(&audio->event_wait);
 	audpcm_reset_event_queue(audio);
-	MM_DBG("pmem area = 0x%8x\n", (unsigned int)audio->data);
-	if (audio->data) {
-		iounmap(audio->map_v_write);
-		free_contiguous_memory_by_paddr(audio->phys);
-	}
 	mutex_unlock(&audio->lock);
 #ifdef CONFIG_DEBUG_FS
 	if (audio->dentry)
 		debugfs_remove(audio->dentry);
 #endif
+	ion_client_destroy(audio->client);
 	kfree(audio);
 	return 0;
 }
@@ -1416,7 +1436,7 @@
 	spin_lock_init(&audio->dsp_lock);
 	init_waitqueue_head(&audio->write_wait);
 	INIT_LIST_HEAD(&audio->out_queue);
-	INIT_LIST_HEAD(&audio->pmem_region_queue);
+	INIT_LIST_HEAD(&audio->ion_region_queue);
 	INIT_LIST_HEAD(&audio->free_event_queue);
 	INIT_LIST_HEAD(&audio->event_queue);
 	init_waitqueue_head(&audio->wait);
@@ -1456,6 +1476,14 @@
 			break;
 		}
 	}
+
+	audio->client = msm_ion_client_create(UINT_MAX, "Audio_LPA_Client");
+	if (IS_ERR_OR_NULL(audio->client)) {
+		pr_err("Unable to create ION client\n");
+		goto err;
+	}
+	MM_DBG("Ion client created\n");
+
 done:
 	return rc;
 err:
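
On the AIO side, audio_lpa keeps the old userspace contract but replaces the PMEM registration ioctls with AUDIO_REGISTER_ION/AUDIO_DEREGISTER_ION, which hand the driver a dma-buf fd plus the address at which the client mapped the buffer. A rough userspace sketch of feeding that path follows, for orientation only: the audio-side ioctl and the fd/vaddr fields of msm_audio_ion_info are taken from the hunks above, while the /dev/ion allocation step (ION_IOC_ALLOC, ION_IOC_SHARE and the struct layouts) follows the ION interface of this kernel generation and should be checked against this tree's headers; error-path cleanup is omitted.

	#include <fcntl.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <sys/mman.h>
	#include <linux/ion.h>		/* ION_IOC_ALLOC/ION_IOC_SHARE (assumed uapi) */
	#include <linux/msm_audio.h>	/* AUDIO_REGISTER_ION, msm_audio_ion_info */

	static int register_audio_ion_buf(int audio_fd, size_t len)
	{
		struct ion_allocation_data alloc;
		struct ion_fd_data share;
		struct msm_audio_ion_info info;
		void *vaddr;
		int ion_fd = open("/dev/ion", O_RDONLY);

		if (ion_fd < 0)
			return -1;

		memset(&alloc, 0, sizeof(alloc));
		alloc.len = len;
		alloc.align = 4096;
		/* heap selection mirrors the kernel side; whether the macro is
		 * visible to userspace in this tree is an assumption */
		alloc.flags = ION_HEAP(ION_AUDIO_HEAP_ID);
		if (ioctl(ion_fd, ION_IOC_ALLOC, &alloc) < 0)
			return -1;

		share.handle = alloc.handle;
		if (ioctl(ion_fd, ION_IOC_SHARE, &share) < 0)	/* dma-buf fd */
			return -1;

		vaddr = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED,
			     share.fd, 0);
		if (vaddr == MAP_FAILED)
			return -1;

		info.fd = share.fd;	/* imported by ion_import_dma_buf() */
		info.vaddr = vaddr;	/* matched later by audlpa_ion_fixup() */
		return ioctl(audio_fd, AUDIO_REGISTER_ION, &info);
	}

AUDIO_DEREGISTER_ION takes the same {fd, vaddr} pair and drops the region once its ref_cnt has returned to zero, mirroring audlpa_ion_remove() above.
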
diff --git a/arch/arm/mach-msm/qdsp5/audio_mp3.c b/arch/arm/mach-msm/qdsp5/audio_mp3.c
index 9346107..427dc8f 100644
--- a/arch/arm/mach-msm/qdsp5/audio_mp3.c
+++ b/arch/arm/mach-msm/qdsp5/audio_mp3.c
@@ -31,10 +31,10 @@
 #include <linux/delay.h>
 #include <linux/earlysuspend.h>
 #include <linux/list.h>
-#include <linux/android_pmem.h>
 #include <linux/slab.h>
 #include <linux/msm_audio.h>
 #include <linux/memory_alloc.h>
+#include <linux/ion.h>
 
 #include <mach/msm_adsp.h>
 #include <mach/iommu.h>
@@ -136,9 +136,9 @@
 	union msm_audio_event_payload payload;
 };
 
-struct audmp3_pmem_region {
+struct audmp3_ion_region {
 	struct list_head list;
-	struct file *file;
+	struct ion_handle *handle;
 	int fd;
 	void *vaddr;
 	unsigned long paddr;
@@ -241,13 +241,16 @@
 	struct mutex get_event_lock;
 	int event_abort;
 
-	struct list_head pmem_region_queue; /* protected by lock */
+	struct list_head ion_region_queue; /* protected by lock */
 	struct audmp3_drv_operations drv_ops;
 
 	int eq_enable;
 	int eq_needs_commit;
 	audpp_cmd_cfg_object_params_eqalizer eq;
 	audpp_cmd_cfg_object_params_volume vol_pan;
+	struct ion_client *client;
+	struct ion_handle *input_buff_handle;
+	struct ion_handle *output_buff_handle;
 };
 
 static int auddec_dsp_config(struct audio *audio, int enable);
@@ -259,7 +262,7 @@
 static void audio_dsp_event(void *private, unsigned id, uint16_t *msg);
 static void audmp3_post_event(struct audio *audio, int type,
 	union msm_audio_event_payload payload);
-static unsigned long audmp3_pmem_fixup(struct audio *audio, void *addr,
+static unsigned long audmp3_ion_fixup(struct audio *audio, void *addr,
 				unsigned long len, int ref_up);
 
 static int rmt_put_resource(struct audio *audio)
@@ -1021,7 +1024,7 @@
 	if (drv_evt->event_type == AUDIO_EVENT_WRITE_DONE ||
 	    drv_evt->event_type == AUDIO_EVENT_READ_DONE) {
 		mutex_lock(&audio->lock);
-		audmp3_pmem_fixup(audio, drv_evt->payload.aio_buf.buf_addr,
+		audmp3_ion_fixup(audio, drv_evt->payload.aio_buf.buf_addr,
 				  drv_evt->payload.aio_buf.buf_len, 0);
 		mutex_unlock(&audio->lock);
 	}
@@ -1036,13 +1039,13 @@
 	return rc;
 }
 
-static int audmp3_pmem_check(struct audio *audio,
+static int audmp3_ion_check(struct audio *audio,
 		void *vaddr, unsigned long len)
 {
-	struct audmp3_pmem_region *region_elt;
-	struct audmp3_pmem_region t = { .vaddr = vaddr, .len = len };
+	struct audmp3_ion_region *region_elt;
+	struct audmp3_ion_region t = { .vaddr = vaddr, .len = len };
 
-	list_for_each_entry(region_elt, &audio->pmem_region_queue, list) {
+	list_for_each_entry(region_elt, &audio->ion_region_queue, list) {
 		if (CONTAINS(region_elt, &t) || CONTAINS(&t, region_elt) ||
 		    OVERLAPS(region_elt, &t)) {
 			MM_ERR("region (vaddr %p len %ld)"
@@ -1059,13 +1062,17 @@
 	return 0;
 }
 
-static int audmp3_pmem_add(struct audio *audio,
-	struct msm_audio_pmem_info *info)
+static int audmp3_ion_add(struct audio *audio,
+			struct msm_audio_ion_info *info)
 {
-	unsigned long paddr, kvaddr, len;
-	struct file *file;
-	struct audmp3_pmem_region *region;
+	ion_phys_addr_t paddr;
+	size_t len;
+	unsigned long kvaddr;
+	struct audmp3_ion_region *region;
 	int rc = -EINVAL;
+	struct ion_handle *handle;
+	unsigned long ionflag;
+	void *temp_ptr;
 
 	MM_DBG("\n"); /* Macro prints the file name and function */
 	region = kmalloc(sizeof(*region), GFP_KERNEL);
@@ -1075,55 +1082,84 @@
 		goto end;
 	}
 
-	if (get_pmem_file(info->fd, &paddr, &kvaddr, &len, &file)) {
-		kfree(region);
-		goto end;
+	handle = ion_import_dma_buf(audio->client, info->fd);
+	if (IS_ERR_OR_NULL(handle)) {
+		pr_err("%s: could not get handle of the given fd\n", __func__);
+		goto import_error;
 	}
 
-	rc = audmp3_pmem_check(audio, info->vaddr, len);
+	rc = ion_handle_get_flags(audio->client, handle, &ionflag);
+	if (rc) {
+		pr_err("%s: could not get flags for the handle\n", __func__);
+		goto flag_error;
+	}
+
+	temp_ptr = ion_map_kernel(audio->client, handle, ionflag);
+	if (IS_ERR_OR_NULL(temp_ptr)) {
+		pr_err("%s: could not get virtual address\n", __func__);
+		goto map_error;
+	}
+	kvaddr = (unsigned long) temp_ptr;
+
+	rc = ion_phys(audio->client, handle, &paddr, &len);
+	if (rc) {
+		pr_err("%s: could not get physical address\n", __func__);
+		goto ion_error;
+	}
+
+	rc = audmp3_ion_check(audio, info->vaddr, len);
 	if (rc < 0) {
-		put_pmem_file(file);
-		kfree(region);
-		goto end;
+		MM_ERR("audmp3_ion_check failed\n");
+		goto ion_error;
 	}
 
+	region->handle = handle;
 	region->vaddr = info->vaddr;
 	region->fd = info->fd;
 	region->paddr = paddr;
 	region->kvaddr = kvaddr;
 	region->len = len;
-	region->file = file;
 	region->ref_cnt = 0;
-	MM_DBG("add region paddr %lx vaddr %p, len %lu\n", region->paddr,
-			region->vaddr, region->len);
-	list_add_tail(&region->list, &audio->pmem_region_queue);
+	MM_DBG("[%p]:add region paddr %lx vaddr %p, len %lu kvaddr %lx\n",
+		audio, region->paddr, region->vaddr,
+		region->len, region->kvaddr);
+	list_add_tail(&region->list, &audio->ion_region_queue);
+	return rc;
+
+ion_error:
+	ion_unmap_kernel(audio->client, handle);
+map_error:
+flag_error:
+	ion_free(audio->client, handle);
+import_error:
+	kfree(region);
 end:
 	return rc;
 }
 
-static int audmp3_pmem_remove(struct audio *audio,
-	struct msm_audio_pmem_info *info)
+static int audmp3_ion_remove(struct audio *audio,
+			struct msm_audio_ion_info *info)
 {
-	struct audmp3_pmem_region *region;
+	struct audmp3_ion_region *region;
 	struct list_head *ptr, *next;
 	int rc = -EINVAL;
 
-	MM_DBG("info fd %d vaddr %p\n", info->fd, info->vaddr);
+	list_for_each_safe(ptr, next, &audio->ion_region_queue) {
+		region = list_entry(ptr, struct audmp3_ion_region, list);
 
-	list_for_each_safe(ptr, next, &audio->pmem_region_queue) {
-		region = list_entry(ptr, struct audmp3_pmem_region, list);
-
-		if ((region->fd == info->fd) &&
+		if (region != NULL && (region->fd == info->fd) &&
 		    (region->vaddr == info->vaddr)) {
 			if (region->ref_cnt) {
-				MM_DBG("region %p in use ref_cnt %d\n",
-						region, region->ref_cnt);
+				MM_DBG("%s[%p]:region %p in use ref_cnt %d\n",
+					__func__, audio, region,
+					region->ref_cnt);
 				break;
 			}
-			MM_DBG("remove region fd %d vaddr %p \n",
-					info->fd, info->vaddr);
+			MM_DBG("remove region fd %d vaddr %p\n",
+				info->fd, info->vaddr);
 			list_del(&region->list);
-			put_pmem_file(region->file);
+			ion_unmap_kernel(audio->client, region->handle);
+			ion_free(audio->client, region->handle);
 			kfree(region);
 			rc = 0;
 			break;
@@ -1133,23 +1169,20 @@
 	return rc;
 }
 
-static int audmp3_pmem_lookup_vaddr(struct audio *audio, void *addr,
-		     unsigned long len, struct audmp3_pmem_region **region)
+static int audmp3_ion_lookup_vaddr(struct audio *audio, void *addr,
+			unsigned long len, struct audmp3_ion_region **region)
 {
-	struct audmp3_pmem_region *region_elt;
-
+	struct audmp3_ion_region *region_elt;
 	int match_count = 0;
-
 	*region = NULL;
 
 	/* returns physical address or zero */
-	list_for_each_entry(region_elt, &audio->pmem_region_queue,
-		list) {
+	list_for_each_entry(region_elt, &audio->ion_region_queue, list) {
 		if (addr >= region_elt->vaddr &&
 		    addr < region_elt->vaddr + region_elt->len &&
 		    addr + len <= region_elt->vaddr + region_elt->len) {
 			/* offset since we could pass vaddr inside a registerd
-			 * pmem buffer
+			 * ion buffer
 			 */
 
 			match_count++;
@@ -1159,29 +1192,31 @@
 	}
 
 	if (match_count > 1) {
-		MM_ERR("multiple hits for vaddr %p, len %ld\n", addr, len);
-		list_for_each_entry(region_elt,
-		  &audio->pmem_region_queue, list) {
+		MM_ERR("%s[%p]:multiple hits for vaddr %p, len %ld\n",
+			 __func__, audio, addr, len);
+		list_for_each_entry(region_elt, &audio->ion_region_queue,
+					list) {
 			if (addr >= region_elt->vaddr &&
-			    addr < region_elt->vaddr + region_elt->len &&
-			    addr + len <= region_elt->vaddr + region_elt->len)
-				MM_ERR("\t%p, %ld --> %p\n", region_elt->vaddr,
+			addr < region_elt->vaddr + region_elt->len &&
+			addr + len <= region_elt->vaddr + region_elt->len)
+					MM_ERR("\t%s[%p]:%p, %ld --> %p\n",
+						__func__, audio,
+						region_elt->vaddr,
 						region_elt->len,
 						(void *)region_elt->paddr);
 		}
 	}
-
 	return *region ? 0 : -1;
 }
 
-unsigned long audmp3_pmem_fixup(struct audio *audio, void *addr,
+unsigned long audmp3_ion_fixup(struct audio *audio, void *addr,
 		    unsigned long len, int ref_up)
 {
-	struct audmp3_pmem_region *region;
+	struct audmp3_ion_region *region;
 	unsigned long paddr;
 	int ret;
 
-	ret = audmp3_pmem_lookup_vaddr(audio, addr, len, &region);
+	ret = audmp3_ion_lookup_vaddr(audio, addr, len, &region);
 	if (ret) {
 		MM_ERR("lookup (%p, %ld) failed\n", addr, len);
 		return 0;
@@ -1217,7 +1252,7 @@
 			buf_node->buf.buf_addr, buf_node->buf.buf_len,
 			buf_node->buf.data_len);
 
-	buf_node->paddr = audmp3_pmem_fixup(
+	buf_node->paddr = audmp3_ion_fixup(
 		audio, buf_node->buf.buf_addr,
 		buf_node->buf.buf_len, 1);
 
@@ -1276,6 +1311,10 @@
 	uint16_t enable_mask;
 	int enable;
 	int prev_state;
+	unsigned long ionflag = 0;
+	ion_phys_addr_t addr = 0;
+	struct ion_handle *handle = NULL;
+	size_t len = 0;
 
 	MM_DBG("cmd = %d\n", cmd);
 
@@ -1481,25 +1520,50 @@
 				MM_DBG("allocate PCM buffer %d\n",
 					config.buffer_count *
 					config.buffer_size);
-				audio->read_phys =
-						allocate_contiguous_ebi_nomap(
-							config.buffer_size *
-							config.buffer_count,
-							SZ_4K);
-				if (!audio->read_phys) {
+
+				handle = ion_alloc(audio->client,
+					(config.buffer_size *
+					config.buffer_count),
+					SZ_4K, ION_HEAP(ION_AUDIO_HEAP_ID));
+				if (IS_ERR_OR_NULL(handle)) {
+					MM_ERR("Unable to alloc I/P buffs\n");
 					rc = -ENOMEM;
 					break;
 				}
-				audio->map_v_read = ioremap(
-							audio->read_phys,
-							config.buffer_size *
-							config.buffer_count);
+
+				audio->input_buff_handle = handle;
+
+				rc = ion_phys(audio->client,
+					handle, &addr, &len);
+				if (rc) {
+					MM_ERR("Invalid phy: %x sz: %x\n",
+						(unsigned int) addr,
+						(unsigned int) len);
+					rc = -ENOMEM;
+					break;
+				} else {
+					MM_INFO("Got valid phy: %x sz: %x\n",
+						(unsigned int) addr,
+						(unsigned int) len);
+				}
+				audio->read_phys = (int32_t)addr;
+
+				rc = ion_handle_get_flags(audio->client,
+					handle, &ionflag);
+				if (rc) {
+					MM_ERR("could not get flags\n");
+					rc = -ENOMEM;
+					break;
+				}
+
+				audio->map_v_read = ion_map_kernel(
+					audio->client,
+					handle, ionflag);
 
 				if (IS_ERR(audio->map_v_read)) {
 					MM_ERR("map of read buf failed\n");
 					rc = -ENOMEM;
-					free_contiguous_memory_by_paddr(
-							audio->read_phys);
+					ion_free(audio->client, handle);
 				} else {
 					uint8_t index;
 					uint32_t offset = 0;
@@ -1539,23 +1603,23 @@
 		rc = audpp_pause(audio->dec_id, (int) arg);
 		break;
 
-	case AUDIO_REGISTER_PMEM: {
-			struct msm_audio_pmem_info info;
-			MM_DBG("AUDIO_REGISTER_PMEM\n");
+	case AUDIO_REGISTER_ION: {
+			struct msm_audio_ion_info info;
+			MM_DBG("AUDIO_REGISTER_ION\n");
 			if (copy_from_user(&info, (void *) arg, sizeof(info)))
 				rc = -EFAULT;
 			else
-				rc = audmp3_pmem_add(audio, &info);
+				rc = audmp3_ion_add(audio, &info);
 			break;
 		}
 
-	case AUDIO_DEREGISTER_PMEM: {
-			struct msm_audio_pmem_info info;
-			MM_DBG("AUDIO_DEREGISTER_PMEM\n");
+	case AUDIO_DEREGISTER_ION: {
+			struct msm_audio_ion_info info;
+			MM_DBG("AUDIO_DEREGISTER_ION\n");
 			if (copy_from_user(&info, (void *) arg, sizeof(info)))
 				rc = -EFAULT;
 			else
-				rc = audmp3_pmem_remove(audio, &info);
+				rc = audmp3_ion_remove(audio, &info);
 			break;
 		}
 	case AUDIO_ASYNC_WRITE:
@@ -1939,16 +2003,16 @@
 	}
 	return rc;
 }
-
-static void audmp3_reset_pmem_region(struct audio *audio)
+static void audmp3_reset_ion_region(struct audio *audio)
 {
-	struct audmp3_pmem_region *region;
+	struct audmp3_ion_region *region;
 	struct list_head *ptr, *next;
 
-	list_for_each_safe(ptr, next, &audio->pmem_region_queue) {
-		region = list_entry(ptr, struct audmp3_pmem_region, list);
+	list_for_each_safe(ptr, next, &audio->ion_region_queue) {
+		region = list_entry(ptr, struct audmp3_ion_region, list);
 		list_del(&region->list);
-		put_pmem_file(region->file);
+		ion_unmap_kernel(audio->client, region->handle);
+		ion_free(audio->client, region->handle);
 		kfree(region);
 	}
 
@@ -1966,7 +2030,7 @@
 		rmt_put_resource(audio);
 	audio->drv_ops.out_flush(audio);
 	audio->drv_ops.in_flush(audio);
-	audmp3_reset_pmem_region(audio);
+	audmp3_reset_ion_region(audio);
 
 	msm_adsp_put(audio->audplay);
 	audpp_adec_free(audio->dec_id);
@@ -1977,20 +2041,18 @@
 	audio->event_abort = 1;
 	wake_up(&audio->event_wait);
 	audmp3_reset_event_queue(audio);
-	MM_DBG("pmem area = 0x%8x\n", (unsigned int)audio->data);
-	if (audio->data) {
-		iounmap(audio->map_v_write);
-		free_contiguous_memory_by_paddr(audio->phys);
-	}
-	if (audio->read_data) {
-		iounmap(audio->map_v_read);
-		free_contiguous_memory_by_paddr(audio->read_phys);
-	}
 	mutex_unlock(&audio->lock);
 #ifdef CONFIG_DEBUG_FS
 	if (audio->dentry)
 		debugfs_remove(audio->dentry);
 #endif
+	if (!(audio->drv_status & ADRV_STATUS_AIO_INTF)) {
+		ion_unmap_kernel(audio->client, audio->output_buff_handle);
+		ion_free(audio->client, audio->output_buff_handle);
+		/* PCM read buffers exist only if feedback was configured */
+		if (audio->input_buff_handle != NULL) {
+			ion_unmap_kernel(audio->client,
+					audio->input_buff_handle);
+			ion_free(audio->client, audio->input_buff_handle);
+		}
+	}
+	ion_client_destroy(audio->client);
 	kfree(audio);
 	return 0;
 }
@@ -2129,7 +2191,13 @@
 	struct audio *audio = NULL;
 	int rc, i, dec_attrb, decid;
 	struct audmp3_event *e_node = NULL;
-	unsigned pmem_sz = DMASZ_MAX;
+	unsigned mem_sz = DMASZ_MAX;
+	unsigned long ionflag = 0;
+	ion_phys_addr_t addr = 0;
+	struct ion_handle *handle = NULL;
+	struct ion_client *client = NULL;
+	size_t len = 0;
+
 #ifdef CONFIG_DEBUG_FS
 	/* 4 bytes represents decoder number, 1 byte for terminate string */
 	char name[sizeof "msm_mp3_" + 5];
@@ -2171,43 +2239,57 @@
 	}
 	audio->dec_id = decid & MSM_AUD_DECODER_MASK;
 
+	client = msm_ion_client_create(UINT_MAX, "Audio_MP3_Client");
+	if (IS_ERR_OR_NULL(client)) {
+		pr_err("Unable to create ION client\n");
+		rc = -ENOMEM;
+		goto client_create_error;
+	}
+	audio->client = client;
+
 	/* Non AIO interface */
 	if (!(file->f_flags & O_NONBLOCK)) {
-		while (pmem_sz >= DMASZ_MIN) {
-			MM_DBG("pmemsz = %d\n", pmem_sz);
-			audio->phys = allocate_contiguous_ebi_nomap(pmem_sz,
-								SZ_4K);
-			if (audio->phys) {
-				audio->map_v_write = ioremap(
-							audio->phys, pmem_sz);
-				if (IS_ERR(audio->map_v_write)) {
-					MM_ERR("could not map write \
-						buffers, freeing instance \
-						0x%08x\n", (int)audio);
-					rc = -ENOMEM;
-					free_contiguous_memory_by_paddr(
-								audio->phys);
-					audpp_adec_free(audio->dec_id);
-					kfree(audio);
-					goto done;
-				}
-				audio->data = audio->map_v_write;
-				MM_DBG("write buf: phy addr 0x%08x kernel addr\
-					0x%08x\n", audio->phys,\
-					(int)audio->data);
-				break;
-			} else if (pmem_sz == DMASZ_MIN) {
-				MM_ERR("could not allocate write buffers, \
-						freeing instance 0x%08x\n",
-						(int)audio);
-				rc = -ENOMEM;
-				audpp_adec_free(audio->dec_id);
-				kfree(audio);
-				goto done;
-			} else
-				pmem_sz >>= 1;
+
+		MM_DBG("memsz = %d\n", mem_sz);
+
+		handle = ion_alloc(client, mem_sz, SZ_4K,
+			ION_HEAP(ION_AUDIO_HEAP_ID));
+		if (IS_ERR_OR_NULL(handle)) {
+			MM_ERR("Unable to allocate O/P buffers\n");
+			rc = -ENOMEM;
+			goto output_buff_alloc_error;
 		}
-		audio->out_dma_sz = pmem_sz;
+		audio->output_buff_handle = handle;
+
+		rc = ion_phys(client, handle, &addr, &len);
+		if (rc) {
+			MM_ERR("O/P buffers:Invalid phy: %x sz: %x\n",
+				(unsigned int) addr, (unsigned int) len);
+			goto output_buff_get_phys_error;
+		} else {
+			MM_INFO("O/P buffers:valid phy: %x sz: %x\n",
+				(unsigned int) addr, (unsigned int) len);
+		}
+		audio->phys = (int32_t)addr;
+
+
+		rc = ion_handle_get_flags(client, handle, &ionflag);
+		if (rc) {
+			MM_ERR("could not get flags for the handle\n");
+			goto output_buff_get_flags_error;
+		}
+
+		audio->map_v_write = ion_map_kernel(client, handle, ionflag);
+		if (IS_ERR(audio->map_v_write)) {
+			MM_ERR("could not map write buffers\n");
+			rc = -ENOMEM;
+			goto output_buff_map_error;
+		}
+		audio->data = audio->map_v_write;
+		MM_DBG("write buf: phy addr 0x%08x kernel addr 0x%08x\n",
+			audio->phys, (int)audio->data);
+
+		audio->out_dma_sz = mem_sz;
 	}
 
 	if (audio->pcm_feedback == TUNNEL_MODE_PLAYBACK) {
@@ -2276,7 +2358,7 @@
 	init_waitqueue_head(&audio->read_wait);
 	INIT_LIST_HEAD(&audio->out_queue);
 	INIT_LIST_HEAD(&audio->in_queue);
-	INIT_LIST_HEAD(&audio->pmem_region_queue);
+	INIT_LIST_HEAD(&audio->ion_region_queue);
 	INIT_LIST_HEAD(&audio->free_event_queue);
 	INIT_LIST_HEAD(&audio->event_queue);
 	init_waitqueue_head(&audio->wait);
@@ -2315,13 +2397,18 @@
 			break;
 		}
 	}
+
 done:
 	return rc;
 err:
-	if (audio->data) {
-		iounmap(audio->map_v_write);
-		free_contiguous_memory_by_paddr(audio->phys);
-	}
+	ion_unmap_kernel(client, audio->output_buff_handle);
+output_buff_map_error:
+output_buff_get_flags_error:
+output_buff_get_phys_error:
+	ion_free(client, audio->output_buff_handle);
+output_buff_alloc_error:
+	ion_client_destroy(client);
+client_create_error:
 	audpp_adec_free(audio->dec_id);
 	kfree(audio);
 	return rc;
diff --git a/arch/arm/mach-msm/qdsp5/audio_pcm.c b/arch/arm/mach-msm/qdsp5/audio_pcm.c
index 49c781f..880de09 100644
--- a/arch/arm/mach-msm/qdsp5/audio_pcm.c
+++ b/arch/arm/mach-msm/qdsp5/audio_pcm.c
@@ -38,7 +38,7 @@
 #include <linux/delay.h>
 #include <linux/earlysuspend.h>
 #include <linux/list.h>
-#include <linux/android_pmem.h>
+#include <linux/ion.h>
 #include <linux/slab.h>
 #include <linux/msm_audio.h>
 
@@ -134,11 +134,10 @@
 	union msm_audio_event_payload payload;
 };
 
-struct audpcm_pmem_region {
+struct audpcm_ion_region {
 	struct list_head list;
-	struct file *file;
+	struct ion_handle *handle;
 	int fd;
-	void *vaddr_ref;
 	void *vaddr;
 	unsigned long paddr;
 	unsigned long kvaddr;
@@ -222,8 +221,10 @@
 	struct mutex get_event_lock;
 	int event_abort;
 
-	struct list_head pmem_region_queue;
+	struct list_head ion_region_queue;
 	struct audpcm_drv_operations drv_ops;
+	struct ion_client *client;
+	struct ion_handle *output_buff_handle;
 };
 
 static int auddec_dsp_config(struct audio *audio, int enable);
@@ -232,7 +233,7 @@
 static void audio_dsp_event(void *private, unsigned id, uint16_t *msg);
 static void audpcm_post_event(struct audio *audio, int type,
 	union msm_audio_event_payload payload);
-static unsigned long audpcm_pmem_fixup(struct audio *audio, void *addr,
+static unsigned long audpcm_ion_fixup(struct audio *audio, void *addr,
 	unsigned long len, int ref_up);
 
 static int rmt_put_resource(struct audio *audio)
@@ -762,7 +763,7 @@
 
 	if (drv_evt && drv_evt->event_type == AUDIO_EVENT_WRITE_DONE) {
 		mutex_lock(&audio->lock);
-		audpcm_pmem_fixup(audio, drv_evt->payload.aio_buf.buf_addr,
+		audpcm_ion_fixup(audio, drv_evt->payload.aio_buf.buf_addr,
 				  drv_evt->payload.aio_buf.buf_len, 0);
 		mutex_unlock(&audio->lock);
 	}
@@ -772,104 +773,118 @@
 	return rc;
 }
 
-static int audpcm_pmem_check(struct audio *audio,
+static int audpcm_ion_check(struct audio *audio,
 		void *vaddr, unsigned long len)
 {
-	struct audpcm_pmem_region *region_elt;
-	struct audpcm_pmem_region t = { .vaddr = vaddr, .len = len };
+	struct audpcm_ion_region *region_elt;
+	struct audpcm_ion_region t = {.vaddr = vaddr, .len = len };
 
-	list_for_each_entry(region_elt, &audio->pmem_region_queue, list) {
+	list_for_each_entry(region_elt, &audio->ion_region_queue, list) {
 		if (CONTAINS(region_elt, &t) || CONTAINS(&t, region_elt) ||
 		    OVERLAPS(region_elt, &t)) {
-			MM_ERR("region (vaddr %p len %ld)"
+			MM_ERR("[%p]:region (vaddr %p len %ld)"
 				" clashes with registered region"
 				" (vaddr %p paddr %p len %ld)\n",
-				vaddr, len,
+				audio, vaddr, len,
 				region_elt->vaddr,
-				(void *)region_elt->paddr,
-				region_elt->len);
+				(void *)region_elt->paddr, region_elt->len);
 			return -EINVAL;
 		}
 	}
 
 	return 0;
 }
-
-static int audpcm_pmem_add(struct audio *audio,
-	struct msm_audio_pmem_info *info)
+static int audpcm_ion_add(struct audio *audio,
+			struct msm_audio_ion_info *info)
 {
-	unsigned long paddr, kvaddr, len;
-	struct file *file;
-	struct audpcm_pmem_region *region;
-	struct vm_area_struct *vma;
+	ion_phys_addr_t paddr;
+	size_t len;
+	unsigned long kvaddr;
+	struct audpcm_ion_region *region;
 	int rc = -EINVAL;
+	struct ion_handle *handle;
+	unsigned long ionflag;
 
-	MM_DBG("\n"); /* Macro prints the file name and function */
+	MM_ERR("\n"); /* Macro prints the file name and function */
 	region = kmalloc(sizeof(*region), GFP_KERNEL);
-	if (!region)
-		return -ENOMEM;
 
-	if (get_pmem_file(info->fd, &paddr, &kvaddr, &len, &file)) {
-		kfree(region);
-		return -EINVAL;
+	if (!region) {
+		rc = -ENOMEM;
+		goto end;
 	}
-
-	vma = find_vma_intersection(current->active_mm,
-		(unsigned long) info->vaddr, (unsigned long) info->vaddr+1);
-
-	if (vma && ((vma->vm_end - vma->vm_start) == len)) {
-		rc = audpcm_pmem_check(audio, (void *) vma->vm_start, len);
-		if (rc < 0) {
-			put_pmem_file(file);
-			kfree(region);
-			return rc;
-		}
-		region->vaddr = (void *) vma->vm_start;
-		region->vaddr_ref = info->vaddr;
-		MM_DBG("Valid VMA region vma->vm_start = 0x%8x \
-			vma->vm_end = 0x%8x\n", (int) vma->vm_start,
-			(int) vma->vm_end);
-	} else {
-		MM_ERR("No valid VMA region found\n");
-		put_pmem_file(file);
-		kfree(region);
-		return rc;
+	handle = ion_import_dma_buf(audio->client, info->fd);
+	if (IS_ERR_OR_NULL(handle)) {
+		pr_err("%s: could not get handle of the given fd\n", __func__);
+		goto import_error;
 	}
+	rc = ion_handle_get_flags(audio->client, handle, &ionflag);
+	if (rc) {
+		pr_err("%s: could not get flags for the handle\n", __func__);
+		goto flag_error;
+	}
+	kvaddr = (unsigned long)ion_map_kernel(audio->client, handle, ionflag);
+	if (IS_ERR_OR_NULL((void *)kvaddr)) {
+		pr_err("%s: could not get virtual address\n", __func__);
+		goto map_error;
+	}
+	rc = ion_phys(audio->client, handle, &paddr, &len);
+	if (rc) {
+		pr_err("%s: could not get physical address\n", __func__);
+		goto ion_error;
+	}
+	rc = audpcm_ion_check(audio, info->vaddr, len);
+	if (rc < 0) {
+		MM_ERR("audpcm_ion_check failed\n");
+		goto ion_error;
+	}
+	region->handle = handle;
+	region->vaddr = info->vaddr;
 	region->fd = info->fd;
 	region->paddr = paddr;
 	region->kvaddr = kvaddr;
 	region->len = len;
-	region->file = file;
 	region->ref_cnt = 0;
-	MM_DBG("add region paddr %lx vaddr %p, len %lu\n", region->paddr,
-			region->vaddr, region->len);
-	list_add_tail(&region->list, &audio->pmem_region_queue);
+	MM_DBG("[%p]:add region paddr %lx vaddr %p, len %lu kvaddr %lx\n",
+		audio, region->paddr, region->vaddr,
+		region->len, region->kvaddr);
+	list_add_tail(&region->list, &audio->ion_region_queue);
+
+	return rc;
+
+ion_error:
+	ion_unmap_kernel(audio->client, handle);
+map_error:
+flag_error:
+	ion_free(audio->client, handle);
+import_error:
+	kfree(region);
+end:
 	return rc;
 }
 
-static int audpcm_pmem_remove(struct audio *audio,
-	struct msm_audio_pmem_info *info)
+static int audpcm_ion_remove(struct audio *audio,
+			struct msm_audio_ion_info *info)
 {
-	struct audpcm_pmem_region *region;
+	struct audpcm_ion_region *region;
 	struct list_head *ptr, *next;
 	int rc = -EINVAL;
 
-	MM_DBG("info fd %d vaddr %p\n",	info->fd, info->vaddr);
+	list_for_each_safe(ptr, next, &audio->ion_region_queue) {
+		region = list_entry(ptr, struct audpcm_ion_region, list);
 
-	list_for_each_safe(ptr, next, &audio->pmem_region_queue) {
-		region = list_entry(ptr, struct audpcm_pmem_region, list);
-
-		if ((region->fd == info->fd) &&
-		    (region->vaddr_ref == info->vaddr)) {
+		if (region != NULL && (region->fd == info->fd) &&
+		    (region->vaddr == info->vaddr)) {
 			if (region->ref_cnt) {
-				MM_DBG("region %p in use ref_cnt %d\n",
-						region, region->ref_cnt);
+				MM_DBG("%s[%p]:region %p in use ref_cnt %d\n",
+					__func__, audio, region,
+					region->ref_cnt);
 				break;
 			}
-			MM_DBG("remove region fd %d vaddr %p \n", info->fd,
-					info->vaddr);
+			MM_DBG("remove region fd %d vaddr %p\n",
+				info->fd, info->vaddr);
 			list_del(&region->list);
-			put_pmem_file(region->file);
+			ion_unmap_kernel(audio->client, region->handle);
+			ion_free(audio->client, region->handle);
 			kfree(region);
 			rc = 0;
 			break;
@@ -879,24 +894,22 @@
 	return rc;
 }
 
-static int audpcm_pmem_lookup_vaddr(struct audio *audio, void *addr,
-		     unsigned long len, struct audpcm_pmem_region **region)
+static int audpcm_ion_lookup_vaddr(struct audio *audio, void *addr,
+			unsigned long len, struct audpcm_ion_region **region)
 {
-	struct audpcm_pmem_region *region_elt;
-
+	struct audpcm_ion_region *region_elt;
 	int match_count = 0;
-
 	*region = NULL;
 
 	/* returns physical address or zero */
-	list_for_each_entry(region_elt, &audio->pmem_region_queue,
-		list) {
+	list_for_each_entry(region_elt, &audio->ion_region_queue, list) {
 		if (addr >= region_elt->vaddr &&
 		    addr < region_elt->vaddr + region_elt->len &&
 		    addr + len <= region_elt->vaddr + region_elt->len) {
 			/* offset since we could pass vaddr inside a registerd
-			 * pmem buffer
+			 * ion buffer
 			 */
+
 			match_count++;
 			if (!*region)
 				*region = region_elt;
@@ -904,31 +917,33 @@
 	}
 
 	if (match_count > 1) {
-		MM_ERR("multiple hits for vaddr %p, len %ld\n", addr, len);
-		list_for_each_entry(region_elt,
-		  &audio->pmem_region_queue, list) {
+		MM_ERR("%s[%p]:multiple hits for vaddr %p, len %ld\n",
+			 __func__, audio, addr, len);
+		list_for_each_entry(region_elt, &audio->ion_region_queue,
+					list) {
 			if (addr >= region_elt->vaddr &&
 			    addr < region_elt->vaddr + region_elt->len &&
 			    addr + len <= region_elt->vaddr + region_elt->len)
-				MM_ERR("\t%p, %ld --> %p\n", region_elt->vaddr,
+					MM_ERR("\t%s[%p]:%p, %ld --> %p\n",
+						__func__, audio,
+						region_elt->vaddr,
 						region_elt->len,
 						(void *)region_elt->paddr);
 		}
 	}
-
 	return *region ? 0 : -1;
 }
-
-static unsigned long audpcm_pmem_fixup(struct audio *audio, void *addr,
+static unsigned long audpcm_ion_fixup(struct audio *audio, void *addr,
 		    unsigned long len, int ref_up)
 {
-	struct audpcm_pmem_region *region;
+	struct audpcm_ion_region *region;
 	unsigned long paddr;
 	int ret;
 
-	ret = audpcm_pmem_lookup_vaddr(audio, addr, len, &region);
+	ret = audpcm_ion_lookup_vaddr(audio, addr, len, &region);
 	if (ret) {
-		MM_ERR("lookup (%p, %ld) failed\n", addr, len);
+		MM_ERR("%s[%p]:lookup (%p, %ld) failed\n",
+			__func__, audio, addr, len);
 		return 0;
 	}
 	if (ref_up)
@@ -961,7 +976,7 @@
 			buf_node, dir, buf_node->buf.buf_addr,
 			buf_node->buf.buf_len, buf_node->buf.data_len);
 
-	buf_node->paddr = audpcm_pmem_fixup(
+	buf_node->paddr = audpcm_ion_fixup(
 		audio, buf_node->buf.buf_addr,
 		buf_node->buf.buf_len, 1);
 	if (dir) {
@@ -1125,23 +1140,23 @@
 		rc = audpp_pause(audio->dec_id, (int) arg);
 		break;
 
-	case AUDIO_REGISTER_PMEM: {
-			struct msm_audio_pmem_info info;
-			MM_DBG("AUDIO_REGISTER_PMEM\n");
+	case AUDIO_REGISTER_ION: {
+			struct msm_audio_ion_info info;
+			MM_DBG("AUDIO_REGISTER_ION\n");
 			if (copy_from_user(&info, (void *) arg, sizeof(info)))
 				rc = -EFAULT;
 			else
-				rc = audpcm_pmem_add(audio, &info);
+				rc = audpcm_ion_add(audio, &info);
 			break;
 		}
 
-	case AUDIO_DEREGISTER_PMEM: {
-			struct msm_audio_pmem_info info;
-			MM_DBG("AUDIO_DEREGISTER_PMEM\n");
+	case AUDIO_DEREGISTER_ION: {
+			struct msm_audio_ion_info info;
+			MM_DBG("AUDIO_DEREGISTER_ION\n");
 			if (copy_from_user(&info, (void *) arg, sizeof(info)))
 				rc = -EFAULT;
 			else
-				rc = audpcm_pmem_remove(audio, &info);
+				rc = audpcm_ion_remove(audio, &info);
 			break;
 		}
 
@@ -1340,15 +1355,16 @@
 	return rc;
 }
 
-static void audpcm_reset_pmem_region(struct audio *audio)
+static void audpcm_reset_ion_region(struct audio *audio)
 {
-	struct audpcm_pmem_region *region;
+	struct audpcm_ion_region *region;
 	struct list_head *ptr, *next;
 
-	list_for_each_safe(ptr, next, &audio->pmem_region_queue) {
-		region = list_entry(ptr, struct audpcm_pmem_region, list);
+	list_for_each_safe(ptr, next, &audio->ion_region_queue) {
+		region = list_entry(ptr, struct audpcm_ion_region, list);
 		list_del(&region->list);
-		put_pmem_file(region->file);
+		ion_unmap_kernel(audio->client, region->handle);
+		ion_free(audio->client, region->handle);
 		kfree(region);
 	}
 
@@ -1365,7 +1381,7 @@
 	if (audio->rmt_resource_released == 0)
 		rmt_put_resource(audio);
 	audio->drv_ops.out_flush(audio);
-	audpcm_reset_pmem_region(audio);
+	audpcm_reset_ion_region(audio);
 
 	msm_adsp_put(audio->audplay);
 	audpp_adec_free(audio->dec_id);
@@ -1376,16 +1392,12 @@
 	audio->event_abort = 1;
 	wake_up(&audio->event_wait);
 	audpcm_reset_event_queue(audio);
-	MM_DBG("pmem area = 0x%8x\n", (unsigned int)audio->data);
-	if (audio->data) {
-		iounmap(audio->map_v_write);
-		free_contiguous_memory_by_paddr(audio->phys);
-	}
 	mutex_unlock(&audio->lock);
 #ifdef CONFIG_DEBUG_FS
 	if (audio->dentry)
 		debugfs_remove(audio->dentry);
 #endif
+	ion_client_destroy(audio->client);
 	kfree(audio);
 	return 0;
 }
@@ -1506,7 +1518,13 @@
 	struct audio *audio = NULL;
 	int rc, i, dec_attrb, decid;
 	struct audpcm_event *e_node = NULL;
-	unsigned pmem_sz = DMASZ_MAX;
+	unsigned mem_sz = DMASZ_MAX;
+	unsigned long ionflag = 0;
+	ion_phys_addr_t addr = 0;
+	struct ion_handle *handle = NULL;
+	struct ion_client *client = NULL;
+	size_t len = 0;
+
 
 #ifdef CONFIG_DEBUG_FS
 	/* 4 bytes represents decoder number, 1 byte for terminate string */
@@ -1543,44 +1561,57 @@
 	}
 	audio->dec_id = decid & MSM_AUD_DECODER_MASK;
 
+	client = msm_ion_client_create(UINT_MAX, "Audio_PCM_Client");
+	if (IS_ERR_OR_NULL(client)) {
+		pr_err("Unable to create ION client\n");
+		rc = -ENOMEM;
+		goto client_create_error;
+	}
+	audio->client = client;
+
 	/* Non AIO interface */
 	if (!(file->f_flags & O_NONBLOCK)) {
-		while (pmem_sz >= DMASZ_MIN) {
-			MM_DBG("pmemsz = %d\n", pmem_sz);
-			audio->phys = allocate_contiguous_ebi_nomap(pmem_sz,
-								SZ_4K);
-			if (audio->phys) {
-				audio->map_v_write = ioremap(
-							audio->phys, pmem_sz);
-				if (IS_ERR(audio->map_v_write)) {
-					MM_ERR("could not map write\
-							buffers\n");
-					rc = -ENOMEM;
-					free_contiguous_memory_by_paddr(
-								audio->phys);
-					audpp_adec_free(audio->dec_id);
-					MM_DBG("audio instance 0x%08x\
-						freeing\n", (int)audio);
-					kfree(audio);
-					goto done;
-				}
-				audio->data = audio->map_v_write;
-				MM_DBG("write buf: phy addr 0x%08x kernel addr\
-					0x%08x\n", audio->phys,\
-					(int)audio->data);
-				break;
-			} else if (pmem_sz == DMASZ_MIN) {
-				MM_ERR("could not allocate write buffers\n");
-				rc = -ENOMEM;
-				audpp_adec_free(audio->dec_id);
-				MM_DBG("audio instance 0x%08x freeing\n",\
-					(int)audio);
-				kfree(audio);
-				goto done;
-			} else
-				pmem_sz >>= 1;
+
+		MM_DBG("memsz = %d\n", mem_sz);
+
+		handle = ion_alloc(client, mem_sz, SZ_4K,
+			ION_HEAP(ION_AUDIO_HEAP_ID));
+		if (IS_ERR_OR_NULL(handle)) {
+			MM_ERR("Unable to allocate O/P buffers\n");
+			rc = -ENOMEM;
+			goto output_buff_alloc_error;
 		}
-		audio->out_dma_sz = pmem_sz;
+		audio->output_buff_handle = handle;
+
+		rc = ion_phys(client, handle, &addr, &len);
+		if (rc) {
+			MM_ERR("O/P buffers:Invalid phy: %x sz: %x\n",
+				(unsigned int) addr, (unsigned int) len);
+			goto output_buff_get_phys_error;
+		} else {
+			MM_INFO("O/P buffers:valid phy: %x sz: %x\n",
+				(unsigned int) addr, (unsigned int) len);
+		}
+		audio->phys = (int32_t)addr;
+
+
+		rc = ion_handle_get_flags(client, handle, &ionflag);
+		if (rc) {
+			MM_ERR("could not get flags for the handle\n");
+			goto output_buff_get_flags_error;
+		}
+
+		audio->map_v_write = ion_map_kernel(client, handle, ionflag);
+		if (IS_ERR(audio->map_v_write)) {
+			MM_ERR("could not map write buffers\n");
+			rc = -ENOMEM;
+			goto output_buff_map_error;
+		}
+		audio->data = audio->map_v_write;
+		MM_DBG("write buf: phy addr 0x%08x kernel addr 0x%08x\n",
+			audio->phys, (int)audio->data);
+
+		audio->out_dma_sz = mem_sz;
 	}
 
 	rc = audmgr_open(&audio->audmgr);
@@ -1631,7 +1662,7 @@
 	spin_lock_init(&audio->dsp_lock);
 	init_waitqueue_head(&audio->write_wait);
 	INIT_LIST_HEAD(&audio->out_queue);
-	INIT_LIST_HEAD(&audio->pmem_region_queue);
+	INIT_LIST_HEAD(&audio->ion_region_queue);
 	INIT_LIST_HEAD(&audio->free_event_queue);
 	INIT_LIST_HEAD(&audio->event_queue);
 	init_waitqueue_head(&audio->wait);
@@ -1674,10 +1705,14 @@
 done:
 	return rc;
 err:
-	if (audio->data) {
-		iounmap(audio->map_v_write);
-		free_contiguous_memory_by_paddr(audio->phys);
-	}
+	ion_unmap_kernel(client, audio->output_buff_handle);
+output_buff_map_error:
+output_buff_get_flags_error:
+output_buff_get_phys_error:
+	ion_free(client, audio->output_buff_handle);
+output_buff_alloc_error:
+	ion_client_destroy(client);
+client_create_error:
 	audpp_adec_free(audio->dec_id);
 	MM_DBG("audio instance 0x%08x freeing\n", (int)audio);
 	kfree(audio);
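
Every converted release path tears down in the same order: each handle is unmapped, then freed, and the ION client is destroyed exactly once, after the last handle. Destroying the client first would free the handles underneath the driver, which is why the region lists are drained (audpcm_reset_ion_region() and friends) before ion_client_destroy() runs. A minimal sketch of that ordering, with an illustrative helper name:

	/* Mirrors the order used by the release paths in this series. */
	static void example_audio_ion_teardown(struct ion_client *client,
						struct ion_handle *handle,
						void *kvaddr)
	{
		if (kvaddr)
			ion_unmap_kernel(client, handle); /* drop kernel mapping first */
		if (!IS_ERR_OR_NULL(handle))
			ion_free(client, handle);	  /* then release the handle */
		ion_client_destroy(client);		  /* client last, after all handles */
	}
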
diff --git a/arch/arm/mach-msm/qdsp5/audio_pcm_in.c b/arch/arm/mach-msm/qdsp5/audio_pcm_in.c
index 851980d..716dbd2 100644
--- a/arch/arm/mach-msm/qdsp5/audio_pcm_in.c
+++ b/arch/arm/mach-msm/qdsp5/audio_pcm_in.c
@@ -2,7 +2,7 @@
  *
  * pcm audio input device
  *
- * Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+ * Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
  *
  * This code is based in part on arch/arm/mach-msm/qdsp5v2/audio_pcm_in.c,
  * Copyright (C) 2008 Google, Inc.
@@ -26,6 +26,7 @@
 #include <linux/kthread.h>
 #include <linux/wait.h>
 #include <linux/dma-mapping.h>
+#include <linux/ion.h>
 
 #include <linux/delay.h>
 
@@ -114,6 +115,8 @@
 	 * All the coeff should be passed from user space	    */
 	int iir_enable;
 	audpreproc_cmd_cfg_iir_tuning_filter_params iir_cfg;
+	struct ion_client *client;
+	struct ion_handle *output_buff_handle;
 };
 
 static int audpcm_in_dsp_enable(struct audio_in *audio, int enable);
@@ -764,9 +767,11 @@
 	audio->audpre = NULL;
 	audio->opened = 0;
 	if (audio->data) {
-		free_contiguous_memory((void *)audio->data);
+		ion_unmap_kernel(audio->client, audio->output_buff_handle);
+		ion_free(audio->client, audio->output_buff_handle);
 		audio->data = NULL;
 	}
+	ion_client_destroy(audio->client);
 	mutex_unlock(&audio->lock);
 	return 0;
 }
@@ -777,6 +782,11 @@
 {
 	struct audio_in *audio = &the_audio_in;
 	int rc;
+	size_t len = 0;
+	unsigned long ionflag = 0;
+	ion_phys_addr_t addr = 0;
+	struct ion_handle *handle = NULL;
+	struct ion_client *client = NULL;
 
 	int encid;
 	mutex_lock(&audio->lock);
@@ -827,23 +837,53 @@
 
 	audpcm_in_flush(audio);
 
-	audio->data = allocate_contiguous_memory(DMASZ, MEMTYPE_EBI1,
-				SZ_4K, 0);
-	if (!audio->data) {
-		MM_ERR("could not allocate read buffers\n");
+	client = msm_ion_client_create(UINT_MAX, "Audio_PCM_in_client");
+	if (IS_ERR_OR_NULL(client)) {
+		MM_ERR("Unable to create ION client\n");
 		rc = -ENOMEM;
-		goto evt_error;
-	} else {
-		audio->phys = memory_pool_node_paddr(audio->data);
-		if (!audio->phys) {
-			MM_ERR("could not get physical address\n");
-			rc = -ENOMEM;
-			free_contiguous_memory(audio->data);
-			goto evt_error;
-		}
-		MM_DBG("read buf: phy addr 0x%08x kernel addr 0x%08x\n",
-				audio->phys, (int)audio->data);
+		goto client_create_error;
 	}
+	audio->client = client;
+
+	MM_DBG("allocating mem sz = %d\n", DMASZ);
+	handle = ion_alloc(client, DMASZ, SZ_4K,
+		ION_HEAP(ION_AUDIO_HEAP_ID));
+	if (IS_ERR_OR_NULL(handle)) {
+		MM_ERR("Unable to allocate O/P buffers\n");
+		rc = -ENOMEM;
+		goto output_buff_alloc_error;
+	}
+
+	audio->output_buff_handle = handle;
+
+	rc = ion_phys(client, handle, &addr, &len);
+	if (rc) {
+		MM_ERR("O/P buffers:Invalid phy: %x sz: %x\n",
+			(unsigned int) addr, (unsigned int) len);
+		rc = -ENOMEM;
+		goto output_buff_get_phys_error;
+	} else {
+		MM_INFO("O/P buffers:valid phy: %x sz: %x\n",
+			(unsigned int) addr, (unsigned int) len);
+	}
+	audio->phys = (int32_t)addr;
+
+	rc = ion_handle_get_flags(client, handle, &ionflag);
+	if (rc) {
+		MM_ERR("could not get flags for the handle\n");
+		rc = -ENOMEM;
+		goto output_buff_get_flags_error;
+	}
+
+	audio->data = ion_map_kernel(client, handle, ionflag);
+	if (IS_ERR(audio->data)) {
+		MM_ERR("could not map read buffers, freeing instance 0x%08x\n",
+				(int)audio);
+		rc = -ENOMEM;
+		goto output_buff_map_error;
+	}
+	MM_DBG("read buf: phy addr 0x%08x kernel addr 0x%08x\n",
+		audio->phys, (int)audio->data);
 
 	file->private_data = audio;
 	audio->opened = 1;
@@ -851,7 +891,13 @@
 done:
 	mutex_unlock(&audio->lock);
 	return rc;
-evt_error:
+output_buff_map_error:
+output_buff_get_phys_error:
+output_buff_get_flags_error:
+	ion_free(client, audio->output_buff_handle);
+output_buff_alloc_error:
+	ion_client_destroy(client);
+client_create_error:
 	msm_adsp_put(audio->audrec);
 	msm_adsp_put(audio->audpre);
 	audpreproc_aenc_free(audio->enc_id);
diff --git a/arch/arm/mach-msm/qdsp5/audio_qcelp.c b/arch/arm/mach-msm/qdsp5/audio_qcelp.c
index f436759..dc257cd 100644
--- a/arch/arm/mach-msm/qdsp5/audio_qcelp.c
+++ b/arch/arm/mach-msm/qdsp5/audio_qcelp.c
@@ -34,10 +34,10 @@
 #include <linux/debugfs.h>
 #include <linux/earlysuspend.h>
 #include <linux/list.h>
-#include <linux/android_pmem.h>
 #include <linux/slab.h>
 #include <linux/msm_audio.h>
 #include <linux/memory_alloc.h>
+#include <linux/ion.h>
 
 #include <mach/msm_adsp.h>
 #include <mach/iommu.h>
@@ -170,6 +170,9 @@
 	int eq_needs_commit;
 	audpp_cmd_cfg_object_params_eqalizer eq;
 	audpp_cmd_cfg_object_params_volume vol_pan;
+	struct ion_client *client;
+	struct ion_handle *input_buff_handle;
+	struct ion_handle *output_buff_handle;
 };
 
 static int auddec_dsp_config(struct audio *audio, int enable);
@@ -767,6 +770,10 @@
 	uint16_t enable_mask;
 	int enable;
 	int prev_state;
+	unsigned long ionflag = 0;
+	ion_phys_addr_t addr = 0;
+	struct ion_handle *handle = NULL;
+	size_t len = 0;
 
 	MM_DBG("cmd = %d\n", cmd);
 
@@ -956,24 +963,53 @@
 			if ((config.pcm_feedback) && (!audio->read_data)) {
 				MM_DBG("allocate PCM buf %d\n",
 				config.buffer_count * config.buffer_size);
-				audio->read_phys =
-						allocate_contiguous_ebi_nomap(
-							config.buffer_size *
-							config.buffer_count,
-							SZ_4K);
-				if (!audio->read_phys) {
+				handle = ion_alloc(audio->client,
+					(config.buffer_size *
+					config.buffer_count),
+					SZ_4K, ION_HEAP(ION_AUDIO_HEAP_ID));
+				if (IS_ERR_OR_NULL(handle)) {
+					MM_ERR("Unable to alloc I/P buffs\n");
+					audio->input_buff_handle = NULL;
 					rc = -ENOMEM;
 					break;
 				}
-				audio->map_v_read = ioremap(
-							audio->read_phys,
-							config.buffer_size *
-							config.buffer_count);
+
+				audio->input_buff_handle = handle;
+
+					rc = ion_phys(audio->client,
+					handle, &addr, &len);
+				if (rc) {
+					MM_ERR("Invalid phy: %x sz: %x\n",
+						(unsigned int) addr,
+						(unsigned int) len);
+					ion_free(audio->client, handle);
+					audio->input_buff_handle = NULL;
+					rc = -ENOMEM;
+					break;
+				} else {
+					MM_INFO("Got valid phy: %x sz: %x\n",
+						(unsigned int) addr,
+						(unsigned int) len);
+				}
+				audio->read_phys = (int32_t)addr;
+
+				rc = ion_handle_get_flags(audio->client,
+					handle, &ionflag);
+				if (rc) {
+					MM_ERR("could not get flags\n");
+					ion_free(audio->client, handle);
+					audio->input_buff_handle = NULL;
+					rc = -ENOMEM;
+					break;
+				}
+				audio->map_v_read = ion_map_kernel(
+					audio->client,
+					handle, ionflag);
 				if (IS_ERR(audio->map_v_read)) {
 					MM_ERR("failed to map read buf\n");
+					ion_free(audio->client, handle);
+					audio->input_buff_handle = NULL;
 					rc = -ENOMEM;
-					free_contiguous_memory_by_paddr(
-							audio->read_phys);
 				} else {
 					uint8_t index;
 					uint32_t offset = 0;
@@ -1019,7 +1055,8 @@
 }
 
 /* Only useful in tunnel-mode */
-static int audqcelp_fsync(struct file *file, loff_t a, loff_t b, int datasync)
+static int audqcelp_fsync(struct file *file, loff_t a, loff_t b,
+	int datasync)
 {
 	struct audio *audio = file->private_data;
 	int rc = 0;
@@ -1287,12 +1324,13 @@
 	audio->event_abort = 1;
 	wake_up(&audio->event_wait);
 	audqcelp_reset_event_queue(audio);
-	iounmap(audio->map_v_write);
-	free_contiguous_memory_by_paddr(audio->phys);
-	if (audio->read_data) {
-		iounmap(audio->map_v_read);
-		free_contiguous_memory_by_paddr(audio->read_phys);
+	ion_unmap_kernel(audio->client, audio->output_buff_handle);
+	ion_free(audio->client, audio->output_buff_handle);
+	if (audio->input_buff_handle != NULL) {
+		ion_unmap_kernel(audio->client, audio->input_buff_handle);
+		ion_free(audio->client, audio->input_buff_handle);
 	}
+	ion_client_destroy(audio->client);
 	mutex_unlock(&audio->lock);
 #ifdef CONFIG_DEBUG_FS
 	if (audio->dentry)
@@ -1431,6 +1469,12 @@
 	struct audio *audio = NULL;
 	int rc, dec_attrb, decid, i;
 	struct audqcelp_event *e_node = NULL;
+	unsigned mem_sz = DMASZ;
+	unsigned long ionflag = 0;
+	ion_phys_addr_t addr = 0;
+	struct ion_handle *handle = NULL;
+	struct ion_client *client = NULL;
+	int len = 0;
 #ifdef CONFIG_DEBUG_FS
 	/* 4 bytes represents decoder number, 1 byte for terminate string */
 	char name[sizeof "msm_qcelp_" + 5];
@@ -1471,29 +1515,50 @@
 	}
 	audio->dec_id = decid & MSM_AUD_DECODER_MASK;
 
-	audio->phys = allocate_contiguous_ebi_nomap(DMASZ, SZ_4K);
-	if (!audio->phys) {
-		MM_ERR("could not allocate write buffers, freeing instance \
-				0x%08x\n", (int)audio);
+	client = msm_ion_client_create(UINT_MAX, "Audio_QCELP_Client");
+	if (IS_ERR_OR_NULL(client)) {
+		pr_err("Unable to create ION client\n");
 		rc = -ENOMEM;
-		audpp_adec_free(audio->dec_id);
-		kfree(audio);
-		goto done;
-	} else {
-		audio->map_v_write = ioremap(audio->phys, DMASZ);
-		if (IS_ERR(audio->map_v_write)) {
-			MM_ERR("could not map write buffers, freeing \
-					instance 0x%08x\n", (int)audio);
-			rc = -ENOMEM;
-			free_contiguous_memory_by_paddr(audio->phys);
-			audpp_adec_free(audio->dec_id);
-			kfree(audio);
-			goto done;
-		}
-		audio->data = audio->map_v_write;
-		MM_DBG("write buf: phy addr 0x%08x kernel addr 0x%08x\n",
-				audio->phys, (int)audio->data);
+		goto client_create_error;
 	}
+	audio->client = client;
+
+	handle = ion_alloc(client, mem_sz, SZ_4K,
+		ION_HEAP(ION_AUDIO_HEAP_ID));
+	if (IS_ERR_OR_NULL(handle)) {
+		MM_ERR("Unable to create allocate O/P buffers\n");
+		rc = -ENOMEM;
+		goto output_buff_alloc_error;
+	}
+	audio->output_buff_handle = handle;
+
+	rc = ion_phys(client, handle, &addr, &len);
+	if (rc) {
+		MM_ERR("O/P buffers:Invalid phy: %x sz: %x\n",
+			(unsigned int) addr, (unsigned int) len);
+		goto output_buff_get_phys_error;
+	} else {
+		MM_INFO("O/P buffers:valid phy: %x sz: %x\n",
+			(unsigned int) addr, (unsigned int) len);
+	}
+	audio->phys = (int32_t)addr;
+
+
+	rc = ion_handle_get_flags(client, handle, &ionflag);
+	if (rc) {
+		MM_ERR("could not get flags for the handle\n");
+		goto output_buff_get_flags_error;
+	}
+
+	audio->map_v_write = ion_map_kernel(client, handle, ionflag);
+	if (IS_ERR(audio->map_v_write)) {
+		MM_ERR("could not map write buffers\n");
+		rc = -ENOMEM;
+		goto output_buff_map_error;
+	}
+	audio->data = audio->map_v_write;
+	MM_DBG("write buf: phy addr 0x%08x kernel addr 0x%08x\n",
+		audio->phys, (int)audio->data);
 
 	if (audio->pcm_feedback == TUNNEL_MODE_PLAYBACK) {
 		rc = audmgr_open(&audio->audmgr);
@@ -1524,6 +1589,8 @@
 		goto err;
 	}
 
+	audio->input_buff_handle = NULL;
+
 	/* Initialize all locks of audio instance */
 	mutex_init(&audio->lock);
 	mutex_init(&audio->write_lock);
@@ -1580,8 +1647,14 @@
 done:
 	return rc;
 err:
-	iounmap(audio->map_v_write);
-	free_contiguous_memory_by_paddr(audio->phys);
+	ion_unmap_kernel(client, audio->output_buff_handle);
+output_buff_map_error:
+output_buff_get_phys_error:
+output_buff_get_flags_error:
+	ion_free(client, audio->output_buff_handle);
+output_buff_alloc_error:
+	ion_client_destroy(client);
+client_create_error:
 	audpp_adec_free(audio->dec_id);
 	kfree(audio);
 	return rc;
diff --git a/arch/arm/mach-msm/qdsp5/audio_qcelp_in.c b/arch/arm/mach-msm/qdsp5/audio_qcelp_in.c
index 92036e5..99a169d 100644
--- a/arch/arm/mach-msm/qdsp5/audio_qcelp_in.c
+++ b/arch/arm/mach-msm/qdsp5/audio_qcelp_in.c
@@ -33,6 +33,7 @@
 
 
 #include <linux/memory_alloc.h>
+#include <linux/ion.h>
 
 #include <asm/atomic.h>
 #include <asm/ioctls.h>
@@ -151,6 +152,9 @@
 	int enabled;
 	int running;
 	int stopped; /* set when stopped, cleared on flush */
+	struct ion_client *client;
+	struct ion_handle *input_buff_handle;
+	struct ion_handle *output_buff_handle;
 };
 
 struct audio_frame {
@@ -1004,7 +1008,8 @@
 	spin_unlock_irqrestore(&audio->dsp_lock, flags);
 }
 
-static int audqcelp_in_fsync(struct file *file, loff_t a, loff_t b, int datasync)
+static int audqcelp_in_fsync(struct file *file, loff_t a, loff_t b,
+	int datasync)
 
 {
 	struct audio_qcelp_in *audio = file->private_data;
@@ -1192,16 +1197,17 @@
 
 	if ((audio->mode == MSM_AUD_ENC_MODE_NONTUNNEL) && \
 	   (audio->out_data)) {
-		iounmap(audio->map_v_write);
-		free_contiguous_memory_by_paddr(audio->out_phys);
+		ion_unmap_kernel(audio->client, audio->input_buff_handle);
+		ion_free(audio->client, audio->input_buff_handle);
 		audio->out_data = NULL;
 	}
 
 	if (audio->data) {
-		iounmap(audio->map_v_read);
-		free_contiguous_memory_by_paddr(audio->phys);
+		ion_unmap_kernel(audio->client, audio->output_buff_handle);
+		ion_free(audio->client, audio->output_buff_handle);
 		audio->data = NULL;
 	}
+	ion_client_destroy(audio->client);
 	mutex_unlock(&audio->lock);
 	return 0;
 }
@@ -1214,6 +1220,11 @@
 	int rc;
 	int encid;
 	int dma_size = 0;
+	int len = 0;
+	unsigned long ionflag = 0;
+	ion_phys_addr_t addr = 0;
+	struct ion_handle *handle = NULL;
+	struct ion_client *client = NULL;
 
 	mutex_lock(&audio->lock);
 	if (audio->opened) {
@@ -1293,52 +1304,101 @@
 	audqcelp_in_flush(audio);
 	audqcelp_out_flush(audio);
 
-	audio->phys = allocate_contiguous_ebi_nomap(dma_size, SZ_4K);
-	if (!audio->phys) {
-		MM_ERR("could not allocate physical read buffers\n");
+	client = msm_ion_client_create(UINT_MAX, "Audio_QCELP_in_client");
+	if (IS_ERR_OR_NULL(client)) {
+		MM_ERR("Unable to create ION client\n");
 		rc = -ENOMEM;
-		goto evt_error;
-	} else {
-		audio->map_v_read = ioremap(audio->phys, dma_size);
-		if (IS_ERR(audio->map_v_read)) {
-			MM_ERR("could not map physical address\n");
-			rc = -ENOMEM;
-			free_contiguous_memory_by_paddr(audio->phys);
-			goto evt_error;
-		}
-		audio->data = audio->map_v_read;
-		MM_DBG("read buf: phy addr 0x%08x kernel addr 0x%08x\n",
-				audio->phys, (int)audio->data);
+		goto client_create_error;
 	}
+	audio->client = client;
+
+	MM_DBG("allocating mem sz = %d\n", dma_size);
+	handle = ion_alloc(client, dma_size, SZ_4K,
+		ION_HEAP(ION_AUDIO_HEAP_ID));
+	if (IS_ERR_OR_NULL(handle)) {
+		MM_ERR("Unable to create allocate O/P buffers\n");
+		rc = -ENOMEM;
+		goto output_buff_alloc_error;
+	}
+
+	audio->output_buff_handle = handle;
+
+	rc = ion_phys(client, handle, &addr, &len);
+	if (rc) {
+		MM_ERR("O/P buffers: Invalid phy: %x sz: %x\n",
+			(unsigned int) addr, (unsigned int) len);
+		rc = -ENOMEM;
+		goto output_buff_get_phys_error;
+	} else {
+		MM_INFO("O/P buffers: valid phy: %x sz: %x\n",
+			(unsigned int) addr, (unsigned int) len);
+	}
+	audio->phys = (int32_t)addr;
+
+	rc = ion_handle_get_flags(client, handle, &ionflag);
+	if (rc) {
+		MM_ERR("could not get flags for the handle\n");
+		rc = -ENOMEM;
+		goto output_buff_get_flags_error;
+	}
+
+	audio->map_v_read = ion_map_kernel(client, handle, ionflag);
+	if (IS_ERR(audio->map_v_read)) {
+		MM_ERR("could not map read buffers,freeing instance 0x%08x\n",
+				(int)audio);
+		rc = -ENOMEM;
+		goto output_buff_map_error;
+	}
+	audio->data = audio->map_v_read;
+	MM_DBG("read buf: phy addr 0x%08x kernel addr 0x%08x\n",
+		audio->phys, (int)audio->data);
 
 	audio->out_data = NULL;
 	if (audio->mode == MSM_AUD_ENC_MODE_NONTUNNEL) {
-		audio->out_phys = allocate_contiguous_ebi_nomap(BUFFER_SIZE,
-						SZ_4K);
-		if (!audio->out_phys) {
-			MM_ERR("could not allocate physical write buffers\n");
+		MM_DBG("allocating BUFFER_SIZE  %d\n", BUFFER_SIZE);
+		handle = ion_alloc(client, BUFFER_SIZE,
+				SZ_4K, ION_HEAP(ION_AUDIO_HEAP_ID));
+		if (IS_ERR_OR_NULL(handle)) {
+			MM_ERR("Unable to create allocate I/P buffers\n");
 			rc = -ENOMEM;
-			iounmap(audio->map_v_read);
-			free_contiguous_memory_by_paddr(audio->phys);
-			goto evt_error;
-		} else {
-			audio->map_v_write = ioremap(
-						audio->out_phys, BUFFER_SIZE);
-
-			if (IS_ERR(audio->map_v_write)) {
-				MM_ERR("could not map write phys address\n");
-				rc = -ENOMEM;
-				iounmap(audio->map_v_read);
-				free_contiguous_memory_by_paddr(audio->phys);
-				free_contiguous_memory_by_paddr(\
-							audio->out_phys);
-				goto evt_error;
-			}
-			audio->out_data = audio->map_v_write;
-			MM_DBG("wr buf: phy addr 0x%08x kernel addr 0x%08x\n",
-					audio->out_phys, (int)audio->out_data);
+			goto input_buff_alloc_error;
 		}
 
+		audio->input_buff_handle = handle;
+
+		rc = ion_phys(client, handle, &addr, &len);
+		if (rc) {
+			MM_ERR("I/P buffers: Invalid phy: %x sz: %x\n",
+				(unsigned int) addr, (unsigned int) len);
+			rc = -ENOMEM;
+			goto input_buff_get_phys_error;
+		} else {
+			MM_INFO("Got valid phy: %x sz: %x\n",
+				(unsigned int) addr,
+				(unsigned int) len);
+		}
+		audio->out_phys = (int32_t)addr;
+
+		rc = ion_handle_get_flags(client,
+			handle, &ionflag);
+		if (rc) {
+			MM_ERR("could not get flags for the handle\n");
+			rc = -ENOMEM;
+			goto input_buff_get_flags_error;
+		}
+
+		audio->map_v_write = ion_map_kernel(client,
+			handle, ionflag);
+		if (IS_ERR(audio->map_v_write)) {
+			MM_ERR("could not map write buffers\n");
+			rc = -ENOMEM;
+			goto input_buff_map_error;
+		}
+		audio->out_data = audio->map_v_write;
+		MM_DBG("write buf: phy addr 0x%08x kernel addr 0x%08x\n",
+					(unsigned int)addr,
+					(unsigned int)audio->out_data);
+
 		/* Initialize buffer */
 		audio->out[0].data = audio->out_data + 0;
 		audio->out[0].addr = audio->out_phys + 0;
@@ -1360,7 +1420,19 @@
 done:
 	mutex_unlock(&audio->lock);
 	return rc;
-evt_error:
+input_buff_map_error:
+input_buff_get_flags_error:
+input_buff_get_phys_error:
+	ion_free(client, audio->input_buff_handle);
+input_buff_alloc_error:
+	ion_unmap_kernel(client, audio->output_buff_handle);
+output_buff_map_error:
+output_buff_get_phys_error:
+output_buff_get_flags_error:
+	ion_free(client, audio->output_buff_handle);
+output_buff_alloc_error:
+	ion_client_destroy(client);
+client_create_error:
 	msm_adsp_put(audio->audrec);
 	if (audio->mode == MSM_AUD_ENC_MODE_TUNNEL)
 		msm_adsp_put(audio->audpre);
diff --git a/arch/arm/mach-msm/qdsp5/audio_wma.c b/arch/arm/mach-msm/qdsp5/audio_wma.c
index 162a6f1..e28e704 100644
--- a/arch/arm/mach-msm/qdsp5/audio_wma.c
+++ b/arch/arm/mach-msm/qdsp5/audio_wma.c
@@ -37,11 +37,11 @@
 #include <linux/delay.h>
 #include <linux/list.h>
 #include <linux/earlysuspend.h>
-#include <linux/android_pmem.h>
 #include <linux/slab.h>
 #include <linux/msm_audio.h>
 #include <linux/msm_audio_wma.h>
 #include <linux/memory_alloc.h>
+#include <linux/ion.h>
 
 #include <mach/msm_adsp.h>
 #include <mach/iommu.h>
@@ -187,6 +187,9 @@
 	int eq_needs_commit;
 	audpp_cmd_cfg_object_params_eqalizer eq;
 	audpp_cmd_cfg_object_params_volume vol_pan;
+	struct ion_client *client;
+	struct ion_handle *input_buff_handle;
+	struct ion_handle *output_buff_handle;
 };
 
 static int auddec_dsp_config(struct audio *audio, int enable);
@@ -815,6 +818,10 @@
 	uint16_t enable_mask;
 	int enable;
 	int prev_state;
+	unsigned long ionflag = 0;
+	ion_phys_addr_t addr = 0;
+	struct ion_handle *handle = NULL;
+	int len = 0;
 
 	MM_DBG("cmd = %d\n", cmd);
 
@@ -1034,24 +1041,54 @@
 				MM_DBG("allocate PCM buffer %d\n",
 						config.buffer_count *
 						config.buffer_size);
-				audio->read_phys =
-						allocate_contiguous_ebi_nomap(
-							config.buffer_size *
-							config.buffer_count,
-							SZ_4K);
-				if (!audio->read_phys) {
+				handle = ion_alloc(audio->client,
+					(config.buffer_size *
+					config.buffer_count),
+					SZ_4K, ION_HEAP(ION_AUDIO_HEAP_ID));
+				if (IS_ERR_OR_NULL(handle)) {
+					MM_ERR("Unable to alloc I/P buffs\n");
+					audio->input_buff_handle = NULL;
 					rc = -ENOMEM;
 					break;
 				}
-				audio->map_v_read = ioremap(
-						audio->read_phys,
-						config.buffer_size *
-						config.buffer_count);
+
+				audio->input_buff_handle = handle;
+
+				rc = ion_phys(audio->client,
+					handle, &addr, &len);
+				if (rc) {
+					MM_ERR("Invalid phy: %x sz: %x\n",
+						(unsigned int) addr,
+						(unsigned int) len);
+					ion_free(audio->client, handle);
+					audio->input_buff_handle = NULL;
+					rc = -ENOMEM;
+					break;
+				} else {
+					MM_INFO("Got valid phy: %x sz: %x\n",
+						(unsigned int) audio->read_phys,
+						(unsigned int) len);
+				}
+				audio->read_phys = (int32_t)addr;
+
+				rc = ion_handle_get_flags(audio->client,
+					handle, &ionflag);
+				if (rc) {
+					MM_ERR("could not get flags\n");
+					ion_free(audio->client, handle);
+					audio->input_buff_handle = NULL;
+					rc = -ENOMEM;
+					break;
+				}
+
+				audio->map_v_read = ion_map_kernel(
+					audio->client,
+					handle, ionflag);
 				if (IS_ERR(audio->map_v_read)) {
 					MM_ERR("map of read buf failed\n");
+					ion_free(audio->client, handle);
+					audio->input_buff_handle = NULL;
 					rc = -ENOMEM;
-					free_contiguous_memory_by_paddr(
-							audio->read_phys);
 				} else {
 					uint8_t index;
 					uint32_t offset = 0;
@@ -1098,7 +1135,8 @@
 }
 
 /* Only useful in tunnel-mode */
-static int audio_fsync(struct file *file, loff_t a, loff_t b, int datasync)
+static int audio_fsync(struct file *file, loff_t a, loff_t b,
+	int datasync)
 {
 	struct audio *audio = file->private_data;
 	struct buffer *frame;
@@ -1433,12 +1471,13 @@
 	audio->event_abort = 1;
 	wake_up(&audio->event_wait);
 	audwma_reset_event_queue(audio);
-	iounmap(audio->map_v_write);
-	free_contiguous_memory_by_paddr(audio->phys);
-	if (audio->read_data) {
-		iounmap(audio->map_v_read);
-		free_contiguous_memory_by_paddr(audio->read_phys);
+	ion_unmap_kernel(audio->client, audio->output_buff_handle);
+	ion_free(audio->client, audio->output_buff_handle);
+	if (audio->input_buff_handle != NULL) {
+		ion_unmap_kernel(audio->client, audio->input_buff_handle);
+		ion_free(audio->client, audio->input_buff_handle);
 	}
+	ion_client_destroy(audio->client);
 	mutex_unlock(&audio->lock);
 #ifdef CONFIG_DEBUG_FS
 	if (audio->dentry)
@@ -1580,8 +1619,13 @@
 {
 	struct audio *audio = NULL;
 	int rc, dec_attrb, decid, i;
-	unsigned pmem_sz = DMASZ_MAX;
+	unsigned mem_sz = DMASZ_MAX;
 	struct audwma_event *e_node = NULL;
+	unsigned long ionflag = 0;
+	ion_phys_addr_t addr = 0;
+	struct ion_handle *handle = NULL;
+	struct ion_client *client = NULL;
+	int len = 0;
 #ifdef CONFIG_DEBUG_FS
 	/* 4 bytes represents decoder number, 1 byte for terminate string */
 	char name[sizeof "msm_wma_" + 5];
@@ -1624,36 +1668,52 @@
 	}
 	audio->dec_id = decid & MSM_AUD_DECODER_MASK;
 
-	while (pmem_sz >= DMASZ_MIN) {
-		MM_DBG("pmemsz = %d\n", pmem_sz);
-		audio->phys = allocate_contiguous_ebi_nomap(pmem_sz, SZ_4K);
-		if (audio->phys) {
-			audio->map_v_write = ioremap(audio->phys, pmem_sz);
-			if (IS_ERR(audio->map_v_write)) {
-				MM_ERR("could not map write buffers, \
-						freeing instance 0x%08x\n",
-						(int)audio);
-				rc = -ENOMEM;
-				free_contiguous_memory_by_paddr(audio->phys);
-				audpp_adec_free(audio->dec_id);
-				kfree(audio);
-				goto done;
-			}
-			audio->data = audio->map_v_write;
-			MM_DBG("write buf: phy addr 0x%08x kernel addr \
-				0x%08x\n", audio->phys, (int)audio->data);
-			break;
-		} else if (pmem_sz == DMASZ_MIN) {
-			MM_ERR("could not allocate write buffers, freeing \
-					instance 0x%08x\n", (int)audio);
-			rc = -ENOMEM;
-			audpp_adec_free(audio->dec_id);
-			kfree(audio);
-			goto done;
-		} else
-		pmem_sz >>= 1;
+	client = msm_ion_client_create(UINT_MAX, "Audio_WMA_Client");
+	if (IS_ERR_OR_NULL(client)) {
+		pr_err("Unable to create ION client\n");
+		rc = -ENOMEM;
+		goto client_create_error;
 	}
-	audio->out_dma_sz = pmem_sz;
+	audio->client = client;
+
+	handle = ion_alloc(client, mem_sz, SZ_4K,
+		ION_HEAP(ION_AUDIO_HEAP_ID));
+	if (IS_ERR_OR_NULL(handle)) {
+		MM_ERR("Unable to create allocate O/P buffers\n");
+		rc = -ENOMEM;
+		goto output_buff_alloc_error;
+	}
+	audio->output_buff_handle = handle;
+
+	rc = ion_phys(client, handle, &addr, &len);
+	if (rc) {
+		MM_ERR("O/P buffers:Invalid phy: %x sz: %x\n",
+			(unsigned int) addr, (unsigned int) len);
+		goto output_buff_get_phys_error;
+	} else {
+		MM_INFO("O/P buffers:valid phy: %x sz: %x\n",
+			(unsigned int) addr, (unsigned int) len);
+	}
+	audio->phys = (int32_t)addr;
+
+
+	rc = ion_handle_get_flags(client, handle, &ionflag);
+	if (rc) {
+		MM_ERR("could not get flags for the handle\n");
+		goto output_buff_get_flags_error;
+	}
+
+	audio->map_v_write = ion_map_kernel(client, handle, ionflag);
+	if (IS_ERR(audio->map_v_write)) {
+		MM_ERR("could not map write buffers\n");
+		rc = -ENOMEM;
+		goto output_buff_map_error;
+	}
+	audio->data = audio->map_v_write;
+	MM_DBG("write buf: phy addr 0x%08x kernel addr 0x%08x\n",
+		audio->phys, (int)audio->data);
+
+	audio->out_dma_sz = mem_sz;
 
 	if (audio->pcm_feedback == TUNNEL_MODE_PLAYBACK) {
 		rc = audmgr_open(&audio->audmgr);
@@ -1684,6 +1744,7 @@
 		goto err;
 	}
 
+	audio->input_buff_handle = NULL;
 	mutex_init(&audio->lock);
 	mutex_init(&audio->write_lock);
 	mutex_init(&audio->read_lock);
@@ -1748,8 +1809,14 @@
 done:
 	return rc;
 err:
-	iounmap(audio->map_v_write);
-	free_contiguous_memory_by_paddr(audio->phys);
+	ion_unmap_kernel(client, audio->output_buff_handle);
+output_buff_map_error:
+output_buff_get_phys_error:
+output_buff_get_flags_error:
+	ion_free(client, audio->output_buff_handle);
+output_buff_alloc_error:
+	ion_client_destroy(client);
+client_create_error:
 	audpp_adec_free(audio->dec_id);
 	kfree(audio);
 	return rc;
diff --git a/arch/arm/mach-msm/qdsp5/audio_wmapro.c b/arch/arm/mach-msm/qdsp5/audio_wmapro.c
index 641b1c7..87afcf0 100644
--- a/arch/arm/mach-msm/qdsp5/audio_wmapro.c
+++ b/arch/arm/mach-msm/qdsp5/audio_wmapro.c
@@ -36,11 +36,11 @@
 #include <linux/delay.h>
 #include <linux/list.h>
 #include <linux/earlysuspend.h>
-#include <linux/android_pmem.h>
 #include <linux/slab.h>
 #include <linux/msm_audio.h>
 #include <linux/memory_alloc.h>
 #include <linux/msm_audio_wmapro.h>
+#include <linux/ion.h>
 
 #include <mach/msm_adsp.h>
 #include <mach/qdsp5/qdsp5audppcmdi.h>
@@ -186,6 +186,9 @@
 	int eq_needs_commit;
 	audpp_cmd_cfg_object_params_eqalizer eq;
 	audpp_cmd_cfg_object_params_volume vol_pan;
+	struct ion_client *client;
+	struct ion_handle *input_buff_handle;
+	struct ion_handle *output_buff_handle;
 };
 
 static int auddec_dsp_config(struct audio *audio, int enable);
@@ -803,6 +806,10 @@
 	uint16_t enable_mask;
 	int enable;
 	int prev_state;
+	unsigned long ionflag = 0;
+	ion_phys_addr_t addr = 0;
+	struct ion_handle *handle = NULL;
+	int len = 0;
 
 	MM_DBG("cmd = %d\n", cmd);
 
@@ -1031,25 +1038,54 @@
 				MM_DBG("allocate PCM buffer %d\n",
 						config.buffer_count *
 						config.buffer_size);
-				audio->read_phys =
-						allocate_contiguous_ebi_nomap(
-							config.buffer_size *
-							config.buffer_count,
-							SZ_4K);
-				if (!audio->read_phys) {
+				handle = ion_alloc(audio->client,
+					(config.buffer_size *
+					config.buffer_count),
+					SZ_4K, ION_HEAP(ION_AUDIO_HEAP_ID));
+				if (IS_ERR_OR_NULL(handle)) {
+					MM_ERR("Unable to alloc I/P buffs\n");
+					audio->input_buff_handle = NULL;
 					rc = -ENOMEM;
 					break;
 				}
-				audio->map_v_read = ioremap(
-						audio->read_phys,
-						config.buffer_size *
-						config.buffer_count);
 
+				audio->input_buff_handle = handle;
+
+				rc = ion_phys(audio->client,
+					handle, &addr, &len);
+				if (rc) {
+					MM_ERR("Invalid phy: %x sz: %x\n",
+						(unsigned int) addr,
+						(unsigned int) len);
+					ion_free(audio->client, handle);
+					audio->input_buff_handle = NULL;
+					rc = -ENOMEM;
+					break;
+				} else {
+					MM_INFO("Got valid phy: %x sz: %x\n",
+						(unsigned int) audio->read_phys,
+						(unsigned int) len);
+				}
+				audio->read_phys = (int32_t)addr;
+
+				rc = ion_handle_get_flags(audio->client,
+					handle, &ionflag);
+				if (rc) {
+					MM_ERR("could not get flags\n");
+					ion_free(audio->client, handle);
+					audio->input_buff_handle = NULL;
+					rc = -ENOMEM;
+					break;
+				}
+
+				audio->map_v_read = ion_map_kernel(
+					audio->client,
+					handle, ionflag);
 				if (IS_ERR(audio->map_v_read)) {
 					MM_ERR("map of read buf failed\n");
+					ion_free(audio->client, handle);
+					audio->input_buff_handle = NULL;
 					rc = -ENOMEM;
-					free_contiguous_memory_by_paddr(
-							audio->read_phys);
 				} else {
 					uint8_t index;
 					uint32_t offset = 0;
@@ -1096,7 +1132,8 @@
 }
 
 /* Only useful in tunnel-mode */
-static int audio_fsync(struct file *file, loff_t a, loff_t b, int datasync)
+static int audio_fsync(struct file *file, loff_t a, loff_t b,
+	int datasync)
 {
 	struct audio *audio = file->private_data;
 	struct buffer *frame;
@@ -1429,12 +1466,13 @@
 	audio->event_abort = 1;
 	wake_up(&audio->event_wait);
 	audwmapro_reset_event_queue(audio);
-	iounmap(audio->map_v_write);
-	free_contiguous_memory_by_paddr(audio->phys);
-	if (audio->read_data) {
-		iounmap(audio->map_v_read);
-		free_contiguous_memory_by_paddr(audio->read_phys);
+	ion_unmap_kernel(audio->client, audio->output_buff_handle);
+	ion_free(audio->client, audio->output_buff_handle);
+	if (audio->input_buff_handle != NULL) {
+		ion_unmap_kernel(audio->client, audio->input_buff_handle);
+		ion_free(audio->client, audio->input_buff_handle);
 	}
+	ion_client_destroy(audio->client);
 	mutex_unlock(&audio->lock);
 #ifdef CONFIG_DEBUG_FS
 	if (audio->dentry)
@@ -1576,8 +1614,13 @@
 {
 	struct audio *audio = NULL;
 	int rc, dec_attrb, decid, i;
-	unsigned pmem_sz = DMASZ_MAX;
+	unsigned mem_sz = DMASZ_MAX;
 	struct audwmapro_event *e_node = NULL;
+	unsigned long ionflag = 0;
+	ion_phys_addr_t addr = 0;
+	struct ion_handle *handle = NULL;
+	struct ion_client *client = NULL;
+	int len = 0;
 #ifdef CONFIG_DEBUG_FS
 	/* 4 bytes represents decoder number, 1 byte for terminate string */
 	char name[sizeof "msm_wmapro_" + 5];
@@ -1620,36 +1663,52 @@
 	}
 	audio->dec_id = decid & MSM_AUD_DECODER_MASK;
 
-	while (pmem_sz >= DMASZ_MIN) {
-		MM_DBG("pmemsz = %d\n", pmem_sz);
-		audio->phys = allocate_contiguous_ebi_nomap(pmem_sz, SZ_4K);
-		if (audio->phys) {
-			audio->map_v_write = ioremap(audio->phys, pmem_sz);
-			if (IS_ERR(audio->map_v_write)) {
-				MM_ERR("could not map write buffers, \
-						freeing instance 0x%08x\n",
-						(int)audio);
-				rc = -ENOMEM;
-				free_contiguous_memory_by_paddr(audio->phys);
-				audpp_adec_free(audio->dec_id);
-				kfree(audio);
-				goto done;
-			}
-			audio->data = audio->map_v_write;
-			MM_DBG("write buf: phy addr 0x%08x kernel addr \
-				0x%08x\n", audio->phys, (int)audio->data);
-			break;
-		} else if (pmem_sz == DMASZ_MIN) {
-			MM_ERR("could not allocate write buffers, freeing \
-					instance 0x%08x\n", (int)audio);
-			rc = -ENOMEM;
-			audpp_adec_free(audio->dec_id);
-			kfree(audio);
-			goto done;
-		} else
-		pmem_sz >>= 1;
+	client = msm_ion_client_create(UINT_MAX, "Audio_WMA_PRO_Client");
+	if (IS_ERR_OR_NULL(client)) {
+		pr_err("Unable to create ION client\n");
+		rc = -ENOMEM;
+		goto client_create_error;
 	}
-	audio->out_dma_sz = pmem_sz;
+	audio->client = client;
+
+	handle = ion_alloc(client, mem_sz, SZ_4K,
+		ION_HEAP(ION_AUDIO_HEAP_ID));
+	if (IS_ERR_OR_NULL(handle)) {
+		MM_ERR("Unable to create allocate O/P buffers\n");
+		rc = -ENOMEM;
+		goto output_buff_alloc_error;
+	}
+	audio->output_buff_handle = handle;
+
+	rc = ion_phys(client, handle, &addr, &len);
+	if (rc) {
+		MM_ERR("O/P buffers:Invalid phy: %x sz: %x\n",
+			(unsigned int) addr, (unsigned int) len);
+		goto output_buff_get_phys_error;
+	} else {
+		MM_INFO("O/P buffers:valid phy: %x sz: %x\n",
+			(unsigned int) addr, (unsigned int) len);
+	}
+	audio->phys = (int32_t)addr;
+
+
+	rc = ion_handle_get_flags(client, handle, &ionflag);
+	if (rc) {
+		MM_ERR("could not get flags for the handle\n");
+		goto output_buff_get_flags_error;
+	}
+
+	audio->map_v_write = ion_map_kernel(client, handle, ionflag);
+	if (IS_ERR(audio->map_v_write)) {
+		MM_ERR("could not map write buffers\n");
+		rc = -ENOMEM;
+		goto output_buff_map_error;
+	}
+	audio->data = audio->map_v_write;
+	MM_DBG("write buf: phy addr 0x%08x kernel addr 0x%08x\n",
+		audio->phys, (int)audio->data);
+
+	audio->out_dma_sz = mem_sz;
 
 	rc = audmgr_open(&audio->audmgr);
 	if (rc) {
@@ -1677,6 +1736,7 @@
 		goto err;
 	}
 
+	audio->input_buff_handle = NULL;
 	mutex_init(&audio->lock);
 	mutex_init(&audio->write_lock);
 	mutex_init(&audio->read_lock);
@@ -1735,8 +1795,14 @@
 done:
 	return rc;
 err:
-	iounmap(audio->map_v_write);
-	free_contiguous_memory_by_paddr(audio->phys);
+	ion_unmap_kernel(client, audio->output_buff_handle);
+output_buff_map_error:
+output_buff_get_phys_error:
+output_buff_get_flags_error:
+	ion_free(client, audio->output_buff_handle);
+output_buff_alloc_error:
+	ion_client_destroy(client);
+client_create_error:
 	audpp_adec_free(audio->dec_id);
 	kfree(audio);
 	return rc;
diff --git a/arch/arm/mach-msm/qdsp5v2/audio_lpa.c b/arch/arm/mach-msm/qdsp5v2/audio_lpa.c
index d5fb2e9..60f43b9 100644
--- a/arch/arm/mach-msm/qdsp5v2/audio_lpa.c
+++ b/arch/arm/mach-msm/qdsp5v2/audio_lpa.c
@@ -34,6 +34,7 @@
 #include <linux/slab.h>
 #include <linux/msm_audio.h>
 #include <mach/qdsp5v2/audio_dev_ctl.h>
+#include <linux/memory_alloc.h>
 
 #include <mach/qdsp5v2/qdsp5audppmsg.h>
 #include <mach/qdsp5v2/qdsp5audplaycmdi.h>
@@ -1410,7 +1411,7 @@
 	wake_up(&audio->event_wait);
 	audlpa_reset_event_queue(audio);
 	iounmap(audio->data);
-	pmem_kfree(audio->phys);
+	free_contiguous_memory_by_paddr(audio->phys);
 	mutex_unlock(&audio->lock);
 #ifdef CONFIG_DEBUG_FS
 	if (audio->dentry)
@@ -1655,7 +1656,7 @@
 	msm_adsp_put(audio->audplay);
 err:
 	iounmap(audio->data);
-	pmem_kfree(audio->phys);
+	free_contiguous_memory_by_paddr(audio->phys);
 	audpp_adec_free(audio->dec_id);
 	MM_INFO("audio instance 0x%08x freeing\n", (int)audio);
 	kfree(audio);
diff --git a/arch/arm/mach-msm/qdsp5v2/audio_mvs.c b/arch/arm/mach-msm/qdsp5v2/audio_mvs.c
index 99da836..1884b3c 100644
--- a/arch/arm/mach-msm/qdsp5v2/audio_mvs.c
+++ b/arch/arm/mach-msm/qdsp5v2/audio_mvs.c
@@ -304,6 +304,7 @@
 	uint32_t buf_free_cnt;
 	uint32_t rate_type;
 	uint32_t dtx_mode;
+	struct min_max_rate min_max_rate;
 
 	struct msm_rpc_endpoint *rpc_endpt;
 	uint32_t rpc_prog;
@@ -416,8 +417,10 @@
 
 		/* Set EVRC mode. */
 		memset(&set_voc_mode_msg, 0, sizeof(set_voc_mode_msg));
-		set_voc_mode_msg.min_rate = cpu_to_be32(audio->rate_type);
-		set_voc_mode_msg.max_rate = cpu_to_be32(audio->rate_type);
+		set_voc_mode_msg.min_rate =
+				cpu_to_be32(audio->min_max_rate.min_rate);
+		set_voc_mode_msg.max_rate =
+				cpu_to_be32(audio->min_max_rate.max_rate);
 
 		msm_rpc_setup_req(&set_voc_mode_msg.rpc_hdr,
 				  audio->rpc_prog,
@@ -1555,6 +1558,8 @@
 		mutex_lock(&audio->lock);
 		config.mvs_mode = audio->mvs_mode;
 		config.rate_type = audio->rate_type;
+		config.min_max_rate.min_rate = audio->min_max_rate.min_rate;
+		config.min_max_rate.max_rate = audio->min_max_rate.max_rate;
 		mutex_unlock(&audio->lock);
 
 		rc = copy_to_user((void *)arg, &config, sizeof(config));
@@ -1579,6 +1584,10 @@
 				audio->mvs_mode = config.mvs_mode;
 				audio->rate_type = config.rate_type;
 				audio->dtx_mode = config.dtx_mode;
+				audio->min_max_rate.min_rate =
+						config.min_max_rate.min_rate;
+				audio->min_max_rate.max_rate =
+						config.min_max_rate.max_rate;
 			} else {
 				pr_err("%s: Set confg called in state %d\n",
 				       __func__, audio->state);
diff --git a/arch/arm/mach-msm/qdsp6v2/audio_acdb.c b/arch/arm/mach-msm/qdsp6v2/audio_acdb.c
index e7a81d3..1092c77 100644
--- a/arch/arm/mach-msm/qdsp6v2/audio_acdb.c
+++ b/arch/arm/mach-msm/qdsp6v2/audio_acdb.c
@@ -617,11 +617,14 @@
 {
 	if (atomic64_read(&acdb_data.mem_len)) {
 		mutex_lock(&acdb_data.acdb_mutex);
+		atomic_set(&acdb_data.vocstrm_total_cal_size, 0);
+		atomic_set(&acdb_data.vocproc_total_cal_size, 0);
+		atomic_set(&acdb_data.vocvol_total_cal_size, 0);
+		atomic64_set(&acdb_data.mem_len, 0);
 		ion_unmap_kernel(acdb_data.ion_client, acdb_data.ion_handle);
 		ion_free(acdb_data.ion_client, acdb_data.ion_handle);
 		ion_client_destroy(acdb_data.ion_client);
 		mutex_unlock(&acdb_data.acdb_mutex);
-		atomic64_set(&acdb_data.mem_len, 0);
 	}
 	return 0;
 }
@@ -643,7 +646,7 @@
 		goto err;
 	}
 
-	acdb_data.ion_handle = ion_import_fd(acdb_data.ion_client,
+	acdb_data.ion_handle = ion_import_dma_buf(acdb_data.ion_client,
 		atomic_read(&acdb_data.map_handle));
 	if (IS_ERR_OR_NULL(acdb_data.ion_handle)) {
 		pr_err("%s: Could not import map handle!!!\n", __func__);
@@ -666,11 +669,11 @@
 		goto err_ion_handle;
 	}
 	kvaddr = (unsigned long)kvptr;
-	mutex_unlock(&acdb_data.acdb_mutex);
-
 	atomic64_set(&acdb_data.paddr, paddr);
 	atomic64_set(&acdb_data.kvaddr, kvaddr);
 	atomic64_set(&acdb_data.mem_len, mem_len);
+	mutex_unlock(&acdb_data.acdb_mutex);
+
 	pr_debug("%s done! paddr = 0x%lx, "
 		"kvaddr = 0x%lx, len = x%lx\n",
 		 __func__,
diff --git a/arch/arm/mach-msm/qdsp6v2/audio_lpa.c b/arch/arm/mach-msm/qdsp6v2/audio_lpa.c
index 0591a71..cbfee70 100644
--- a/arch/arm/mach-msm/qdsp6v2/audio_lpa.c
+++ b/arch/arm/mach-msm/qdsp6v2/audio_lpa.c
@@ -91,7 +91,6 @@
 struct audlpa_ion_region {
 	struct list_head list;
 	struct ion_handle *handle;
-	struct ion_client *client;
 	int fd;
 	void *vaddr;
 	unsigned long paddr;
@@ -463,7 +462,6 @@
 	struct audlpa_ion_region *region;
 	int rc = -EINVAL;
 	struct ion_handle *handle;
-	struct ion_client *client;
 	unsigned long ionflag;
 	void *temp_ptr;
 
@@ -475,32 +473,27 @@
 		goto end;
 	}
 
-	client = msm_ion_client_create(UINT_MAX, "Audio_LPA_Client");
-	if (IS_ERR_OR_NULL(client)) {
-		pr_err("Unable to create ION client\n");
-		goto client_error;
-	}
 
-	handle = ion_import_fd(client, info->fd);
+	handle = ion_import_dma_buf(audio->client, info->fd);
 	if (IS_ERR_OR_NULL(handle)) {
 		pr_err("%s: could not get handle of the given fd\n", __func__);
 		goto import_error;
 	}
 
-	rc = ion_handle_get_flags(client, handle, &ionflag);
+	rc = ion_handle_get_flags(audio->client, handle, &ionflag);
 	if (rc) {
 		pr_err("%s: could not get flags for the handle\n", __func__);
 		goto flag_error;
 	}
 
-	temp_ptr = ion_map_kernel(client, handle, ionflag);
+	temp_ptr = ion_map_kernel(audio->client, handle, ionflag);
 	if (IS_ERR_OR_NULL(temp_ptr)) {
 		pr_err("%s: could not get virtual address\n", __func__);
 		goto map_error;
 	}
 	kvaddr = (unsigned long) temp_ptr;
 
-	rc = ion_phys(client, handle, &paddr, &len);
+	rc = ion_phys(audio->client, handle, &paddr, &len);
 	if (rc) {
 		pr_err("%s: could not get physical address\n", __func__);
 		goto ion_error;
@@ -512,7 +505,6 @@
 		goto ion_error;
 	}
 
-	region->client = client;
 	region->handle = handle;
 	region->vaddr = info->vaddr;
 	region->fd = info->fd;
@@ -534,13 +526,11 @@
 	}
 
 ion_error:
-	ion_unmap_kernel(client, handle);
+	ion_unmap_kernel(audio->client, handle);
 map_error:
-	ion_free(client, handle);
 flag_error:
+	ion_free(audio->client, handle);
 import_error:
-	ion_client_destroy(client);
-client_error:
 	kfree(region);
 end:
 	return rc;
@@ -571,9 +561,8 @@
 					__func__, audio);
 
 			list_del(&region->list);
-			ion_unmap_kernel(region->client, region->handle);
-			ion_free(region->client, region->handle);
-			ion_client_destroy(region->client);
+			ion_unmap_kernel(audio->client, region->handle);
+			ion_free(audio->client, region->handle);
 			kfree(region);
 			rc = 0;
 			break;
@@ -1097,9 +1086,8 @@
 	list_for_each_safe(ptr, next, &audio->ion_region_queue) {
 		region = list_entry(ptr, struct audlpa_ion_region, list);
 		list_del(&region->list);
-		ion_unmap_kernel(region->client, region->handle);
-		ion_free(region->client, region->handle);
-		ion_client_destroy(region->client);
+		ion_unmap_kernel(audio->client, region->handle);
+		ion_free(audio->client, region->handle);
 		kfree(region);
 	}
 
@@ -1144,6 +1132,7 @@
 	auddev_unregister_evt_listner(AUDDEV_CLNT_DEC, audio->ac->session);
 	q6asm_audio_client_free(audio->ac);
 	audlpa_reset_ion_region(audio);
+	ion_client_destroy(audio->client);
 #ifdef CONFIG_HAS_EARLYSUSPEND
 	unregister_early_suspend(&audio->suspend_ctl.node);
 #endif
@@ -1391,6 +1380,12 @@
 	pr_info("%s: audio instance 0x%08x created session[%d]\n", __func__,
 						(int)audio,
 						audio->ac->session);
+	audio->client = msm_ion_client_create(UINT_MAX, "Audio_LPA_Client");
+	if (IS_ERR_OR_NULL(audio->client)) {
+		pr_err("Unable to create ION client\n");
+		goto err;
+	}
+	pr_debug("Allocating ION clinet in audio_open %p", audio->client);
 done:
 	return rc;
 err:
diff --git a/arch/arm/mach-msm/qdsp6v2/audio_lpa.h b/arch/arm/mach-msm/qdsp6v2/audio_lpa.h
index 34b53f2..93588b3 100644
--- a/arch/arm/mach-msm/qdsp6v2/audio_lpa.h
+++ b/arch/arm/mach-msm/qdsp6v2/audio_lpa.h
@@ -89,6 +89,7 @@
 	uint32_t device_events;
 
 	struct list_head ion_region_queue; /* protected by lock */
+	struct ion_client *client;
 
 	int eq_enable;
 	int eq_needs_commit;
diff --git a/arch/arm/mach-msm/qdsp6v2/audio_multi_aac.c b/arch/arm/mach-msm/qdsp6v2/audio_multi_aac.c
index 9253056..fbd94c5 100644
--- a/arch/arm/mach-msm/qdsp6v2/audio_multi_aac.c
+++ b/arch/arm/mach-msm/qdsp6v2/audio_multi_aac.c
@@ -42,13 +42,16 @@
 		struct asm_aac_cfg aac_cfg;
 		struct msm_audio_aac_config *aac_config;
 		uint32_t sbr_ps = 0x00;
+		aac_config = (struct msm_audio_aac_config *)audio->codec_cfg;
+		aac_cfg.ch_cfg = aac_config->channel_configuration;
+		aac_cfg.sample_rate =  audio->pcm_cfg.sample_rate;
 		pr_debug("%s: AUDIO_START session_id[%d]\n", __func__,
 						audio->ac->session);
 		if (audio->feedback == NON_TUNNEL_MODE) {
 			/* Configure PCM output block */
-			rc = q6asm_enc_cfg_blk_pcm(audio->ac,
-				0, /*native sampling rate*/
-				0 /*native channel count*/);
+			rc = q6asm_enc_cfg_blk_pcm_native(audio->ac,
+				aac_cfg.sample_rate,
+				aac_cfg.ch_cfg);
 			if (rc < 0) {
 				pr_err("pcm output block config failed\n");
 				break;
@@ -58,7 +61,6 @@
 		rc = q6asm_enable_sbrps(audio->ac, sbr_ps);
 		if (rc < 0)
 			pr_err("sbr-ps enable failed\n");
-		aac_config = (struct msm_audio_aac_config *)audio->codec_cfg;
 		if (aac_config->sbr_ps_on_flag)
 			aac_cfg.aot = AAC_ENC_MODE_EAAC_P;
 		else if (aac_config->sbr_on_flag)
@@ -87,8 +89,6 @@
 			aac_config->aac_scalefactor_data_resilience_flag;
 		aac_cfg.spectral_data_resilience =
 			aac_config->aac_spectral_data_resilience_flag;
-		aac_cfg.ch_cfg = aac_config->channel_configuration;
-		aac_cfg.sample_rate =  audio->pcm_cfg.sample_rate;
 
 		pr_debug("%s:format=%x aot=%d  ch=%d sr=%d\n",
 			__func__, aac_cfg.format,
@@ -146,16 +146,14 @@
 				AUDIO_AAC_DUAL_MONO_PL_PR) ||
 				(aac_config->dual_mono_mode >
 				AUDIO_AAC_DUAL_MONO_PL_SR)) {
-				pr_err("%s:AUDIO_SET_AAC_CONFIG: Invalid"
-					"dual_mono mode =%d\n", __func__,
-					aac_config->dual_mono_mode);
+				pr_err("%s:AUDIO_SET_AAC_CONFIG: Invalid dual_mono mode =%d\n",
+					 __func__, aac_config->dual_mono_mode);
 			} else {
 				/* convert the data from user into sce_left
 				 * and sce_right based on the definitions
 				 */
-				pr_debug("%s: AUDIO_SET_AAC_CONFIG: modify"
-					 "dual_mono mode =%d\n", __func__,
-					 aac_config->dual_mono_mode);
+				pr_debug("%s: AUDIO_SET_AAC_CONFIG: modify dual_mono mode =%d\n",
+					 __func__, aac_config->dual_mono_mode);
 				switch (aac_config->dual_mono_mode) {
 				case AUDIO_AAC_DUAL_MONO_PL_PR:
 					sce_left = 1;
@@ -178,8 +176,8 @@
 				rc = q6asm_cfg_dual_mono_aac(audio->ac,
 							sce_left, sce_right);
 				if (rc < 0)
-					pr_err("%s: asm cmd dualmono failed"
-						" rc=%d\n", __func__, rc);
+					pr_err("%s: asm cmd dualmono failed rc=%d\n",
+								 __func__, rc);
 			}			break;
 		}
 		break;
@@ -212,8 +210,8 @@
 	audio->codec_cfg = kzalloc(sizeof(struct msm_audio_aac_config),
 					GFP_KERNEL);
 	if (audio->codec_cfg == NULL) {
-		pr_err("%s: Could not allocate memory for aac"
-			"config\n", __func__);
+		pr_err("%s: Could not allocate memory for aac config\n",
+							 __func__);
 		kfree(audio);
 		return -ENOMEM;
 	}
diff --git a/arch/arm/mach-msm/qdsp6v2/audio_utils_aio.c b/arch/arm/mach-msm/qdsp6v2/audio_utils_aio.c
index 6a99be2..e9f5c0a 100644
--- a/arch/arm/mach-msm/qdsp6v2/audio_utils_aio.c
+++ b/arch/arm/mach-msm/qdsp6v2/audio_utils_aio.c
@@ -33,7 +33,7 @@
 	return 0;
 }
 
-ssize_t audio_aio_debug_read(struct file *file, char __user * buf,
+ssize_t audio_aio_debug_read(struct file *file, char __user *buf,
 				size_t count, loff_t *ppos)
 {
 	const int debug_bufmax = 4096;
@@ -67,7 +67,7 @@
 }
 #endif
 
-static int insert_eos_buf(struct q6audio_aio *audio,
+int insert_eos_buf(struct q6audio_aio *audio,
 		struct audio_aio_buffer_node *buf_node)
 {
 	struct dec_meta_out *eos_buf = buf_node->kvaddr;
@@ -93,7 +93,7 @@
 		sizeof(meta_data->meta_out_dsp[0]);
 }
 
-static void extract_meta_out_info(struct q6audio_aio *audio,
+void extract_meta_out_info(struct q6audio_aio *audio,
 		struct audio_aio_buffer_node *buf_node, int dir)
 {
 	struct dec_meta_out *meta_data = buf_node->kvaddr;
@@ -114,8 +114,7 @@
 			&buf_node->meta_info.meta_out,
 			sizeof(struct dec_meta_out));
 		meta_data->meta_out_dsp[0].nflags = 0x00000000;
-		pr_debug("%s[%p]:o/p: msw_ts 0x%8x lsw_ts 0x%8x nflags 0x%8x,"
-				"num_frames = %d\n",
+		pr_debug("%s[%p]:o/p: msw_ts 0x%8x lsw_ts 0x%8x nflags 0x%8x, num_frames = %d\n",
 		__func__, audio,
 		((struct dec_meta_out *)buf_node->kvaddr)->\
 			meta_out_dsp[0].msw_ts,
@@ -293,8 +292,8 @@
 		kfree(used_buf);
 		if (list_empty(&audio->out_queue) &&
 			(audio->drv_status & ADRV_STATUS_FSYNC)) {
-			pr_debug("%s[%p]: list is empty, reached EOS in"
-				"Tunnel\n", __func__, audio);
+			pr_debug("%s[%p]: list is empty, reached EOS in Tunnel\n",
+				 __func__, audio);
 			wake_up(&audio->write_wait);
 		}
 	} else {
@@ -304,60 +303,6 @@
 	}
 }
 
-/* Read buffer from DSP / Handle Ack from DSP */
-void audio_aio_async_read_ack(struct q6audio_aio *audio, uint32_t token,
-			uint32_t *payload)
-{
-	unsigned long flags;
-	union msm_audio_event_payload event_payload;
-	struct audio_aio_buffer_node *filled_buf;
-
-	/* No active flush in progress */
-	if (audio->rflush)
-		return;
-
-	/* Statistics of read */
-	atomic_add(payload[2], &audio->in_bytes);
-	atomic_add(payload[7], &audio->in_samples);
-
-	spin_lock_irqsave(&audio->dsp_lock, flags);
-	BUG_ON(list_empty(&audio->in_queue));
-	filled_buf = list_first_entry(&audio->in_queue,
-					struct audio_aio_buffer_node, list);
-	if (token == (filled_buf->token)) {
-		list_del(&filled_buf->list);
-		spin_unlock_irqrestore(&audio->dsp_lock, flags);
-		event_payload.aio_buf = filled_buf->buf;
-		/* Read done Buffer due to flush/normal condition
-		after EOS event, so append EOS buffer */
-		if (audio->eos_rsp == 0x1) {
-			event_payload.aio_buf.data_len =
-			insert_eos_buf(audio, filled_buf);
-			/* Reset flag back to indicate eos intimated */
-			audio->eos_rsp = 0;
-		} else {
-			filled_buf->meta_info.meta_out.num_of_frames =
-			payload[7];
-			event_payload.aio_buf.data_len = payload[2] + \
-						payload[3] + \
-						sizeof(struct dec_meta_out);
-			pr_debug("%s[%p]:nr of frames 0x%8x len=%d\n",
-				__func__, audio,
-				filled_buf->meta_info.meta_out.num_of_frames,
-				event_payload.aio_buf.data_len);
-			extract_meta_out_info(audio, filled_buf, 0);
-			audio->eos_rsp = 0;
-		}
-		audio_aio_post_event(audio, AUDIO_EVENT_READ_DONE,
-					event_payload);
-		kfree(filled_buf);
-	} else {
-		pr_err("%s[%p]:expected=%lx ret=%x\n",
-			__func__, audio, filled_buf->token, token);
-		spin_unlock_irqrestore(&audio->dsp_lock, flags);
-	}
-}
-
 /* ------------------- device --------------------- */
 void audio_aio_async_out_flush(struct q6audio_aio *audio)
 {
@@ -404,8 +349,8 @@
 		/* Forcefull send o/p eos buffer after flush, if no eos response
 		 * received by dsp even after sending eos command */
 		if ((audio->eos_rsp != 1) && audio->eos_flag) {
-			pr_debug("%s[%p]: send eos on o/p buffer during"
-				"flush\n", __func__, audio);
+			pr_debug("%s[%p]: send eos on o/p buffer during flush\n",
+				 __func__, audio);
 			payload.aio_buf = buf_node->buf;
 			payload.aio_buf.data_len =
 					insert_eos_buf(audio, buf_node);
@@ -459,9 +404,8 @@
 	list_for_each_safe(ptr, next, &audio->ion_region_queue) {
 		region = list_entry(ptr, struct audio_aio_ion_region, list);
 		list_del(&region->list);
-		ion_unmap_kernel(region->client, region->handle);
-		ion_free(region->client, region->handle);
-		ion_client_destroy(region->client);
+		ion_unmap_kernel(audio->client, region->handle);
+		ion_free(audio->client, region->handle);
 		kfree(region);
 	}
 
@@ -527,6 +471,7 @@
 	audio_aio_unmap_ion_region(audio);
 	audio_aio_disable(audio);
 	audio_aio_reset_ion_region(audio);
+	ion_client_destroy(audio->client);
 	audio->event_abort = 1;
 	wake_up(&audio->event_wait);
 	audio_aio_reset_event_queue(audio);
@@ -716,9 +661,7 @@
 	list_for_each_entry(region_elt, &audio->ion_region_queue, list) {
 		if (CONTAINS(region_elt, &t) || CONTAINS(&t, region_elt) ||
 			OVERLAPS(region_elt, &t)) {
-			pr_err("%s[%p]:region (vaddr %p len %ld)"
-				" clashes with registered region"
-				" (vaddr %p paddr %p len %ld)\n",
+			pr_err("%s[%p]:region (vaddr %p len %ld) clashes with registered region (vaddr %p paddr %p len %ld)\n",
 				__func__, audio, vaddr, len,
 				region_elt->vaddr,
 				(void *)region_elt->paddr, region_elt->len);
@@ -738,7 +681,6 @@
 	struct audio_aio_ion_region *region;
 	int rc = -EINVAL;
 	struct ion_handle *handle;
-	struct ion_client *client;
 	unsigned long ionflag;
 	void *temp_ptr;
 
@@ -750,32 +692,26 @@
 		goto end;
 	}
 
-	client = msm_ion_client_create(UINT_MAX, "Audio_Dec_Client");
-	if (IS_ERR_OR_NULL(client)) {
-		pr_err("Unable to create ION client\n");
-		goto client_error;
-	}
-
-	handle = ion_import_fd(client, info->fd);
+	handle = ion_import_dma_buf(audio->client, info->fd);
 	if (IS_ERR_OR_NULL(handle)) {
 		pr_err("%s: could not get handle of the given fd\n", __func__);
 		goto import_error;
 	}
 
-	rc = ion_handle_get_flags(client, handle, &ionflag);
+	rc = ion_handle_get_flags(audio->client, handle, &ionflag);
 	if (rc) {
 		pr_err("%s: could not get flags for the handle\n", __func__);
 		goto flag_error;
 	}
 
-	temp_ptr = ion_map_kernel(client, handle, ionflag);
+	temp_ptr = ion_map_kernel(audio->client, handle, ionflag);
 	if (IS_ERR_OR_NULL(temp_ptr)) {
 		pr_err("%s: could not get virtual address\n", __func__);
 		goto map_error;
 	}
 	kvaddr = (unsigned long)temp_ptr;
 
-	rc = ion_phys(client, handle, &paddr, &len);
+	rc = ion_phys(audio->client, handle, &paddr, &len);
 	if (rc) {
 		pr_err("%s: could not get physical address\n", __func__);
 		goto ion_error;
@@ -787,7 +723,6 @@
 		goto ion_error;
 	}
 
-	region->client = client;
 	region->handle = handle;
 	region->vaddr = info->vaddr;
 	region->fd = info->fd;
@@ -809,13 +744,11 @@
 	}
 
 ion_error:
-	ion_unmap_kernel(client, handle);
+	ion_unmap_kernel(audio->client, handle);
 map_error:
-	ion_free(client, handle);
 flag_error:
+	ion_free(audio->client, handle);
 import_error:
-	ion_client_destroy(client);
-client_error:
 	kfree(region);
 end:
 	return rc;
@@ -851,9 +784,8 @@
 					__func__, audio);
 
 			list_del(&region->list);
-			ion_unmap_kernel(region->client, region->handle);
-			ion_free(region->client, region->handle);
-			ion_client_destroy(region->client);
+			ion_unmap_kernel(audio->client, region->handle);
+			ion_free(audio->client, region->handle);
 			kfree(region);
 			rc = 0;
 			break;
@@ -870,8 +802,7 @@
 	struct audio_client *ac;
 	struct audio_aio_write_param param;
 
-	pr_debug("%s[%p]: Send write buff %p phy %lx len %d"
-		"meta_enable = %d\n",
+	pr_debug("%s[%p]: Send write buff %p phy %lx len %d meta_enable = %d\n",
 		__func__, audio, buf_node, buf_node->paddr,
 		buf_node->buf.data_len,
 		audio->buf_cfg.meta_info_enable);
@@ -973,8 +904,8 @@
 		return -EFAULT;
 	}
 
-	pr_debug("%s[%p]:node %p dir %x buf_addr %p buf_len %d data_len"
-		"%d\n", __func__, audio, buf_node, dir, buf_node->buf.buf_addr,
+	pr_debug("%s[%p]:node %p dir %x buf_addr %p buf_len %d data_len %d\n",
+		 __func__, audio, buf_node, dir, buf_node->buf.buf_addr,
 		buf_node->buf.buf_len, buf_node->buf.data_len);
 	buf_node->paddr = audio_aio_ion_fixup(audio, buf_node->buf.buf_addr,
 						buf_node->buf.buf_len, 1,
@@ -1130,6 +1061,13 @@
 			break;
 		}
 	}
+	audio->client = msm_ion_client_create(UINT_MAX, "Audio_Dec_Client");
+	if (IS_ERR_OR_NULL(audio->client)) {
+		pr_err("Unable to create ION client\n");
+		rc = -EACCES;
+		goto fail;
+	}
+	pr_debug("Ion client create in audio_aio_open %p", audio->client);
 	return 0;
 fail:
 	q6asm_audio_client_free(audio->ac);
@@ -1335,8 +1273,8 @@
 			break;
 		}
 		if (audio->feedback != NON_TUNNEL_MODE) {
-			pr_err("%s[%p]:Not sufficient permission to"
-				"change the playback mode\n", __func__, audio);
+			pr_err("%s[%p]:Not sufficient permission to change the playback mode\n",
+				 __func__, audio);
 			rc = -EACCES;
 			mutex_unlock(&audio->lock);
 			break;
@@ -1379,8 +1317,8 @@
 		break;
 	}
 	case AUDIO_GET_BUF_CFG: {
-		pr_debug("%s[%p]:session id %d: Get-buf-cfg: meta[%d]"
-			"framesperbuf[%d]\n", __func__, audio,
+		pr_debug("%s[%p]:session id %d: Get-buf-cfg: meta[%d] framesperbuf[%d]\n",
+			 __func__, audio,
 			audio->ac->session, audio->buf_cfg.meta_info_enable,
 			audio->buf_cfg.frames_per_buf);
 
diff --git a/arch/arm/mach-msm/qdsp6v2/audio_utils_aio.h b/arch/arm/mach-msm/qdsp6v2/audio_utils_aio.h
index 77288da..811baf0 100644
--- a/arch/arm/mach-msm/qdsp6v2/audio_utils_aio.h
+++ b/arch/arm/mach-msm/qdsp6v2/audio_utils_aio.h
@@ -114,7 +114,6 @@
 struct audio_aio_ion_region {
 	struct list_head list;
 	struct ion_handle *handle;
-	struct ion_client *client;
 	int fd;
 	void *vaddr;
 	unsigned long paddr;
@@ -173,6 +172,7 @@
 	struct list_head free_event_queue;
 	struct list_head event_queue;
 	struct list_head ion_region_queue;     /* protected by lock */
+	struct ion_client *client;
 	struct audio_aio_drv_operations drv_ops;
 	union msm_audio_event_payload eos_write_payload;
 
@@ -195,6 +195,12 @@
 void audio_aio_async_read_ack(struct q6audio_aio *audio, uint32_t token,
 			uint32_t *payload);
 
+int insert_eos_buf(struct q6audio_aio *audio,
+		struct audio_aio_buffer_node *buf_node);
+
+void extract_meta_out_info(struct q6audio_aio *audio,
+		struct audio_aio_buffer_node *buf_node, int dir);
+
 int audio_aio_open(struct q6audio_aio *audio, struct file *file);
 int audio_aio_enable(struct q6audio_aio  *audio);
 void audio_aio_post_event(struct q6audio_aio *audio, int type,
@@ -206,6 +212,6 @@
 void audio_aio_async_in_flush(struct q6audio_aio *audio);
 #ifdef CONFIG_DEBUG_FS
 ssize_t audio_aio_debug_open(struct inode *inode, struct file *file);
-ssize_t audio_aio_debug_read(struct file *file, char __user * buf,
+ssize_t audio_aio_debug_read(struct file *file, char __user *buf,
 			size_t count, loff_t *ppos);
 #endif
diff --git a/arch/arm/mach-msm/qdsp6v2/q6audio_common.h b/arch/arm/mach-msm/qdsp6v2/q6audio_common.h
index fc20847..e4291e7 100644
--- a/arch/arm/mach-msm/qdsp6v2/q6audio_common.h
+++ b/arch/arm/mach-msm/qdsp6v2/q6audio_common.h
@@ -15,7 +15,7 @@
 #ifndef __Q6_AUDIO_COMMON_H__
 #define __Q6_AUDIO_COMMON_H__
 
-#ifdef CONFIG_ARCH_MSMCOPPER
+#ifdef CONFIG_ARCH_MSM8974
 #include <sound/apr_audio-v2.h>
 #include <sound/q6asm-v2.h>
 #else
diff --git a/arch/arm/mach-msm/qdsp6v2/q6audio_v1_aio.c b/arch/arm/mach-msm/qdsp6v2/q6audio_v1_aio.c
index 112de62..078eea8 100644
--- a/arch/arm/mach-msm/qdsp6v2/q6audio_v1_aio.c
+++ b/arch/arm/mach-msm/qdsp6v2/q6audio_v1_aio.c
@@ -97,9 +97,8 @@
 				"payload[2] = %d, payload[3] = %d\n", __func__,
 				audio, payload[0], payload[1], payload[2],
 				payload[3]);
-		pr_debug("%s[%p]: ASM_DATA_EVENT_SR_CM_CHANGE_NOTIFY, "
-				"sr(prev) = %d, chl(prev) = %d,",
-				__func__, audio, audio->pcm_cfg.sample_rate,
+		pr_debug("%s[%p]: ASM_DATA_EVENT_SR_CM_CHANGE_NOTIFY, sr(prev) = %d, chl(prev) = %d,",
+		__func__, audio, audio->pcm_cfg.sample_rate,
 		audio->pcm_cfg.channel_count);
 		audio->pcm_cfg.sample_rate = payload[0];
 		audio->pcm_cfg.channel_count = payload[1] & 0xFFFF;
@@ -111,3 +110,57 @@
 		break;
 	}
 }
+
+/* Read buffer from DSP / Handle Ack from DSP */
+void audio_aio_async_read_ack(struct q6audio_aio *audio, uint32_t token,
+			uint32_t *payload)
+{
+	unsigned long flags;
+	union msm_audio_event_payload event_payload;
+	struct audio_aio_buffer_node *filled_buf;
+
+	/* No active flush in progress */
+	if (audio->rflush)
+		return;
+
+	/* Statistics of read */
+	atomic_add(payload[2], &audio->in_bytes);
+	atomic_add(payload[7], &audio->in_samples);
+
+	spin_lock_irqsave(&audio->dsp_lock, flags);
+	BUG_ON(list_empty(&audio->in_queue));
+	filled_buf = list_first_entry(&audio->in_queue,
+					struct audio_aio_buffer_node, list);
+	if (token == (filled_buf->token)) {
+		list_del(&filled_buf->list);
+		spin_unlock_irqrestore(&audio->dsp_lock, flags);
+		event_payload.aio_buf = filled_buf->buf;
+		/* Read done Buffer due to flush/normal condition
+		after EOS event, so append EOS buffer */
+		if (audio->eos_rsp == 0x1) {
+			event_payload.aio_buf.data_len =
+			insert_eos_buf(audio, filled_buf);
+			/* Reset flag back to indicate eos intimated */
+			audio->eos_rsp = 0;
+		} else {
+			filled_buf->meta_info.meta_out.num_of_frames =
+			payload[7];
+			event_payload.aio_buf.data_len = payload[2] + \
+						payload[3] + \
+						sizeof(struct dec_meta_out);
+			pr_debug("%s[%p]:nr of frames 0x%8x len=%d\n",
+				__func__, audio,
+				filled_buf->meta_info.meta_out.num_of_frames,
+				event_payload.aio_buf.data_len);
+			extract_meta_out_info(audio, filled_buf, 0);
+			audio->eos_rsp = 0;
+		}
+		audio_aio_post_event(audio, AUDIO_EVENT_READ_DONE,
+					event_payload);
+		kfree(filled_buf);
+	} else {
+		pr_err("%s[%p]:expected=%lx ret=%x\n",
+			__func__, audio, filled_buf->token, token);
+		spin_unlock_irqrestore(&audio->dsp_lock, flags);
+	}
+}
diff --git a/arch/arm/mach-msm/qdsp6v2/q6audio_v2_aio.c b/arch/arm/mach-msm/qdsp6v2/q6audio_v2_aio.c
index aab7b19..ad4fc6f 100644
--- a/arch/arm/mach-msm/qdsp6v2/q6audio_v2_aio.c
+++ b/arch/arm/mach-msm/qdsp6v2/q6audio_v2_aio.c
@@ -91,14 +91,13 @@
 		break;
 	case ASM_DATA_EVENT_SR_CM_CHANGE_NOTIFY:
 	case ASM_DATA_EVENT_ENC_SR_CM_CHANGE_NOTIFY:
-
 		pr_debug("%s[%p]: ASM_DATA_EVENT_SR_CM_CHANGE_NOTIFY, payload[0]-sr = %d, payload[1]-chl = %d, payload[2] = %d, payload[3] = %d\n",
 					 __func__, audio, payload[0],
 					 payload[1], payload[2], payload[3]);
 
 		pr_debug("%s[%p]: ASM_DATA_EVENT_SR_CM_CHANGE_NOTIFY, sr(prev) = %d, chl(prev) = %d,",
-				__func__, audio, audio->pcm_cfg.sample_rate,
-				audio->pcm_cfg.channel_count);
+		__func__, audio, audio->pcm_cfg.sample_rate,
+		audio->pcm_cfg.channel_count);
 
 		audio->pcm_cfg.sample_rate = payload[0];
 		audio->pcm_cfg.channel_count = payload[1] & 0xFFFF;
@@ -110,3 +109,61 @@
 		break;
 	}
 }
+
+/* Read buffer from DSP / Handle Ack from DSP */
+void audio_aio_async_read_ack(struct q6audio_aio *audio, uint32_t token,
+			uint32_t *payload)
+{
+	unsigned long flags;
+	union msm_audio_event_payload event_payload;
+	struct audio_aio_buffer_node *filled_buf;
+	pr_debug("%s\n", __func__);
+
+	/* No active flush in progress */
+	if (audio->rflush)
+		return;
+
+	/* Statistics of read */
+	atomic_add(payload[4], &audio->in_bytes);
+	atomic_add(payload[9], &audio->in_samples);
+
+	spin_lock_irqsave(&audio->dsp_lock, flags);
+	BUG_ON(list_empty(&audio->in_queue));
+	filled_buf = list_first_entry(&audio->in_queue,
+					struct audio_aio_buffer_node, list);
+
+	pr_debug("%s token: 0x[%d], filled_buf->token: 0x[%lu]",
+				 __func__, token, filled_buf->token);
+	if (token == (filled_buf->token)) {
+		list_del(&filled_buf->list);
+		spin_unlock_irqrestore(&audio->dsp_lock, flags);
+		event_payload.aio_buf = filled_buf->buf;
+		/* Read done Buffer due to flush/normal condition
+		after EOS event, so append EOS buffer */
+		if (audio->eos_rsp == 0x1) {
+			event_payload.aio_buf.data_len =
+			insert_eos_buf(audio, filled_buf);
+			/* Reset flag back to indicate eos intimated */
+			audio->eos_rsp = 0;
+		} else {
+			filled_buf->meta_info.meta_out.num_of_frames\
+							 = payload[9];
+			event_payload.aio_buf.data_len = payload[4]\
+				 + payload[5] + sizeof(struct dec_meta_out);
+			pr_debug("%s[%p]:nr of frames 0x%8x len=%d\n",
+				__func__, audio,
+				filled_buf->meta_info.meta_out.num_of_frames,
+				event_payload.aio_buf.data_len);
+			extract_meta_out_info(audio, filled_buf, 0);
+			audio->eos_rsp = 0;
+		}
+		pr_debug("%s, posting read done to the app here\n", __func__);
+		audio_aio_post_event(audio, AUDIO_EVENT_READ_DONE,
+					event_payload);
+		kfree(filled_buf);
+	} else {
+		pr_err("%s[%p]:expected=%lx ret=%x\n",
+			__func__, audio, filled_buf->token, token);
+		spin_unlock_irqrestore(&audio->dsp_lock, flags);
+	}
+}
diff --git a/arch/arm/mach-msm/qdss-etb.c b/arch/arm/mach-msm/qdss-etb.c
deleted file mode 100644
index 7837af0..0000000
--- a/arch/arm/mach-msm/qdss-etb.c
+++ /dev/null
@@ -1,412 +0,0 @@
-/* Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/types.h>
-#include <linux/device.h>
-#include <linux/platform_device.h>
-#include <linux/io.h>
-#include <linux/err.h>
-#include <linux/fs.h>
-#include <linux/miscdevice.h>
-#include <linux/uaccess.h>
-#include <linux/slab.h>
-#include <linux/delay.h>
-#include <linux/mutex.h>
-
-#include "qdss-priv.h"
-
-#define etb_writel(etb, val, off)	__raw_writel((val), etb.base + off)
-#define etb_readl(etb, off)		__raw_readl(etb.base + off)
-
-#define ETB_RAM_DEPTH_REG	(0x004)
-#define ETB_STATUS_REG		(0x00C)
-#define ETB_RAM_READ_DATA_REG	(0x010)
-#define ETB_RAM_READ_POINTER	(0x014)
-#define ETB_RAM_WRITE_POINTER	(0x018)
-#define ETB_TRG			(0x01C)
-#define ETB_CTL_REG		(0x020)
-#define ETB_RWD_REG		(0x024)
-#define ETB_FFSR		(0x300)
-#define ETB_FFCR		(0x304)
-#define ETB_ITMISCOP0		(0xEE0)
-#define ETB_ITTRFLINACK		(0xEE4)
-#define ETB_ITTRFLIN		(0xEE8)
-#define ETB_ITATBDATA0		(0xEEC)
-#define ETB_ITATBCTR2		(0xEF0)
-#define ETB_ITATBCTR1		(0xEF4)
-#define ETB_ITATBCTR0		(0xEF8)
-
-
-#define BYTES_PER_WORD		4
-#define ETB_SIZE_WORDS		4096
-#define FRAME_SIZE_WORDS	4
-
-#define ETB_LOCK()							\
-do {									\
-	mb();								\
-	etb_writel(etb, 0x0, CS_LAR);					\
-} while (0)
-#define ETB_UNLOCK()							\
-do {									\
-	etb_writel(etb, CS_UNLOCK_MAGIC, CS_LAR);			\
-	mb();								\
-} while (0)
-
-struct etb_ctx {
-	uint8_t		*buf;
-	void __iomem	*base;
-	bool		enabled;
-	bool		reading;
-	spinlock_t	spinlock;
-	atomic_t	in_use;
-	struct device	*dev;
-	struct kobject	*kobj;
-	uint32_t	trigger_cntr;
-};
-
-static struct etb_ctx etb;
-
-static void __etb_enable(void)
-{
-	int i;
-
-	ETB_UNLOCK();
-
-	etb_writel(etb, 0x0, ETB_RAM_WRITE_POINTER);
-	for (i = 0; i < ETB_SIZE_WORDS; i++)
-		etb_writel(etb, 0x0, ETB_RWD_REG);
-
-	etb_writel(etb, 0x0, ETB_RAM_WRITE_POINTER);
-	etb_writel(etb, 0x0, ETB_RAM_READ_POINTER);
-
-	etb_writel(etb, etb.trigger_cntr, ETB_TRG);
-	etb_writel(etb, BIT(13) | BIT(0), ETB_FFCR);
-	etb_writel(etb, BIT(0), ETB_CTL_REG);
-
-	ETB_LOCK();
-}
-
-void etb_enable(void)
-{
-	unsigned long flags;
-
-	spin_lock_irqsave(&etb.spinlock, flags);
-	__etb_enable();
-	etb.enabled = true;
-	dev_info(etb.dev, "ETB enabled\n");
-	spin_unlock_irqrestore(&etb.spinlock, flags);
-}
-
-static void __etb_disable(void)
-{
-	int count;
-	uint32_t ffcr;
-
-	ETB_UNLOCK();
-
-	ffcr = etb_readl(etb, ETB_FFCR);
-	ffcr |= (BIT(12) | BIT(6));
-	etb_writel(etb, ffcr, ETB_FFCR);
-
-	for (count = TIMEOUT_US; BVAL(etb_readl(etb, ETB_FFCR), 6) != 0
-				&& count > 0; count--)
-		udelay(1);
-	WARN(count == 0, "timeout while flushing ETB, ETB_FFCR: %#x\n",
-	     etb_readl(etb, ETB_FFCR));
-
-	etb_writel(etb, 0x0, ETB_CTL_REG);
-
-	for (count = TIMEOUT_US; BVAL(etb_readl(etb, ETB_FFSR), 1) != 1
-				&& count > 0; count--)
-		udelay(1);
-	WARN(count == 0, "timeout while disabling ETB, ETB_FFSR: %#x\n",
-	     etb_readl(etb, ETB_FFSR));
-
-	ETB_LOCK();
-}
-
-void etb_disable(void)
-{
-	unsigned long flags;
-
-	spin_lock_irqsave(&etb.spinlock, flags);
-	__etb_disable();
-	etb.enabled = false;
-	dev_info(etb.dev, "ETB disabled\n");
-	spin_unlock_irqrestore(&etb.spinlock, flags);
-}
-
-static void __etb_dump(void)
-{
-	int i;
-	uint8_t *buf_ptr;
-	uint32_t read_data;
-	uint32_t read_ptr;
-	uint32_t write_ptr;
-	uint32_t frame_off;
-	uint32_t frame_endoff;
-
-	ETB_UNLOCK();
-
-	read_ptr = etb_readl(etb, ETB_RAM_READ_POINTER);
-	write_ptr = etb_readl(etb, ETB_RAM_WRITE_POINTER);
-
-	frame_off = write_ptr % FRAME_SIZE_WORDS;
-	frame_endoff = FRAME_SIZE_WORDS - frame_off;
-	if (frame_off) {
-		dev_err(etb.dev, "write_ptr: %lu not aligned to formatter "
-				"frame size\n", (unsigned long)write_ptr);
-		dev_err(etb.dev, "frameoff: %lu, frame_endoff: %lu\n",
-			(unsigned long)frame_off, (unsigned long)frame_endoff);
-		write_ptr += frame_endoff;
-	}
-
-	if ((etb_readl(etb, ETB_STATUS_REG) & BIT(0)) == 0)
-		etb_writel(etb, 0x0, ETB_RAM_READ_POINTER);
-	else
-		etb_writel(etb, write_ptr, ETB_RAM_READ_POINTER);
-
-	buf_ptr = etb.buf;
-	for (i = 0; i < ETB_SIZE_WORDS; i++) {
-		read_data = etb_readl(etb, ETB_RAM_READ_DATA_REG);
-		*buf_ptr++ = read_data >> 0;
-		*buf_ptr++ = read_data >> 8;
-		*buf_ptr++ = read_data >> 16;
-		*buf_ptr++ = read_data >> 24;
-	}
-
-	if (frame_off) {
-		buf_ptr -= (frame_endoff * BYTES_PER_WORD);
-		for (i = 0; i < frame_endoff; i++) {
-			*buf_ptr++ = 0x0;
-			*buf_ptr++ = 0x0;
-			*buf_ptr++ = 0x0;
-			*buf_ptr++ = 0x0;
-		}
-	}
-
-	etb_writel(etb, read_ptr, ETB_RAM_READ_POINTER);
-
-	ETB_LOCK();
-}
-
-void etb_dump(void)
-{
-	unsigned long flags;
-
-	spin_lock_irqsave(&etb.spinlock, flags);
-	if (etb.enabled) {
-		__etb_disable();
-		__etb_dump();
-		__etb_enable();
-
-		dev_info(etb.dev, "ETB dumped\n");
-	}
-	spin_unlock_irqrestore(&etb.spinlock, flags);
-}
-
-static int etb_open(struct inode *inode, struct file *file)
-{
-	if (atomic_cmpxchg(&etb.in_use, 0, 1))
-		return -EBUSY;
-
-	dev_dbg(etb.dev, "%s: successfully opened\n", __func__);
-	return 0;
-}
-
-static ssize_t etb_read(struct file *file, char __user *data,
-				size_t len, loff_t *ppos)
-{
-	if (etb.reading == false) {
-		etb_dump();
-		etb.reading = true;
-	}
-
-	if (*ppos + len > ETB_SIZE_WORDS * BYTES_PER_WORD)
-		len = ETB_SIZE_WORDS * BYTES_PER_WORD - *ppos;
-
-	if (copy_to_user(data, etb.buf + *ppos, len)) {
-		dev_dbg(etb.dev, "%s: copy_to_user failed\n", __func__);
-		return -EFAULT;
-	}
-
-	*ppos += len;
-
-	dev_dbg(etb.dev, "%s: %d bytes copied, %d bytes left\n",
-		__func__, len, (int) (ETB_SIZE_WORDS * BYTES_PER_WORD - *ppos));
-
-	return len;
-}
-
-static int etb_release(struct inode *inode, struct file *file)
-{
-	etb.reading = false;
-
-	atomic_set(&etb.in_use, 0);
-
-	dev_dbg(etb.dev, "%s: released\n", __func__);
-
-	return 0;
-}
-
-static const struct file_operations etb_fops = {
-	.owner =	THIS_MODULE,
-	.open =		etb_open,
-	.read =		etb_read,
-	.release =	etb_release,
-};
-
-static struct miscdevice etb_misc = {
-	.name =		"msm_etb",
-	.minor =	MISC_DYNAMIC_MINOR,
-	.fops =		&etb_fops,
-};
-
-#define ETB_ATTR(__name)						\
-static struct kobj_attribute __name##_attr =				\
-	__ATTR(__name, S_IRUGO | S_IWUSR, __name##_show, __name##_store)
-
-static ssize_t trigger_cntr_store(struct kobject *kobj,
-			struct kobj_attribute *attr,
-			const char *buf, size_t n)
-{
-	unsigned long val;
-
-	if (sscanf(buf, "%lx", &val) != 1)
-		return -EINVAL;
-
-	etb.trigger_cntr = val;
-	return n;
-}
-static ssize_t trigger_cntr_show(struct kobject *kobj,
-			struct kobj_attribute *attr,
-			char *buf)
-{
-	unsigned long val = etb.trigger_cntr;
-	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
-}
-ETB_ATTR(trigger_cntr);
-
-static int __devinit etb_sysfs_init(void)
-{
-	int ret;
-
-	etb.kobj = kobject_create_and_add("etb", qdss_get_modulekobj());
-	if (!etb.kobj) {
-		dev_err(etb.dev, "failed to create ETB sysfs kobject\n");
-		ret = -ENOMEM;
-		goto err_create;
-	}
-
-	ret = sysfs_create_file(etb.kobj, &trigger_cntr_attr.attr);
-	if (ret) {
-		dev_err(etb.dev, "failed to create ETB sysfs trigger_cntr"
-		" attribute\n");
-		goto err_file;
-	}
-
-	return 0;
-err_file:
-	kobject_put(etb.kobj);
-err_create:
-	return ret;
-}
-
-static void __devexit etb_sysfs_exit(void)
-{
-	sysfs_remove_file(etb.kobj, &trigger_cntr_attr.attr);
-	kobject_put(etb.kobj);
-}
-
-static int __devinit etb_probe(struct platform_device *pdev)
-{
-	int ret;
-	struct resource *res;
-
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	if (!res) {
-		ret = -EINVAL;
-		goto err_res;
-	}
-
-	etb.base = ioremap_nocache(res->start, resource_size(res));
-	if (!etb.base) {
-		ret = -EINVAL;
-		goto err_ioremap;
-	}
-
-	etb.dev = &pdev->dev;
-
-	spin_lock_init(&etb.spinlock);
-
-	ret = misc_register(&etb_misc);
-	if (ret)
-		goto err_misc;
-
-	etb.buf = kzalloc(ETB_SIZE_WORDS * BYTES_PER_WORD, GFP_KERNEL);
-	if (!etb.buf) {
-		ret = -ENOMEM;
-		goto err_alloc;
-	}
-
-	etb_sysfs_init();
-
-	dev_info(etb.dev, "ETB initialized\n");
-	return 0;
-
-err_alloc:
-	misc_deregister(&etb_misc);
-err_misc:
-	iounmap(etb.base);
-err_ioremap:
-err_res:
-	dev_err(etb.dev, "ETB init failed\n");
-	return ret;
-}
-
-static int __devexit etb_remove(struct platform_device *pdev)
-{
-	if (etb.enabled)
-		etb_disable();
-	etb_sysfs_exit();
-	kfree(etb.buf);
-	misc_deregister(&etb_misc);
-	iounmap(etb.base);
-
-	return 0;
-}
-
-static struct platform_driver etb_driver = {
-	.probe          = etb_probe,
-	.remove         = __devexit_p(etb_remove),
-	.driver         = {
-		.name   = "msm_etb",
-	},
-};
-
-static int __init etb_init(void)
-{
-	return platform_driver_register(&etb_driver);
-}
-module_init(etb_init);
-
-static void __exit etb_exit(void)
-{
-	platform_driver_unregister(&etb_driver);
-}
-module_exit(etb_exit);
-
-MODULE_LICENSE("GPL v2");
-MODULE_DESCRIPTION("CoreSight Embedded Trace Buffer driver");
diff --git a/arch/arm/mach-msm/qdss-etm.c b/arch/arm/mach-msm/qdss-etm.c
deleted file mode 100644
index ca6e0c6..0000000
--- a/arch/arm/mach-msm/qdss-etm.c
+++ /dev/null
@@ -1,1329 +0,0 @@
-/* Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/types.h>
-#include <linux/device.h>
-#include <linux/platform_device.h>
-#include <linux/io.h>
-#include <linux/err.h>
-#include <linux/fs.h>
-#include <linux/slab.h>
-#include <linux/delay.h>
-#include <linux/smp.h>
-#include <linux/wakelock.h>
-#include <linux/pm_qos.h>
-#include <linux/sysfs.h>
-#include <linux/stat.h>
-#include <asm/sections.h>
-#include <mach/socinfo.h>
-
-#include "qdss-priv.h"
-
-#define etm_writel(etm, cpu, val, off)	\
-			__raw_writel((val), etm.base + (SZ_4K * cpu) + off)
-#define etm_readl(etm, cpu, off)	\
-			__raw_readl(etm.base + (SZ_4K * cpu) + off)
-
-/*
- * Device registers:
- * 0x000 - 0x2FC: Trace		registers
- * 0x300 - 0x314: Management	registers
- * 0x318 - 0xEFC: Trace		registers
- *
- * Coresight registers
- * 0xF00 - 0xF9C: Management	registers
- * 0xFA0 - 0xFA4: Management	registers in PFTv1.0
- *		  Trace		registers in PFTv1.1
- * 0xFA8 - 0xFFC: Management	registers
- */
-
-/* Trace registers (0x000-0x2FC) */
-#define ETMCR			(0x000)
-#define ETMCCR			(0x004)
-#define ETMTRIGGER		(0x008)
-#define ETMSR			(0x010)
-#define ETMSCR			(0x014)
-#define ETMTSSCR		(0x018)
-#define ETMTEEVR		(0x020)
-#define ETMTECR1		(0x024)
-#define ETMFFLR			(0x02C)
-#define ETMACVRn(n)		(0x040 + (n * 4))
-#define ETMACTRn(n)		(0x080 + (n * 4))
-#define ETMCNTRLDVRn(n)		(0x140 + (n * 4))
-#define ETMCNTENRn(n)		(0x150 + (n * 4))
-#define ETMCNTRLDEVRn(n)	(0x160 + (n * 4))
-#define ETMCNTVRn(n)		(0x170 + (n * 4))
-#define ETMSQ12EVR		(0x180)
-#define ETMSQ21EVR		(0x184)
-#define ETMSQ23EVR		(0x188)
-#define ETMSQ31EVR		(0x18C)
-#define ETMSQ32EVR		(0x190)
-#define ETMSQ13EVR		(0x194)
-#define ETMSQR			(0x19C)
-#define ETMEXTOUTEVRn(n)	(0x1A0 + (n * 4))
-#define ETMCIDCVRn(n)		(0x1B0 + (n * 4))
-#define ETMCIDCMR		(0x1BC)
-#define ETMIMPSPEC0		(0x1C0)
-#define ETMIMPSPEC1		(0x1C4)
-#define ETMIMPSPEC2		(0x1C8)
-#define ETMIMPSPEC3		(0x1CC)
-#define ETMIMPSPEC4		(0x1D0)
-#define ETMIMPSPEC5		(0x1D4)
-#define ETMIMPSPEC6		(0x1D8)
-#define ETMIMPSPEC7		(0x1DC)
-#define ETMSYNCFR		(0x1E0)
-#define ETMIDR			(0x1E4)
-#define ETMCCER			(0x1E8)
-#define ETMEXTINSELR		(0x1EC)
-#define ETMTESSEICR		(0x1F0)
-#define ETMEIBCR		(0x1F4)
-#define ETMTSEVR		(0x1F8)
-#define ETMAUXCR		(0x1FC)
-#define ETMTRACEIDR		(0x200)
-#define ETMVMIDCVR		(0x240)
-/* Management registers (0x300-0x314) */
-#define ETMOSLAR		(0x300)
-#define ETMOSLSR		(0x304)
-#define ETMOSSRR		(0x308)
-#define ETMPDCR			(0x310)
-#define ETMPDSR			(0x314)
-
-#define ETM_MAX_ADDR_CMP	(16)
-#define ETM_MAX_CNTR		(4)
-#define ETM_MAX_CTXID_CMP	(3)
-
-#define ETM_MODE_EXCLUDE	BIT(0)
-#define ETM_MODE_CYCACC		BIT(1)
-#define ETM_MODE_STALL		BIT(2)
-#define ETM_MODE_TIMESTAMP	BIT(3)
-#define ETM_MODE_CTXID		BIT(4)
-#define ETM_MODE_ALL		(0x1F)
-
-#define ETM_EVENT_MASK		(0x1FFFF)
-#define ETM_SYNC_MASK		(0xFFF)
-#define ETM_ALL_MASK		(0xFFFFFFFF)
-
-#define ETM_SEQ_STATE_MAX_VAL	(0x2)
-
-enum {
-	ETM_ADDR_TYPE_NONE,
-	ETM_ADDR_TYPE_SINGLE,
-	ETM_ADDR_TYPE_RANGE,
-	ETM_ADDR_TYPE_START,
-	ETM_ADDR_TYPE_STOP,
-};
-
-#define ETM_LOCK(cpu)							\
-do {									\
-	mb();								\
-	etm_writel(etm, cpu, 0x0, CS_LAR);				\
-} while (0)
-#define ETM_UNLOCK(cpu)							\
-do {									\
-	etm_writel(etm, cpu, CS_UNLOCK_MAGIC, CS_LAR);			\
-	mb();								\
-} while (0)
-
-
-#ifdef MODULE_PARAM_PREFIX
-#undef MODULE_PARAM_PREFIX
-#endif
-#define MODULE_PARAM_PREFIX "qdss."
-
-#ifdef CONFIG_MSM_QDSS_ETM_DEFAULT_ENABLE
-static int etm_boot_enable = 1;
-#else
-static int etm_boot_enable;
-#endif
-module_param_named(
-	etm_boot_enable, etm_boot_enable, int, S_IRUGO
-);
-
-struct etm_ctx {
-	void __iomem			*base;
-	bool				enabled;
-	struct wake_lock		wake_lock;
-	struct pm_qos_request		qos_req;
-	struct qdss_source		*src;
-	struct mutex			mutex;
-	struct device			*dev;
-	struct kobject			*kobj;
-	uint8_t				arch;
-	uint8_t				nr_addr_cmp;
-	uint8_t				nr_cntr;
-	uint8_t				nr_ext_inp;
-	uint8_t				nr_ext_out;
-	uint8_t				nr_ctxid_cmp;
-	uint8_t				reset;
-	uint32_t			mode;
-	uint32_t			ctrl;
-	uint32_t			trigger_event;
-	uint32_t			startstop_ctrl;
-	uint32_t			enable_event;
-	uint32_t			enable_ctrl1;
-	uint32_t			fifofull_level;
-	uint8_t				addr_idx;
-	uint32_t			addr_val[ETM_MAX_ADDR_CMP];
-	uint32_t			addr_acctype[ETM_MAX_ADDR_CMP];
-	uint32_t			addr_type[ETM_MAX_ADDR_CMP];
-	uint8_t				cntr_idx;
-	uint32_t			cntr_rld_val[ETM_MAX_CNTR];
-	uint32_t			cntr_event[ETM_MAX_CNTR];
-	uint32_t			cntr_rld_event[ETM_MAX_CNTR];
-	uint32_t			cntr_val[ETM_MAX_CNTR];
-	uint32_t			seq_12_event;
-	uint32_t			seq_21_event;
-	uint32_t			seq_23_event;
-	uint32_t			seq_31_event;
-	uint32_t			seq_32_event;
-	uint32_t			seq_13_event;
-	uint32_t			seq_curr_state;
-	uint8_t				ctxid_idx;
-	uint32_t			ctxid_val[ETM_MAX_CTXID_CMP];
-	uint32_t			ctxid_mask;
-	uint32_t			sync_freq;
-	uint32_t			timestamp_event;
-};
-
-static struct etm_ctx etm = {
-	.trigger_event		= 0x406F,
-	.enable_event		= 0x6F,
-	.enable_ctrl1		= 0x1,
-	.fifofull_level		= 0x28,
-	.addr_val		= {(uint32_t) _stext, (uint32_t) _etext},
-	.addr_type		= {ETM_ADDR_TYPE_RANGE, ETM_ADDR_TYPE_RANGE},
-	.cntr_event		= {[0 ... (ETM_MAX_CNTR - 1)] = 0x406F},
-	.cntr_rld_event		= {[0 ... (ETM_MAX_CNTR - 1)] = 0x406F},
-	.seq_12_event		= 0x406F,
-	.seq_21_event		= 0x406F,
-	.seq_23_event		= 0x406F,
-	.seq_31_event		= 0x406F,
-	.seq_32_event		= 0x406F,
-	.seq_13_event		= 0x406F,
-	.sync_freq		= 0x80,
-	.timestamp_event	= 0x406F,
-};
-
-
-/* ETM clock is derived from the processor clock and gets enabled on a
- * logical OR of below items on Krait (pass2 onwards):
- * 1.CPMR[ETMCLKEN] is 1
- * 2.ETMCR[PD] is 0
- * 3.ETMPDCR[PU] is 1
- * 4.Reset is asserted (core or debug)
- * 5.APB memory mapped requests (eg. EDAP access)
- *
- * 1., 2. and 3. above are permanent enables whereas 4. and 5. are temporary
- * enables
- *
- * We rely on 5. to be able to access ETMCR and then use 2. above for ETM
- * clock vote in the driver and the save-restore code uses 1. above
- * for its vote
- */
-static void etm_set_pwrdwn(int cpu)
-{
-	uint32_t etmcr;
-
-	etmcr = etm_readl(etm, cpu, ETMCR);
-	etmcr |= BIT(0);
-	etm_writel(etm, cpu, etmcr, ETMCR);
-}
-
-static void etm_clr_pwrdwn(int cpu)
-{
-	uint32_t etmcr;
-
-	etmcr = etm_readl(etm, cpu, ETMCR);
-	etmcr &= ~BIT(0);
-	etm_writel(etm, cpu, etmcr, ETMCR);
-}
-
-static void etm_set_prog(int cpu)
-{
-	uint32_t etmcr;
-	int count;
-
-	etmcr = etm_readl(etm, cpu, ETMCR);
-	etmcr |= BIT(10);
-	etm_writel(etm, cpu, etmcr, ETMCR);
-
-	for (count = TIMEOUT_US; BVAL(etm_readl(etm, cpu, ETMSR), 1) != 1
-				&& count > 0; count--)
-		udelay(1);
-	WARN(count == 0, "timeout while setting prog bit, ETMSR: %#x\n",
-	     etm_readl(etm, cpu, ETMSR));
-}
-
-static void etm_clr_prog(int cpu)
-{
-	uint32_t etmcr;
-	int count;
-
-	etmcr = etm_readl(etm, cpu, ETMCR);
-	etmcr &= ~BIT(10);
-	etm_writel(etm, cpu, etmcr, ETMCR);
-
-	for (count = TIMEOUT_US; BVAL(etm_readl(etm, cpu, ETMSR), 1) != 0
-				&& count > 0; count--)
-		udelay(1);
-	WARN(count == 0, "timeout while clearing prog bit, ETMSR: %#x\n",
-	     etm_readl(etm, cpu, ETMSR));
-}
-
-static void __etm_enable(int cpu)
-{
-	int i;
-
-	ETM_UNLOCK(cpu);
-	/* Vote for ETM power/clock enable */
-	etm_clr_pwrdwn(cpu);
-	etm_set_prog(cpu);
-
-	etm_writel(etm, cpu, etm.ctrl | BIT(10), ETMCR);
-	etm_writel(etm, cpu, etm.trigger_event, ETMTRIGGER);
-	etm_writel(etm, cpu, etm.startstop_ctrl, ETMTSSCR);
-	etm_writel(etm, cpu, etm.enable_event, ETMTEEVR);
-	etm_writel(etm, cpu, etm.enable_ctrl1, ETMTECR1);
-	etm_writel(etm, cpu, etm.fifofull_level, ETMFFLR);
-	for (i = 0; i < etm.nr_addr_cmp; i++) {
-		etm_writel(etm, cpu, etm.addr_val[i], ETMACVRn(i));
-		etm_writel(etm, cpu, etm.addr_acctype[i], ETMACTRn(i));
-	}
-	for (i = 0; i < etm.nr_cntr; i++) {
-		etm_writel(etm, cpu, etm.cntr_rld_val[i], ETMCNTRLDVRn(i));
-		etm_writel(etm, cpu, etm.cntr_event[i], ETMCNTENRn(i));
-		etm_writel(etm, cpu, etm.cntr_rld_event[i], ETMCNTRLDEVRn(i));
-		etm_writel(etm, cpu, etm.cntr_val[i], ETMCNTVRn(i));
-	}
-	etm_writel(etm, cpu, etm.seq_12_event, ETMSQ12EVR);
-	etm_writel(etm, cpu, etm.seq_21_event, ETMSQ21EVR);
-	etm_writel(etm, cpu, etm.seq_23_event, ETMSQ23EVR);
-	etm_writel(etm, cpu, etm.seq_31_event, ETMSQ31EVR);
-	etm_writel(etm, cpu, etm.seq_32_event, ETMSQ32EVR);
-	etm_writel(etm, cpu, etm.seq_13_event, ETMSQ13EVR);
-	etm_writel(etm, cpu, etm.seq_curr_state, ETMSQR);
-	for (i = 0; i < etm.nr_ext_out; i++)
-		etm_writel(etm, cpu, 0x0000406F, ETMEXTOUTEVRn(i));
-	for (i = 0; i < etm.nr_ctxid_cmp; i++)
-		etm_writel(etm, cpu, etm.ctxid_val[i], ETMCIDCVRn(i));
-	etm_writel(etm, cpu, etm.ctxid_mask, ETMCIDCMR);
-	etm_writel(etm, cpu, etm.sync_freq, ETMSYNCFR);
-	etm_writel(etm, cpu, 0x00000000, ETMEXTINSELR);
-	etm_writel(etm, cpu, etm.timestamp_event, ETMTSEVR);
-	etm_writel(etm, cpu, 0x00000000, ETMAUXCR);
-	etm_writel(etm, cpu, cpu+1, ETMTRACEIDR);
-	etm_writel(etm, cpu, 0x00000000, ETMVMIDCVR);
-
-	etm_clr_prog(cpu);
-	ETM_LOCK(cpu);
-}
-
-static int etm_enable(void)
-{
-	int ret, cpu;
-
-	if (etm.enabled) {
-		dev_err(etm.dev, "ETM tracing already enabled\n");
-		ret = -EPERM;
-		goto err;
-	}
-
-	wake_lock(&etm.wake_lock);
-	/* 1. causes all online cpus to come out of idle PC
-	 * 2. prevents idle PC until save restore flag is enabled atomically
-	 *
-	 * we rely on the user to prevent hotplug on/off racing with this
-	 * operation and to ensure cores where trace is expected to be turned
-	 * on are already hotplugged on
-	 */
-	pm_qos_update_request(&etm.qos_req, 0);
-
-	ret = qdss_enable(etm.src);
-	if (ret)
-		goto err_qdss;
-
-	for_each_online_cpu(cpu)
-		__etm_enable(cpu);
-
-	etm.enabled = true;
-
-	pm_qos_update_request(&etm.qos_req, PM_QOS_DEFAULT_VALUE);
-	wake_unlock(&etm.wake_lock);
-
-	dev_info(etm.dev, "ETM tracing enabled\n");
-	return 0;
-
-err_qdss:
-	pm_qos_update_request(&etm.qos_req, PM_QOS_DEFAULT_VALUE);
-	wake_unlock(&etm.wake_lock);
-err:
-	return ret;
-}
-
-static void __etm_disable(int cpu)
-{
-	ETM_UNLOCK(cpu);
-	etm_set_prog(cpu);
-
-	/* program trace enable to low by using always false event */
-	etm_writel(etm, cpu, 0x6F | BIT(14), ETMTEEVR);
-
-	/* Vote for ETM power/clock disable */
-	etm_set_pwrdwn(cpu);
-	ETM_LOCK(cpu);
-}
-
-static int etm_disable(void)
-{
-	int ret, cpu;
-
-	if (!etm.enabled) {
-		dev_err(etm.dev, "ETM tracing already disabled\n");
-		ret = -EPERM;
-		goto err;
-	}
-
-	wake_lock(&etm.wake_lock);
-	/* 1. causes all online cpus to come out of idle PC
-	 * 2. prevents idle PC until save restore flag is disabled atomically
-	 *
-	 * we rely on the user to prevent hotplug on/off racing with this
-	 * operation and to ensure cores where trace is expected to be turned
-	 * off are already hotplugged on
-	 */
-	pm_qos_update_request(&etm.qos_req, 0);
-
-	for_each_online_cpu(cpu)
-		__etm_disable(cpu);
-
-	qdss_disable(etm.src);
-
-	etm.enabled = false;
-
-	pm_qos_update_request(&etm.qos_req, PM_QOS_DEFAULT_VALUE);
-	wake_unlock(&etm.wake_lock);
-
-	dev_info(etm.dev, "ETM tracing disabled\n");
-	return 0;
-err:
-	return ret;
-}
-
-/* Memory mapped writes to clear os lock not supported */
-static void etm_os_unlock(void *unused)
-{
-	unsigned long value = 0x0;
-
-	asm("mcr p14, 1, %0, c1, c0, 4\n\t" : : "r" (value));
-	asm("isb\n\t");
-}
-
-#define ETM_STORE(__name, mask)						\
-static ssize_t __name##_store(struct kobject *kobj,			\
-			struct kobj_attribute *attr,			\
-			const char *buf, size_t n)			\
-{									\
-	unsigned long val;						\
-									\
-	if (sscanf(buf, "%lx", &val) != 1)				\
-		return -EINVAL;						\
-									\
-	etm.__name = val & mask;					\
-	return n;							\
-}
-
-#define ETM_SHOW(__name)						\
-static ssize_t __name##_show(struct kobject *kobj,			\
-			struct kobj_attribute *attr,			\
-			char *buf)					\
-{									\
-	unsigned long val = etm.__name;					\
-	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);		\
-}
-
-#define ETM_ATTR(__name)						\
-static struct kobj_attribute __name##_attr =				\
-	__ATTR(__name, S_IRUGO | S_IWUSR, __name##_show, __name##_store)
-#define ETM_ATTR_RO(__name)						\
-static struct kobj_attribute __name##_attr =				\
-	__ATTR(__name, S_IRUGO, __name##_show, NULL)
-
-static ssize_t enabled_store(struct kobject *kobj,
-			struct kobj_attribute *attr,
-			const char *buf, size_t n)
-{
-	int ret = 0;
-	unsigned long val;
-
-	if (sscanf(buf, "%lx", &val) != 1)
-		return -EINVAL;
-
-	mutex_lock(&etm.mutex);
-	if (val)
-		ret = etm_enable();
-	else
-		ret = etm_disable();
-	mutex_unlock(&etm.mutex);
-
-	if (ret)
-		return ret;
-	return n;
-}
-ETM_SHOW(enabled);
-ETM_ATTR(enabled);
-
-ETM_SHOW(nr_addr_cmp);
-ETM_ATTR_RO(nr_addr_cmp);
-ETM_SHOW(nr_cntr);
-ETM_ATTR_RO(nr_cntr);
-ETM_SHOW(nr_ctxid_cmp);
-ETM_ATTR_RO(nr_ctxid_cmp);
-
-/* Reset to trace everything i.e. exclude nothing. */
-static ssize_t reset_store(struct kobject *kobj,
-			struct kobj_attribute *attr,
-			const char *buf, size_t n)
-{
-	int i;
-	unsigned long val;
-
-	if (sscanf(buf, "%lx", &val) != 1)
-		return -EINVAL;
-
-	mutex_lock(&etm.mutex);
-	if (val) {
-		etm.mode = ETM_MODE_EXCLUDE;
-		etm.ctrl = 0x0;
-		if (cpu_is_krait_v1()) {
-			etm.mode |= ETM_MODE_CYCACC;
-			etm.ctrl |= BIT(12);
-		}
-		etm.trigger_event = 0x406F;
-		etm.startstop_ctrl = 0x0;
-		etm.enable_event = 0x6F;
-		etm.enable_ctrl1 = 0x1000000;
-		etm.fifofull_level = 0x28;
-		etm.addr_idx = 0x0;
-		for (i = 0; i < etm.nr_addr_cmp; i++) {
-			etm.addr_val[i] = 0x0;
-			etm.addr_acctype[i] = 0x0;
-			etm.addr_type[i] = ETM_ADDR_TYPE_NONE;
-		}
-		etm.cntr_idx = 0x0;
-		for (i = 0; i < etm.nr_cntr; i++) {
-			etm.cntr_rld_val[i] = 0x0;
-			etm.cntr_event[i] = 0x406F;
-			etm.cntr_rld_event[i] = 0x406F;
-			etm.cntr_val[i] = 0x0;
-		}
-		etm.seq_12_event = 0x406F;
-		etm.seq_21_event = 0x406F;
-		etm.seq_23_event = 0x406F;
-		etm.seq_31_event = 0x406F;
-		etm.seq_32_event = 0x406F;
-		etm.seq_13_event = 0x406F;
-		etm.seq_curr_state = 0x0;
-		etm.ctxid_idx = 0x0;
-		for (i = 0; i < etm.nr_ctxid_cmp; i++)
-			etm.ctxid_val[i] = 0x0;
-		etm.ctxid_mask = 0x0;
-		etm.sync_freq = 0x80;
-		etm.timestamp_event = 0x406F;
-	}
-	mutex_unlock(&etm.mutex);
-	return n;
-}
-ETM_SHOW(reset);
-ETM_ATTR(reset);
-
-static ssize_t mode_store(struct kobject *kobj,
-			struct kobj_attribute *attr,
-			const char *buf, size_t n)
-{
-	unsigned long val;
-
-	if (sscanf(buf, "%lx", &val) != 1)
-		return -EINVAL;
-
-	mutex_lock(&etm.mutex);
-	etm.mode = val & ETM_MODE_ALL;
-
-	if (etm.mode & ETM_MODE_EXCLUDE)
-		etm.enable_ctrl1 |= BIT(24);
-	else
-		etm.enable_ctrl1 &= ~BIT(24);
-
-	if (etm.mode & ETM_MODE_CYCACC)
-		etm.ctrl |= BIT(12);
-	else
-		etm.ctrl &= ~BIT(12);
-
-	if (etm.mode & ETM_MODE_STALL)
-		etm.ctrl |= BIT(7);
-	else
-		etm.ctrl &= ~BIT(7);
-
-	if (etm.mode & ETM_MODE_TIMESTAMP)
-		etm.ctrl |= BIT(28);
-	else
-		etm.ctrl &= ~BIT(28);
-	if (etm.mode & ETM_MODE_CTXID)
-		etm.ctrl |= (BIT(14) | BIT(15));
-	else
-		etm.ctrl &= ~(BIT(14) | BIT(15));
-	mutex_unlock(&etm.mutex);
-
-	return n;
-}
-ETM_SHOW(mode);
-ETM_ATTR(mode);
-
-ETM_STORE(trigger_event, ETM_EVENT_MASK);
-ETM_SHOW(trigger_event);
-ETM_ATTR(trigger_event);
-
-ETM_STORE(enable_event, ETM_EVENT_MASK);
-ETM_SHOW(enable_event);
-ETM_ATTR(enable_event);
-
-ETM_STORE(fifofull_level, ETM_ALL_MASK);
-ETM_SHOW(fifofull_level);
-ETM_ATTR(fifofull_level);
-
-static ssize_t addr_idx_store(struct kobject *kobj,
-			struct kobj_attribute *attr,
-			const char *buf, size_t n)
-{
-	unsigned long val;
-
-	if (sscanf(buf, "%lx", &val) != 1)
-		return -EINVAL;
-	if (val >= etm.nr_addr_cmp)
-		return -EINVAL;
-
-	/* Use mutex to ensure index doesn't change while it gets dereferenced
-	 * multiple times within a mutex block elsewhere.
-	 */
-	mutex_lock(&etm.mutex);
-	etm.addr_idx = val;
-	mutex_unlock(&etm.mutex);
-	return n;
-}
-ETM_SHOW(addr_idx);
-ETM_ATTR(addr_idx);
-
-static ssize_t addr_single_store(struct kobject *kobj,
-			struct kobj_attribute *attr,
-			const char *buf, size_t n)
-{
-	unsigned long val;
-	uint8_t idx;
-
-	if (sscanf(buf, "%lx", &val) != 1)
-		return -EINVAL;
-
-	mutex_lock(&etm.mutex);
-	idx = etm.addr_idx;
-	if (!(etm.addr_type[idx] == ETM_ADDR_TYPE_NONE ||
-	      etm.addr_type[idx] == ETM_ADDR_TYPE_SINGLE)) {
-		mutex_unlock(&etm.mutex);
-		return -EPERM;
-	}
-
-	etm.addr_val[idx] = val;
-	etm.addr_type[idx] = ETM_ADDR_TYPE_SINGLE;
-	mutex_unlock(&etm.mutex);
-	return n;
-}
-static ssize_t addr_single_show(struct kobject *kobj,
-			struct kobj_attribute *attr,
-			char *buf)
-{
-	unsigned long val;
-	uint8_t idx;
-
-	mutex_lock(&etm.mutex);
-	idx = etm.addr_idx;
-	if (!(etm.addr_type[idx] == ETM_ADDR_TYPE_NONE ||
-	      etm.addr_type[idx] == ETM_ADDR_TYPE_SINGLE)) {
-		mutex_unlock(&etm.mutex);
-		return -EPERM;
-	}
-
-	val = etm.addr_val[idx];
-	mutex_unlock(&etm.mutex);
-	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
-}
-ETM_ATTR(addr_single);
-
-static ssize_t addr_range_store(struct kobject *kobj,
-			struct kobj_attribute *attr,
-			const char *buf, size_t n)
-{
-	unsigned long val1, val2;
-	uint8_t idx;
-
-	if (sscanf(buf, "%lx %lx", &val1, &val2) != 2)
-		return -EINVAL;
-	/* lower address comparator cannot have a higher address value */
-	if (val1 > val2)
-		return -EINVAL;
-
-	mutex_lock(&etm.mutex);
-	idx = etm.addr_idx;
-	if (idx % 2 != 0) {
-		mutex_unlock(&etm.mutex);
-		return -EPERM;
-	}
-	if (!((etm.addr_type[idx] == ETM_ADDR_TYPE_NONE &&
-	       etm.addr_type[idx + 1] == ETM_ADDR_TYPE_NONE) ||
-	      (etm.addr_type[idx] == ETM_ADDR_TYPE_RANGE &&
-	       etm.addr_type[idx + 1] == ETM_ADDR_TYPE_RANGE))) {
-		mutex_unlock(&etm.mutex);
-		return -EPERM;
-	}
-
-	etm.addr_val[idx] = val1;
-	etm.addr_type[idx] = ETM_ADDR_TYPE_RANGE;
-	etm.addr_val[idx + 1] = val2;
-	etm.addr_type[idx + 1] = ETM_ADDR_TYPE_RANGE;
-	etm.enable_ctrl1 |= (1 << (idx/2));
-	mutex_unlock(&etm.mutex);
-	return n;
-}
-static ssize_t addr_range_show(struct kobject *kobj,
-			struct kobj_attribute *attr,
-			char *buf)
-{
-	unsigned long val1, val2;
-	uint8_t idx;
-
-	mutex_lock(&etm.mutex);
-	idx = etm.addr_idx;
-	if (idx % 2 != 0) {
-		mutex_unlock(&etm.mutex);
-		return -EPERM;
-	}
-	if (!((etm.addr_type[idx] == ETM_ADDR_TYPE_NONE &&
-	       etm.addr_type[idx + 1] == ETM_ADDR_TYPE_NONE) ||
-	      (etm.addr_type[idx] == ETM_ADDR_TYPE_RANGE &&
-	       etm.addr_type[idx + 1] == ETM_ADDR_TYPE_RANGE))) {
-		mutex_unlock(&etm.mutex);
-		return -EPERM;
-	}
-
-	val1 = etm.addr_val[idx];
-	val2 = etm.addr_val[idx + 1];
-	mutex_unlock(&etm.mutex);
-	return scnprintf(buf, PAGE_SIZE, "%#lx %#lx\n", val1, val2);
-}
-ETM_ATTR(addr_range);
-
-static ssize_t addr_start_store(struct kobject *kobj,
-			struct kobj_attribute *attr,
-			const char *buf, size_t n)
-{
-	unsigned long val;
-	uint8_t idx;
-
-	if (sscanf(buf, "%lx", &val) != 1)
-		return -EINVAL;
-
-	mutex_lock(&etm.mutex);
-	idx = etm.addr_idx;
-	if (!(etm.addr_type[idx] == ETM_ADDR_TYPE_NONE ||
-	      etm.addr_type[idx] == ETM_ADDR_TYPE_START)) {
-		mutex_unlock(&etm.mutex);
-		return -EPERM;
-	}
-
-	etm.addr_val[idx] = val;
-	etm.addr_type[idx] = ETM_ADDR_TYPE_START;
-	etm.startstop_ctrl |= (1 << idx);
-	etm.enable_ctrl1 |= BIT(25);
-	mutex_unlock(&etm.mutex);
-	return n;
-}
-static ssize_t addr_start_show(struct kobject *kobj,
-			struct kobj_attribute *attr,
-			char *buf)
-{
-	unsigned long val;
-	uint8_t idx;
-
-	mutex_lock(&etm.mutex);
-	idx = etm.addr_idx;
-	if (!(etm.addr_type[idx] == ETM_ADDR_TYPE_NONE ||
-	      etm.addr_type[idx] == ETM_ADDR_TYPE_START)) {
-		mutex_unlock(&etm.mutex);
-		return -EPERM;
-	}
-
-	val = etm.addr_val[idx];
-	mutex_unlock(&etm.mutex);
-	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
-}
-ETM_ATTR(addr_start);
-
-static ssize_t addr_stop_store(struct kobject *kobj,
-			struct kobj_attribute *attr,
-			const char *buf, size_t n)
-{
-	unsigned long val;
-	uint8_t idx;
-
-	if (sscanf(buf, "%lx", &val) != 1)
-		return -EINVAL;
-
-	mutex_lock(&etm.mutex);
-	idx = etm.addr_idx;
-	if (!(etm.addr_type[idx] == ETM_ADDR_TYPE_NONE ||
-	      etm.addr_type[idx] == ETM_ADDR_TYPE_STOP)) {
-		mutex_unlock(&etm.mutex);
-		return -EPERM;
-	}
-
-	etm.addr_val[idx] = val;
-	etm.addr_type[idx] = ETM_ADDR_TYPE_STOP;
-	etm.startstop_ctrl |= (1 << (idx + 16));
-	etm.enable_ctrl1 |= BIT(25);
-	mutex_unlock(&etm.mutex);
-	return n;
-}
-static ssize_t addr_stop_show(struct kobject *kobj,
-			struct kobj_attribute *attr,
-			char *buf)
-{
-	unsigned long val;
-	uint8_t idx;
-
-	mutex_lock(&etm.mutex);
-	idx = etm.addr_idx;
-	if (!(etm.addr_type[idx] == ETM_ADDR_TYPE_NONE ||
-	      etm.addr_type[idx] == ETM_ADDR_TYPE_STOP)) {
-		mutex_unlock(&etm.mutex);
-		return -EPERM;
-	}
-
-	val = etm.addr_val[idx];
-	mutex_unlock(&etm.mutex);
-	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
-}
-ETM_ATTR(addr_stop);
-
-static ssize_t addr_acctype_store(struct kobject *kobj,
-			struct kobj_attribute *attr,
-			const char *buf, size_t n)
-{
-	unsigned long val;
-
-	if (sscanf(buf, "%lx", &val) != 1)
-		return -EINVAL;
-
-	mutex_lock(&etm.mutex);
-	etm.addr_acctype[etm.addr_idx] = val;
-	mutex_unlock(&etm.mutex);
-	return n;
-}
-static ssize_t addr_acctype_show(struct kobject *kobj,
-			struct kobj_attribute *attr,
-			char *buf)
-{
-	unsigned long val;
-
-	mutex_lock(&etm.mutex);
-	val = etm.addr_acctype[etm.addr_idx];
-	mutex_unlock(&etm.mutex);
-	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
-}
-ETM_ATTR(addr_acctype);
-
-static ssize_t cntr_idx_store(struct kobject *kobj,
-			struct kobj_attribute *attr,
-			const char *buf, size_t n)
-{
-	unsigned long val;
-
-	if (sscanf(buf, "%lx", &val) != 1)
-		return -EINVAL;
-	if (val >= etm.nr_cntr)
-		return -EINVAL;
-
-	/* Use mutex to ensure index doesn't change while it gets dereferenced
-	 * multiple times within a mutex block elsewhere.
-	 */
-	mutex_lock(&etm.mutex);
-	etm.cntr_idx = val;
-	mutex_unlock(&etm.mutex);
-	return n;
-}
-ETM_SHOW(cntr_idx);
-ETM_ATTR(cntr_idx);
-
-static ssize_t cntr_rld_val_store(struct kobject *kobj,
-			struct kobj_attribute *attr,
-			const char *buf, size_t n)
-{
-	unsigned long val;
-
-	if (sscanf(buf, "%lx", &val) != 1)
-		return -EINVAL;
-
-	mutex_lock(&etm.mutex);
-	etm.cntr_rld_val[etm.cntr_idx] = val;
-	mutex_unlock(&etm.mutex);
-	return n;
-}
-static ssize_t cntr_rld_val_show(struct kobject *kobj,
-			struct kobj_attribute *attr,
-			char *buf)
-{
-	unsigned long val;
-	mutex_lock(&etm.mutex);
-	val = etm.cntr_rld_val[etm.cntr_idx];
-	mutex_unlock(&etm.mutex);
-	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
-}
-ETM_ATTR(cntr_rld_val);
-
-static ssize_t cntr_event_store(struct kobject *kobj,
-			struct kobj_attribute *attr,
-			const char *buf, size_t n)
-{
-	unsigned long val;
-
-	if (sscanf(buf, "%lx", &val) != 1)
-		return -EINVAL;
-
-	mutex_lock(&etm.mutex);
-	etm.cntr_event[etm.cntr_idx] = val & ETM_EVENT_MASK;
-	mutex_unlock(&etm.mutex);
-	return n;
-}
-static ssize_t cntr_event_show(struct kobject *kobj,
-			struct kobj_attribute *attr,
-			char *buf)
-{
-	unsigned long val;
-
-	mutex_lock(&etm.mutex);
-	val = etm.cntr_event[etm.cntr_idx];
-	mutex_unlock(&etm.mutex);
-	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
-}
-ETM_ATTR(cntr_event);
-
-static ssize_t cntr_rld_event_store(struct kobject *kobj,
-			struct kobj_attribute *attr,
-			const char *buf, size_t n)
-{
-	unsigned long val;
-
-	if (sscanf(buf, "%lx", &val) != 1)
-		return -EINVAL;
-
-	mutex_lock(&etm.mutex);
-	etm.cntr_rld_event[etm.cntr_idx] = val & ETM_EVENT_MASK;
-	mutex_unlock(&etm.mutex);
-	return n;
-}
-static ssize_t cntr_rld_event_show(struct kobject *kobj,
-			struct kobj_attribute *attr,
-			char *buf)
-{
-	unsigned long val;
-
-	mutex_lock(&etm.mutex);
-	val = etm.cntr_rld_event[etm.cntr_idx];
-	mutex_unlock(&etm.mutex);
-	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
-}
-ETM_ATTR(cntr_rld_event);
-
-static ssize_t cntr_val_store(struct kobject *kobj,
-			struct kobj_attribute *attr,
-			const char *buf, size_t n)
-{
-	unsigned long val;
-
-	if (sscanf(buf, "%lx", &val) != 1)
-		return -EINVAL;
-
-	mutex_lock(&etm.mutex);
-	etm.cntr_val[etm.cntr_idx] = val;
-	mutex_unlock(&etm.mutex);
-	return n;
-}
-static ssize_t cntr_val_show(struct kobject *kobj,
-			struct kobj_attribute *attr,
-			char *buf)
-{
-	unsigned long val;
-
-	mutex_lock(&etm.mutex);
-	val = etm.cntr_val[etm.cntr_idx];
-	mutex_unlock(&etm.mutex);
-	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
-}
-ETM_ATTR(cntr_val);
-
-ETM_STORE(seq_12_event, ETM_EVENT_MASK);
-ETM_SHOW(seq_12_event);
-ETM_ATTR(seq_12_event);
-
-ETM_STORE(seq_21_event, ETM_EVENT_MASK);
-ETM_SHOW(seq_21_event);
-ETM_ATTR(seq_21_event);
-
-ETM_STORE(seq_23_event, ETM_EVENT_MASK);
-ETM_SHOW(seq_23_event);
-ETM_ATTR(seq_23_event);
-
-ETM_STORE(seq_31_event, ETM_EVENT_MASK);
-ETM_SHOW(seq_31_event);
-ETM_ATTR(seq_31_event);
-
-ETM_STORE(seq_32_event, ETM_EVENT_MASK);
-ETM_SHOW(seq_32_event);
-ETM_ATTR(seq_32_event);
-
-ETM_STORE(seq_13_event, ETM_EVENT_MASK);
-ETM_SHOW(seq_13_event);
-ETM_ATTR(seq_13_event);
-
-static ssize_t seq_curr_state_store(struct kobject *kobj,
-			struct kobj_attribute *attr,
-			const char *buf, size_t n)
-{
-	unsigned long val;
-
-	if (sscanf(buf, "%lx", &val) != 1)
-		return -EINVAL;
-	if (val > ETM_SEQ_STATE_MAX_VAL)
-		return -EINVAL;
-
-	etm.seq_curr_state = val;
-	return n;
-}
-ETM_SHOW(seq_curr_state);
-ETM_ATTR(seq_curr_state);
-
-static ssize_t ctxid_idx_store(struct kobject *kobj,
-			struct kobj_attribute *attr,
-			const char *buf, size_t n)
-{
-	unsigned long val;
-
-	if (sscanf(buf, "%lx", &val) != 1)
-		return -EINVAL;
-	if (val >= etm.nr_ctxid_cmp)
-		return -EINVAL;
-
-	/* Use mutex to ensure index doesn't change while it gets dereferenced
-	 * multiple times within a mutex block elsewhere.
-	 */
-	mutex_lock(&etm.mutex);
-	etm.ctxid_idx = val;
-	mutex_unlock(&etm.mutex);
-	return n;
-}
-ETM_SHOW(ctxid_idx);
-ETM_ATTR(ctxid_idx);
-
-static ssize_t ctxid_val_store(struct kobject *kobj,
-			struct kobj_attribute *attr,
-			const char *buf, size_t n)
-{
-	unsigned long val;
-
-	if (sscanf(buf, "%lx", &val) != 1)
-		return -EINVAL;
-
-	mutex_lock(&etm.mutex);
-	etm.ctxid_val[etm.ctxid_idx] = val;
-	mutex_unlock(&etm.mutex);
-	return n;
-}
-static ssize_t ctxid_val_show(struct kobject *kobj,
-			struct kobj_attribute *attr,
-			char *buf)
-{
-	unsigned long val;
-
-	mutex_lock(&etm.mutex);
-	val = etm.ctxid_val[etm.ctxid_idx];
-	mutex_unlock(&etm.mutex);
-	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
-}
-ETM_ATTR(ctxid_val);
-
-ETM_STORE(ctxid_mask, ETM_ALL_MASK);
-ETM_SHOW(ctxid_mask);
-ETM_ATTR(ctxid_mask);
-
-ETM_STORE(sync_freq, ETM_SYNC_MASK);
-ETM_SHOW(sync_freq);
-ETM_ATTR(sync_freq);
-
-ETM_STORE(timestamp_event, ETM_EVENT_MASK);
-ETM_SHOW(timestamp_event);
-ETM_ATTR(timestamp_event);
-
-static struct attribute *etm_attrs[] = {
-	&nr_addr_cmp_attr.attr,
-	&nr_cntr_attr.attr,
-	&nr_ctxid_cmp_attr.attr,
-	&reset_attr.attr,
-	&mode_attr.attr,
-	&trigger_event_attr.attr,
-	&enable_event_attr.attr,
-	&fifofull_level_attr.attr,
-	&addr_idx_attr.attr,
-	&addr_single_attr.attr,
-	&addr_range_attr.attr,
-	&addr_start_attr.attr,
-	&addr_stop_attr.attr,
-	&addr_acctype_attr.attr,
-	&cntr_idx_attr.attr,
-	&cntr_rld_val_attr.attr,
-	&cntr_event_attr.attr,
-	&cntr_rld_event_attr.attr,
-	&cntr_val_attr.attr,
-	&seq_12_event_attr.attr,
-	&seq_21_event_attr.attr,
-	&seq_23_event_attr.attr,
-	&seq_31_event_attr.attr,
-	&seq_32_event_attr.attr,
-	&seq_13_event_attr.attr,
-	&seq_curr_state_attr.attr,
-	&ctxid_idx_attr.attr,
-	&ctxid_val_attr.attr,
-	&ctxid_mask_attr.attr,
-	&sync_freq_attr.attr,
-	&timestamp_event_attr.attr,
-	NULL,
-};
-
-static struct attribute_group etm_attr_grp = {
-	.attrs = etm_attrs,
-};
-
-static int __devinit etm_sysfs_init(void)
-{
-	int ret;
-
-	etm.kobj = kobject_create_and_add("etm", qdss_get_modulekobj());
-	if (!etm.kobj) {
-		dev_err(etm.dev, "failed to create ETM sysfs kobject\n");
-		ret = -ENOMEM;
-		goto err_create;
-	}
-
-	ret = sysfs_create_file(etm.kobj, &enabled_attr.attr);
-	if (ret) {
-		dev_err(etm.dev, "failed to create ETM sysfs enabled"
-		" attribute\n");
-		goto err_file;
-	}
-
-	if (sysfs_create_group(etm.kobj, &etm_attr_grp))
-		dev_err(etm.dev, "failed to create ETM sysfs group\n");
-
-	return 0;
-err_file:
-	kobject_put(etm.kobj);
-err_create:
-	return ret;
-}
-
-static void __devexit etm_sysfs_exit(void)
-{
-	sysfs_remove_group(etm.kobj, &etm_attr_grp);
-	sysfs_remove_file(etm.kobj, &enabled_attr.attr);
-	kobject_put(etm.kobj);
-}
-
-static bool __devinit etm_arch_supported(uint8_t arch)
-{
-	switch (arch) {
-	case PFT_ARCH_V1_1:
-		break;
-	default:
-		return false;
-	}
-	return true;
-}
-
-static int __devinit etm_arch_init(void)
-{
-	int ret, i;
-	/* use cpu 0 for setup */
-	int cpu = 0;
-	uint32_t etmidr;
-	uint32_t etmccr;
-
-	/* Unlock OS lock first to allow memory mapped reads and writes */
-	etm_os_unlock(NULL);
-	smp_call_function(etm_os_unlock, NULL, 1);
-	ETM_UNLOCK(cpu);
-	/* Vote for ETM power/clock enable */
-	etm_clr_pwrdwn(cpu);
-	/* Set prog bit. It will be set from reset but this is included to
-	 * ensure it is set
-	 */
-	etm_set_prog(cpu);
-
-	/* find all capabilities */
-	etmidr = etm_readl(etm, cpu, ETMIDR);
-	etm.arch = BMVAL(etmidr, 4, 11);
-	if (etm_arch_supported(etm.arch) == false) {
-		ret = -EINVAL;
-		goto err;
-	}
-
-	etmccr = etm_readl(etm, cpu, ETMCCR);
-	etm.nr_addr_cmp = BMVAL(etmccr, 0, 3) * 2;
-	etm.nr_cntr = BMVAL(etmccr, 13, 15);
-	etm.nr_ext_inp = BMVAL(etmccr, 17, 19);
-	etm.nr_ext_out = BMVAL(etmccr, 20, 22);
-	etm.nr_ctxid_cmp = BMVAL(etmccr, 24, 25);
-
-	if (cpu_is_krait_v1()) {
-		/* Krait pass1 doesn't support include filtering and non-cycle
-		 * accurate tracing
-		 */
-		etm.mode = (ETM_MODE_EXCLUDE | ETM_MODE_CYCACC);
-		etm.ctrl = 0x1000;
-		etm.enable_ctrl1 = 0x1000000;
-		for (i = 0; i < etm.nr_addr_cmp; i++) {
-			etm.addr_val[i] = 0x0;
-			etm.addr_acctype[i] = 0x0;
-			etm.addr_type[i] = ETM_ADDR_TYPE_NONE;
-		}
-	}
-
-	/* Vote for ETM power/clock disable */
-	etm_set_pwrdwn(cpu);
-	ETM_LOCK(cpu);
-
-	return 0;
-err:
-	return ret;
-}
-
-static int __devinit etm_probe(struct platform_device *pdev)
-{
-	int ret;
-	struct resource *res;
-
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	if (!res) {
-		ret = -EINVAL;
-		goto err_res;
-	}
-
-	etm.base = ioremap_nocache(res->start, resource_size(res));
-	if (!etm.base) {
-		ret = -EINVAL;
-		goto err_ioremap;
-	}
-
-	etm.dev = &pdev->dev;
-
-	mutex_init(&etm.mutex);
-	wake_lock_init(&etm.wake_lock, WAKE_LOCK_SUSPEND, "msm_etm");
-	pm_qos_add_request(&etm.qos_req, PM_QOS_CPU_DMA_LATENCY,
-						PM_QOS_DEFAULT_VALUE);
-	etm.src = qdss_get("msm_etm");
-	if (IS_ERR(etm.src)) {
-		ret = PTR_ERR(etm.src);
-		goto err_qdssget;
-	}
-
-	ret = qdss_clk_enable();
-	if (ret)
-		goto err_clk;
-
-	ret = etm_arch_init();
-	if (ret)
-		goto err_arch;
-
-	ret = etm_sysfs_init();
-	if (ret)
-		goto err_sysfs;
-
-	etm.enabled = false;
-
-	qdss_clk_disable();
-
-	dev_info(etm.dev, "ETM initialized\n");
-
-	if (etm_boot_enable)
-		etm_enable();
-
-	return 0;
-
-err_sysfs:
-err_arch:
-	qdss_clk_disable();
-err_clk:
-	qdss_put(etm.src);
-err_qdssget:
-	pm_qos_remove_request(&etm.qos_req);
-	wake_lock_destroy(&etm.wake_lock);
-	mutex_destroy(&etm.mutex);
-	iounmap(etm.base);
-err_ioremap:
-err_res:
-	dev_err(etm.dev, "ETM init failed\n");
-	return ret;
-}
-
-static int __devexit etm_remove(struct platform_device *pdev)
-{
-	if (etm.enabled)
-		etm_disable();
-	etm_sysfs_exit();
-	qdss_put(etm.src);
-	pm_qos_remove_request(&etm.qos_req);
-	wake_lock_destroy(&etm.wake_lock);
-	mutex_destroy(&etm.mutex);
-	iounmap(etm.base);
-
-	return 0;
-}
-
-static struct platform_driver etm_driver = {
-	.probe          = etm_probe,
-	.remove         = __devexit_p(etm_remove),
-	.driver         = {
-		.name   = "msm_etm",
-	},
-};
-
-int __init etm_init(void)
-{
-	return platform_driver_register(&etm_driver);
-}
-module_init(etm_init);
-
-void __exit etm_exit(void)
-{
-	platform_driver_unregister(&etm_driver);
-}
-module_exit(etm_exit);
-
-MODULE_LICENSE("GPL v2");
-MODULE_DESCRIPTION("CoreSight Program Flow Trace driver");
diff --git a/arch/arm/mach-msm/qdss-funnel.c b/arch/arm/mach-msm/qdss-funnel.c
deleted file mode 100644
index 52eb2b6..0000000
--- a/arch/arm/mach-msm/qdss-funnel.c
+++ /dev/null
@@ -1,232 +0,0 @@
-/* Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/types.h>
-#include <linux/device.h>
-#include <linux/platform_device.h>
-#include <linux/io.h>
-#include <linux/err.h>
-
-#include "qdss-priv.h"
-
-#define funnel_writel(funnel, id, val, off)	\
-			__raw_writel((val), funnel.base + (SZ_4K * id) + off)
-#define funnel_readl(funnel, id, off)		\
-			__raw_readl(funnel.base + (SZ_4K * id) + off)
-
-#define FUNNEL_FUNCTL			(0x000)
-#define FUNNEL_PRICTL			(0x004)
-#define FUNNEL_ITATBDATA0		(0xEEC)
-#define FUNNEL_ITATBCTR2		(0xEF0)
-#define FUNNEL_ITATBCTR1		(0xEF4)
-#define FUNNEL_ITATBCTR0		(0xEF8)
-
-
-#define FUNNEL_LOCK(id)							\
-do {									\
-	mb();								\
-	funnel_writel(funnel, id, 0x0, CS_LAR);				\
-} while (0)
-#define FUNNEL_UNLOCK(id)						\
-do {									\
-	funnel_writel(funnel, id, CS_UNLOCK_MAGIC, CS_LAR);		\
-	mb();								\
-} while (0)
-
-#define FUNNEL_HOLDTIME_MASK		(0xF00)
-#define FUNNEL_HOLDTIME_SHFT		(0x8)
-#define FUNNEL_HOLDTIME			(0x7 << FUNNEL_HOLDTIME_SHFT)
-
-struct funnel_ctx {
-	void __iomem	*base;
-	bool		enabled;
-	struct mutex	mutex;
-	struct device	*dev;
-	struct kobject	*kobj;
-	uint32_t	priority;
-};
-
-static struct funnel_ctx funnel;
-
-static void __funnel_enable(uint8_t id, uint32_t port_mask)
-{
-	uint32_t functl;
-
-	FUNNEL_UNLOCK(id);
-
-	functl = funnel_readl(funnel, id, FUNNEL_FUNCTL);
-	functl &= ~FUNNEL_HOLDTIME_MASK;
-	functl |= FUNNEL_HOLDTIME;
-	functl |= port_mask;
-	funnel_writel(funnel, id, functl, FUNNEL_FUNCTL);
-	funnel_writel(funnel, id, funnel.priority, FUNNEL_PRICTL);
-
-	FUNNEL_LOCK(id);
-}
-
-void funnel_enable(uint8_t id, uint32_t port_mask)
-{
-	mutex_lock(&funnel.mutex);
-	__funnel_enable(id, port_mask);
-	funnel.enabled = true;
-	dev_info(funnel.dev, "FUNNEL port mask 0x%lx enabled\n",
-					(unsigned long) port_mask);
-	mutex_unlock(&funnel.mutex);
-}
-
-static void __funnel_disable(uint8_t id, uint32_t port_mask)
-{
-	uint32_t functl;
-
-	FUNNEL_UNLOCK(id);
-
-	functl = funnel_readl(funnel, id, FUNNEL_FUNCTL);
-	functl &= ~port_mask;
-	funnel_writel(funnel, id, functl, FUNNEL_FUNCTL);
-
-	FUNNEL_LOCK(id);
-}
-
-void funnel_disable(uint8_t id, uint32_t port_mask)
-{
-	mutex_lock(&funnel.mutex);
-	__funnel_disable(id, port_mask);
-	funnel.enabled = false;
-	dev_info(funnel.dev, "FUNNEL port mask 0x%lx disabled\n",
-					(unsigned long) port_mask);
-	mutex_unlock(&funnel.mutex);
-}
-
-#define FUNNEL_ATTR(__name)						\
-static struct kobj_attribute __name##_attr =				\
-	__ATTR(__name, S_IRUGO | S_IWUSR, __name##_show, __name##_store)
-
-static ssize_t priority_store(struct kobject *kobj,
-			struct kobj_attribute *attr,
-			const char *buf, size_t n)
-{
-	unsigned long val;
-
-	if (sscanf(buf, "%lx", &val) != 1)
-		return -EINVAL;
-
-	funnel.priority = val;
-	return n;
-}
-static ssize_t priority_show(struct kobject *kobj,
-			struct kobj_attribute *attr,
-			char *buf)
-{
-	unsigned long val = funnel.priority;
-	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
-}
-FUNNEL_ATTR(priority);
-
-static int __devinit funnel_sysfs_init(void)
-{
-	int ret;
-
-	funnel.kobj = kobject_create_and_add("funnel", qdss_get_modulekobj());
-	if (!funnel.kobj) {
-		dev_err(funnel.dev, "failed to create FUNNEL sysfs kobject\n");
-		ret = -ENOMEM;
-		goto err_create;
-	}
-
-	ret = sysfs_create_file(funnel.kobj, &priority_attr.attr);
-	if (ret) {
-		dev_err(funnel.dev, "failed to create FUNNEL sysfs priority"
-		" attribute\n");
-		goto err_file;
-	}
-
-	return 0;
-err_file:
-	kobject_put(funnel.kobj);
-err_create:
-	return ret;
-}
-
-static void __devexit funnel_sysfs_exit(void)
-{
-	sysfs_remove_file(funnel.kobj, &priority_attr.attr);
-	kobject_put(funnel.kobj);
-}
-
-static int __devinit funnel_probe(struct platform_device *pdev)
-{
-	int ret;
-	struct resource *res;
-
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	if (!res) {
-		ret = -EINVAL;
-		goto err_res;
-	}
-
-	funnel.base = ioremap_nocache(res->start, resource_size(res));
-	if (!funnel.base) {
-		ret = -EINVAL;
-		goto err_ioremap;
-	}
-
-	funnel.dev = &pdev->dev;
-
-	mutex_init(&funnel.mutex);
-
-	funnel_sysfs_init();
-
-	dev_info(funnel.dev, "FUNNEL initialized\n");
-	return 0;
-
-err_ioremap:
-err_res:
-	dev_err(funnel.dev, "FUNNEL init failed\n");
-	return ret;
-}
-
-static int __devexit funnel_remove(struct platform_device *pdev)
-{
-	if (funnel.enabled)
-		funnel_disable(0x0, 0xFF);
-	funnel_sysfs_exit();
-	mutex_destroy(&funnel.mutex);
-	iounmap(funnel.base);
-
-	return 0;
-}
-
-static struct platform_driver funnel_driver = {
-	.probe          = funnel_probe,
-	.remove         = __devexit_p(funnel_remove),
-	.driver         = {
-		.name   = "msm_funnel",
-	},
-};
-
-static int __init funnel_init(void)
-{
-	return platform_driver_register(&funnel_driver);
-}
-module_init(funnel_init);
-
-static void __exit funnel_exit(void)
-{
-	platform_driver_unregister(&funnel_driver);
-}
-module_exit(funnel_exit);
-
-MODULE_LICENSE("GPL v2");
-MODULE_DESCRIPTION("CoreSight Funnel driver");
diff --git a/arch/arm/mach-msm/qdss-priv.h b/arch/arm/mach-msm/qdss-priv.h
deleted file mode 100644
index f39bc52..0000000
--- a/arch/arm/mach-msm/qdss-priv.h
+++ /dev/null
@@ -1,70 +0,0 @@
-/* Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- */
-
-#ifndef _ARCH_ARM_MACH_MSM_QDSS_H_
-#define _ARCH_ARM_MACH_MSM_QDSS_H_
-
-#include <linux/bitops.h>
-#include <mach/qdss.h>
-
-/* Coresight management registers (0xF00-0xFCC)
- * 0xFA0 - 0xFA4: Management	registers in PFTv1.0
- *		  Trace		registers in PFTv1.1
- */
-#define CS_ITCTRL		(0xF00)
-#define CS_CLAIMSET		(0xFA0)
-#define CS_CLAIMCLR		(0xFA4)
-#define CS_LAR			(0xFB0)
-#define CS_LSR			(0xFB4)
-#define CS_AUTHSTATUS		(0xFB8)
-#define CS_DEVID		(0xFC8)
-#define CS_DEVTYPE		(0xFCC)
-/* Peripheral id registers (0xFD0-0xFEC) */
-#define CS_PIDR4		(0xFD0)
-#define CS_PIDR5		(0xFD4)
-#define CS_PIDR6		(0xFD8)
-#define CS_PIDR7		(0xFDC)
-#define CS_PIDR0		(0xFE0)
-#define CS_PIDR1		(0xFE4)
-#define CS_PIDR2		(0xFE8)
-#define CS_PIDR3		(0xFEC)
-/* Component id registers (0xFF0-0xFFC) */
-#define CS_CIDR0		(0xFF0)
-#define CS_CIDR1		(0xFF4)
-#define CS_CIDR2		(0xFF8)
-#define CS_CIDR3		(0xFFC)
-
-/* DBGv7 with baseline CP14 registers implemented */
-#define ARM_DEBUG_ARCH_V7B	(0x3)
-/* DBGv7 with all CP14 registers implemented */
-#define ARM_DEBUG_ARCH_V7	(0x4)
-#define ARM_DEBUG_ARCH_V7_1	(0x5)
-#define ETM_ARCH_V3_3		(0x23)
-#define PFT_ARCH_V1_1		(0x31)
-
-#define TIMEOUT_US		(100)
-#define CS_UNLOCK_MAGIC		(0xC5ACCE55)
-
-#define BM(lsb, msb)		((BIT(msb) - BIT(lsb)) + BIT(msb))
-#define BMVAL(val, lsb, msb)	((val & BM(lsb, msb)) >> lsb)
-#define BVAL(val, n)		((val & BIT(n)) >> n)
-
-void etb_enable(void);
-void etb_disable(void);
-void etb_dump(void);
-void tpiu_disable(void);
-void funnel_enable(uint8_t id, uint32_t port_mask);
-void funnel_disable(uint8_t id, uint32_t port_mask);
-
-struct kobject *qdss_get_modulekobj(void);
-
-#endif
diff --git a/arch/arm/mach-msm/qdss.c b/arch/arm/mach-msm/qdss.c
deleted file mode 100644
index fd1fc2b..0000000
--- a/arch/arm/mach-msm/qdss.c
+++ /dev/null
@@ -1,408 +0,0 @@
-/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/types.h>
-#include <linux/device.h>
-#include <linux/platform_device.h>
-#include <linux/io.h>
-#include <linux/err.h>
-#include <linux/export.h>
-#include <mach/rpm.h>
-
-#include "rpm_resources.h"
-#include "qdss-priv.h"
-
-#define MAX_STR_LEN	(65535)
-
-enum {
-	QDSS_CLK_OFF,
-	QDSS_CLK_ON_DBG,
-	QDSS_CLK_ON_HSDBG,
-};
-
-/*
- * Exclusion rules for structure fields.
- *
- * S: qdss.sources_mutex protected.
- * I: qdss.sink_mutex protected.
- * C: qdss.clk_mutex protected.
- */
-struct qdss_ctx {
-	struct kobject			*modulekobj;
-	struct msm_qdss_platform_data	*pdata;
-	struct list_head		sources;	/* S: sources list */
-	struct mutex			sources_mutex;
-	uint8_t				sink_count;	/* I: sink count */
-	struct mutex			sink_mutex;
-	uint8_t				max_clk;
-	uint8_t				clk_count;	/* C: clk count */
-	struct mutex			clk_mutex;
-};
-
-static struct qdss_ctx qdss;
-
-/**
- * qdss_get - get the qdss source handle
- * @name: name of the qdss source
- *
- * Searches the sources list to get the qdss source handle for this source.
- *
- * CONTEXT:
- * Typically called from init or probe functions
- *
- * RETURNS:
- * pointer to struct qdss_source on success, %NULL on failure
- */
-struct qdss_source *qdss_get(const char *name)
-{
-	struct qdss_source *src, *source = NULL;
-
-	mutex_lock(&qdss.sources_mutex);
-	list_for_each_entry(src, &qdss.sources, link) {
-		if (src->name) {
-			if (strncmp(src->name, name, MAX_STR_LEN))
-				continue;
-			source = src;
-			break;
-		}
-	}
-	mutex_unlock(&qdss.sources_mutex);
-
-	return source ? source : ERR_PTR(-ENOENT);
-}
-EXPORT_SYMBOL(qdss_get);
-
-/**
- * qdss_put - release the qdss source handle
- * @name: name of the qdss source
- *
- * CONTEXT:
- * Typically called from driver remove or exit functions
- */
-void qdss_put(struct qdss_source *src)
-{
-}
-EXPORT_SYMBOL(qdss_put);
-
-/**
- * qdss_enable - enable qdss for the source
- * @src: handle for the source making the call
- *
- * Enables qdss block (relevant funnel ports and sink) if not already
- * enabled, otherwise increments the reference count
- *
- * CONTEXT:
- * Might sleep. Uses a mutex lock. Should be called from a non-atomic context.
- *
- * RETURNS:
- * 0 on success, non-zero on failure
- */
-int qdss_enable(struct qdss_source *src)
-{
-	int ret;
-
-	if (!src)
-		return -EINVAL;
-
-	ret = qdss_clk_enable();
-	if (ret)
-		goto err;
-
-	if ((qdss.pdata)->afamily) {
-		mutex_lock(&qdss.sink_mutex);
-		if (qdss.sink_count == 0) {
-			etb_disable();
-			tpiu_disable();
-			/* enable ETB first to avoid losing any trace data */
-			etb_enable();
-		}
-		qdss.sink_count++;
-		mutex_unlock(&qdss.sink_mutex);
-	}
-
-	funnel_enable(0x0, src->fport_mask);
-	return 0;
-err:
-	return ret;
-}
-EXPORT_SYMBOL(qdss_enable);
-
-/**
- * qdss_disable - disable qdss for the source
- * @src: handle for the source making the call
- *
- * Disables qdss block (relevant funnel ports and sink) if the reference count
- * is one, otherwise decrements the reference count
- *
- * CONTEXT:
- * Might sleep. Uses a mutex lock. Should be called from a non-atomic context.
- */
-void qdss_disable(struct qdss_source *src)
-{
-	if (!src)
-		return;
-
-	if ((qdss.pdata)->afamily) {
-		mutex_lock(&qdss.sink_mutex);
-		if (WARN(qdss.sink_count == 0, "qdss is unbalanced\n"))
-			goto out;
-		if (qdss.sink_count == 1) {
-			etb_dump();
-			etb_disable();
-		}
-		qdss.sink_count--;
-		mutex_unlock(&qdss.sink_mutex);
-	}
-
-	funnel_disable(0x0, src->fport_mask);
-	qdss_clk_disable();
-	return;
-out:
-	mutex_unlock(&qdss.sink_mutex);
-}
-EXPORT_SYMBOL(qdss_disable);
-
-/**
- * qdss_disable_sink - force disable the current qdss sink(s)
- *
- * Force disable the current qdss sink(s) to stop the sink from accepting any
- * trace generated subsequent to this call. This function should only be used
- * as a way to stop the sink from getting polluted with trace data that is
- * uninteresting after an event of interest has occured.
- *
- * CONTEXT:
- * Can be called from atomic or non-atomic context.
- */
-void qdss_disable_sink(void)
-{
-	if ((qdss.pdata)->afamily) {
-		etb_dump();
-		etb_disable();
-	}
-}
-EXPORT_SYMBOL(qdss_disable_sink);
-
-/**
- * qdss_clk_enable - enable qdss clocks
- *
- * Enables qdss clocks via RPM if they aren't already enabled, otherwise
- * increments the reference count.
- *
- * CONTEXT:
- * Might sleep. Uses a mutex lock. Should be called from a non-atomic context.
- *
- * RETURNS:
- * 0 on success, non-zero on failure
- */
-int qdss_clk_enable(void)
-{
-	int ret;
-	struct msm_rpm_iv_pair iv;
-
-	mutex_lock(&qdss.clk_mutex);
-	if (qdss.clk_count == 0) {
-		iv.id = MSM_RPM_ID_QDSS_CLK;
-		if (qdss.max_clk)
-			iv.value = QDSS_CLK_ON_HSDBG;
-		else
-			iv.value = QDSS_CLK_ON_DBG;
-		ret = msm_rpmrs_set(MSM_RPM_CTX_SET_0, &iv, 1);
-		if (WARN(ret, "qdss clks not enabled (%d)\n", ret))
-			goto err_clk;
-	}
-	qdss.clk_count++;
-	mutex_unlock(&qdss.clk_mutex);
-	return 0;
-err_clk:
-	mutex_unlock(&qdss.clk_mutex);
-	return ret;
-}
-EXPORT_SYMBOL(qdss_clk_enable);
-
-/**
- * qdss_clk_disable - disable qdss clocks
- *
- * Disables qdss clocks via RPM if the reference count is one, otherwise
- * decrements the reference count.
- *
- * CONTEXT:
- * Might sleep. Uses a mutex lock. Should be called from a non-atomic context.
- */
-void qdss_clk_disable(void)
-{
-	int ret;
-	struct msm_rpm_iv_pair iv;
-
-	mutex_lock(&qdss.clk_mutex);
-	if (WARN(qdss.clk_count == 0, "qdss clks are unbalanced\n"))
-		goto out;
-	if (qdss.clk_count == 1) {
-		iv.id = MSM_RPM_ID_QDSS_CLK;
-		iv.value = QDSS_CLK_OFF;
-		ret = msm_rpmrs_set(MSM_RPM_CTX_SET_0, &iv, 1);
-		WARN(ret, "qdss clks not disabled (%d)\n", ret);
-	}
-	qdss.clk_count--;
-out:
-	mutex_unlock(&qdss.clk_mutex);
-}
-EXPORT_SYMBOL(qdss_clk_disable);
-
-struct kobject *qdss_get_modulekobj(void)
-{
-	return qdss.modulekobj;
-}
-
-#define QDSS_ATTR(name)						\
-static struct kobj_attribute name##_attr =				\
-		__ATTR(name, S_IRUGO | S_IWUSR, name##_show, name##_store)
-
-static ssize_t max_clk_store(struct kobject *kobj,
-			struct kobj_attribute *attr,
-			const char *buf, size_t n)
-{
-	unsigned long val;
-
-	if (sscanf(buf, "%lx", &val) != 1)
-		return -EINVAL;
-
-	qdss.max_clk = val;
-	return n;
-}
-static ssize_t max_clk_show(struct kobject *kobj,
-			struct kobj_attribute *attr,
-			char *buf)
-{
-	unsigned long val = qdss.max_clk;
-	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
-}
-QDSS_ATTR(max_clk);
-
-static void __devinit qdss_add_sources(struct qdss_source *srcs, size_t num)
-{
-	mutex_lock(&qdss.sources_mutex);
-	while (num--) {
-		list_add_tail(&srcs->link, &qdss.sources);
-		srcs++;
-	}
-	mutex_unlock(&qdss.sources_mutex);
-}
-
-static int __init qdss_sysfs_init(void)
-{
-	int ret;
-
-	qdss.modulekobj = kset_find_obj(module_kset, KBUILD_MODNAME);
-	if (!qdss.modulekobj) {
-		pr_err("failed to find QDSS sysfs module kobject\n");
-		ret = -ENOENT;
-		goto err;
-	}
-
-	ret = sysfs_create_file(qdss.modulekobj, &max_clk_attr.attr);
-	if (ret) {
-		pr_err("failed to create QDSS sysfs max_clk attribute\n");
-		goto err;
-	}
-
-	return 0;
-err:
-	return ret;
-}
-
-static void __devexit qdss_sysfs_exit(void)
-{
-	sysfs_remove_file(qdss.modulekobj, &max_clk_attr.attr);
-}
-
-static int __devinit qdss_probe(struct platform_device *pdev)
-{
-	int ret;
-	struct qdss_source *src_table;
-	size_t num_srcs;
-
-	mutex_init(&qdss.sources_mutex);
-	mutex_init(&qdss.clk_mutex);
-	mutex_init(&qdss.sink_mutex);
-
-	if (pdev->dev.platform_data == NULL) {
-		pr_err("%s: platform data is NULL\n", __func__);
-		ret = -ENODEV;
-		goto err_pdata;
-	}
-	qdss.pdata = pdev->dev.platform_data;
-
-	INIT_LIST_HEAD(&qdss.sources);
-	src_table = (qdss.pdata)->src_table;
-	num_srcs = (qdss.pdata)->size;
-	qdss_add_sources(src_table, num_srcs);
-
-	pr_info("QDSS arch initialized\n");
-	return 0;
-err_pdata:
-	mutex_destroy(&qdss.sink_mutex);
-	mutex_destroy(&qdss.clk_mutex);
-	mutex_destroy(&qdss.sources_mutex);
-	pr_err("QDSS init failed\n");
-	return ret;
-}
-
-static int __devexit qdss_remove(struct platform_device *pdev)
-{
-	qdss_sysfs_exit();
-	mutex_destroy(&qdss.sink_mutex);
-	mutex_destroy(&qdss.clk_mutex);
-	mutex_destroy(&qdss.sources_mutex);
-
-	return 0;
-}
-
-static struct platform_driver qdss_driver = {
-	.probe          = qdss_probe,
-	.remove         = __devexit_p(qdss_remove),
-	.driver         = {
-		.name   = "msm_qdss",
-	},
-};
-
-static int __init qdss_init(void)
-{
-	return platform_driver_register(&qdss_driver);
-}
-arch_initcall(qdss_init);
-
-static int __init qdss_module_init(void)
-{
-	int ret;
-
-	ret = qdss_sysfs_init();
-	if (ret)
-		goto err_sysfs;
-
-	pr_info("QDSS module initialized\n");
-	return 0;
-err_sysfs:
-	return ret;
-}
-module_init(qdss_module_init);
-
-static void __exit qdss_exit(void)
-{
-	platform_driver_unregister(&qdss_driver);
-}
-module_exit(qdss_exit);
-
-MODULE_LICENSE("GPL v2");
-MODULE_DESCRIPTION("Qualcomm Debug SubSystem Driver");
diff --git a/arch/arm/mach-msm/rpm-regulator-smd.c b/arch/arm/mach-msm/rpm-regulator-smd.c
index 152b6e5..fdff231 100644
--- a/arch/arm/mach-msm/rpm-regulator-smd.c
+++ b/arch/arm/mach-msm/rpm-regulator-smd.c
@@ -67,6 +67,7 @@
 	RPM_REGULATOR_PARAM_HEAD_ROOM,
 	RPM_REGULATOR_PARAM_QUIET_MODE,
 	RPM_REGULATOR_PARAM_FREQ_REASON,
+	RPM_REGULATOR_PARAM_CORNER,
 	RPM_REGULATOR_PARAM_MAX,
 };
 
@@ -110,6 +111,7 @@
 	PARAM(HEAD_ROOM,       1,  0,  0,  1, "hr",   0, 0x7FFFFFFF, "qcom,init-head-room"),
 	PARAM(QUIET_MODE,      0,  1,  0,  0, "qm",   0, 2,          "qcom,init-quiet-mode"),
 	PARAM(FREQ_REASON,     0,  1,  0,  1, "resn", 0, 8,          "qcom,init-freq-reason"),
+	PARAM(CORNER,          0,  1,  0,  0, "corn", 0, 5,          "qcom,init-voltage-corner"),
 };
 
 struct rpm_vreg_request {
@@ -437,6 +439,7 @@
 	RPM_VREG_AGGR_MAX(HEAD_ROOM, param_aggr, param_reg);
 	RPM_VREG_AGGR_MAX(QUIET_MODE, param_aggr, param_reg);
 	RPM_VREG_AGGR_MAX(FREQ_REASON, param_aggr, param_reg);
+	RPM_VREG_AGGR_MAX(CORNER, param_aggr, param_reg);
 }
 
 static int rpm_vreg_aggregate_requests(struct rpm_regulator *regulator)
@@ -666,6 +669,56 @@
 	return uV;
 }
 
+static int rpm_vreg_set_voltage_corner(struct regulator_dev *rdev, int min_uV,
+				int max_uV, unsigned *selector)
+{
+	struct rpm_regulator *reg = rdev_get_drvdata(rdev);
+	int rc = 0;
+	int corner;
+	u32 prev_corner;
+
+	/*
+	 * Translate from values which work as inputs in the
+	 * regulator_set_voltage function to the actual corner values
+	 * sent to the RPM.
+	 */
+	corner = min_uV - RPM_REGULATOR_CORNER_RETENTION;
+
+	if (corner < params[RPM_REGULATOR_PARAM_CORNER].min
+	    || corner > params[RPM_REGULATOR_PARAM_CORNER].max) {
+		vreg_err(reg, "corner=%d is not within allowed range: [%u, %u]\n",
+			corner, params[RPM_REGULATOR_PARAM_CORNER].min,
+			params[RPM_REGULATOR_PARAM_CORNER].max);
+		return -EINVAL;
+	}
+
+	rpm_vreg_lock(reg->rpm_vreg);
+
+	prev_corner = reg->req.param[RPM_REGULATOR_PARAM_CORNER];
+	RPM_VREG_SET_PARAM(reg, CORNER, corner);
+
+	/* Only send a new voltage if the regulator is currently enabled. */
+	if (rpm_vreg_active_or_sleep_enabled(reg->rpm_vreg))
+		rc = rpm_vreg_aggregate_requests(reg);
+
+	if (rc) {
+		vreg_err(reg, "set voltage corner failed, rc=%d", rc);
+		RPM_VREG_SET_PARAM(reg, CORNER, prev_corner);
+	}
+
+	rpm_vreg_unlock(reg->rpm_vreg);
+
+	return rc;
+}
+
+static int rpm_vreg_get_voltage_corner(struct regulator_dev *rdev)
+{
+	struct rpm_regulator *reg = rdev_get_drvdata(rdev);
+
+	return reg->req.param[RPM_REGULATOR_PARAM_CORNER]
+		+ RPM_REGULATOR_CORNER_RETENTION;
+}
+
 static int rpm_vreg_set_mode(struct regulator_dev *rdev, unsigned int mode)
 {
 	struct rpm_regulator *reg = rdev_get_drvdata(rdev);
@@ -802,6 +855,7 @@
 	priv_reg->rdev->reg_data	= priv_reg;
 	priv_reg->rpm_vreg		= rpm_vreg;
 	priv_reg->rdesc.name		= framework_reg->rdesc.name;
+	priv_reg->rdesc.ops		= framework_reg->rdesc.ops;
 	priv_reg->set_active		= framework_reg->set_active;
 	priv_reg->set_sleep		= framework_reg->set_sleep;
 	priv_reg->min_uV		= framework_reg->min_uV;
@@ -963,7 +1017,7 @@
 		return -EINVAL;
 	}
 
-	return rpm_vreg_set_voltage(regulator->rdev, uV, uV, NULL);
+	return regulator->rdesc.ops->set_voltage(regulator->rdev, uV, uV, NULL);
 }
 EXPORT_SYMBOL_GPL(rpm_regulator_set_voltage);
 
@@ -993,6 +1047,19 @@
 	.enable_time		= rpm_vreg_enable_time,
 };
 
+static struct regulator_ops smps_corner_ops = {
+	.enable			= rpm_vreg_enable,
+	.disable		= rpm_vreg_disable,
+	.is_enabled		= rpm_vreg_is_enabled,
+	.set_voltage		= rpm_vreg_set_voltage_corner,
+	.get_voltage		= rpm_vreg_get_voltage_corner,
+	.list_voltage		= rpm_vreg_list_voltage,
+	.set_mode		= rpm_vreg_set_mode,
+	.get_mode		= rpm_vreg_get_mode,
+	.get_optimum_mode	= rpm_vreg_get_optimum_mode,
+	.enable_time		= rpm_vreg_enable_time,
+};
+
 static struct regulator_ops switch_ops = {
 	.enable			= rpm_vreg_enable,
 	.disable		= rpm_vreg_disable,
@@ -1122,6 +1189,14 @@
 	reg->rdesc.owner	= THIS_MODULE;
 	reg->rdesc.type		= REGULATOR_VOLTAGE;
 
+	/*
+	 * Switch to voltage corner regulator ops if qcom,use-voltage-corner
+	 * is specified in the device node (SMPS only).
+	 */
+	if (of_find_property(node, "qcom,use-voltage-corner", NULL)
+	    && regulator_type == RPM_REGULATOR_SMD_TYPE_SMPS)
+		reg->rdesc.ops = &smps_corner_ops;
+
 	if (regulator_type == RPM_REGULATOR_SMD_TYPE_VS)
 		reg->rdesc.n_voltages = 0;
 	else
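
For reference, a minimal consumer-side sketch of the voltage-corner interface
added above. It assumes a supply named "vdd_corner" wired to an SMPS whose node
carries qcom,use-voltage-corner, and a corner constant such as
RPM_REGULATOR_CORNER_NORMAL from the rpm-regulator-smd header; neither is
defined by this patch. The driver subtracts RPM_REGULATOR_CORNER_RETENTION
before sending the value to the RPM, so callers pass corner identifiers rather
than microvolts:

	#include <linux/err.h>
	#include <linux/regulator/consumer.h>
	#include <mach/rpm-regulator-smd.h>

	static int example_vote_for_corner(struct device *dev)
	{
		struct regulator *vdd;
		int rc;

		/* "vdd_corner" is a made-up supply name for this sketch */
		vdd = regulator_get(dev, "vdd_corner");
		if (IS_ERR(vdd))
			return PTR_ERR(vdd);

		/* min == max: request exactly one corner, not a voltage */
		rc = regulator_set_voltage(vdd, RPM_REGULATOR_CORNER_NORMAL,
					   RPM_REGULATOR_CORNER_NORMAL);
		if (!rc)
			rc = regulator_enable(vdd);

		/* a real driver would keep the handle; drop it on failure */
		if (rc)
			regulator_put(vdd);
		return rc;
	}
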
diff --git a/arch/arm/mach-msm/rpm-smd.c b/arch/arm/mach-msm/rpm-smd.c
index 75f4d92..0edea3f 100644
--- a/arch/arm/mach-msm/rpm-smd.c
+++ b/arch/arm/mach-msm/rpm-smd.c
@@ -736,7 +736,7 @@
 EXPORT_SYMBOL(msm_rpm_send_message_noirq);
 static bool msm_rpm_set_standalone(void)
 {
-	if (machine_is_copper()) {
+	if (machine_is_msm8974()) {
 		pr_warn("%s(): Running in standalone mode, requests "
 				"will not be sent to RPM\n", __func__);
 		standalone = true;
diff --git a/arch/arm/mach-msm/rpm_resources.c b/arch/arm/mach-msm/rpm_resources.c
index c86da6a..f9edfc9 100644
--- a/arch/arm/mach-msm/rpm_resources.c
+++ b/arch/arm/mach-msm/rpm_resources.c
@@ -1105,7 +1105,8 @@
 
 static int __init msm_rpmrs_l2_init(void)
 {
-	if (cpu_is_msm8960() || cpu_is_msm8930() || cpu_is_apq8064()) {
+	if (cpu_is_msm8960() || cpu_is_msm8930() || cpu_is_apq8064() ||
+	    cpu_is_msm8627()) {
 
 		msm_pm_set_l2_flush_flag(0);
 
diff --git a/arch/arm/mach-msm/sdio_ctl.c b/arch/arm/mach-msm/sdio_ctl.c
index 586e890..ac16e77 100644
--- a/arch/arm/mach-msm/sdio_ctl.c
+++ b/arch/arm/mach-msm/sdio_ctl.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
+/* Copyright (c) 2010-2012, Code Aurora Forum. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -25,6 +25,7 @@
 #include <linux/mutex.h>
 #include <linux/uaccess.h>
 #include <linux/workqueue.h>
+#include <linux/poll.h>
 #include <asm/ioctls.h>
 #include <linux/platform_device.h>
 #include <mach/msm_smd.h>
@@ -206,6 +207,34 @@
 	return ret;
 }
 
+static unsigned int sdio_ctl_poll(struct file *file, poll_table *wait)
+{
+	struct sdio_ctl_dev *sdio_ctl_devp;
+	unsigned int mask = 0;
+
+	sdio_ctl_devp = file->private_data;
+	if (!sdio_ctl_devp) {
+		pr_err("%s: on a NULL device\n", __func__);
+		return POLLERR;
+	}
+
+	poll_wait(file, &sdio_ctl_devp->read_wait_queue, wait);
+	mutex_lock(&sdio_ctl_devp->rx_lock);
+	if (sdio_cmux_is_channel_reset(sdio_ctl_devp->id)) {
+		mutex_unlock(&sdio_ctl_devp->rx_lock);
+		pr_err("%s notifying reset for sdio_ctl_dev id:%d\n",
+			__func__, sdio_ctl_devp->id);
+		return POLLERR;
+	}
+
+	if (sdio_ctl_devp->read_avail > 0)
+		mask |= POLLIN | POLLRDNORM;
+
+	mutex_unlock(&sdio_ctl_devp->rx_lock);
+
+	return mask;
+}
+
 ssize_t sdio_ctl_read(struct file *file,
 		      char __user *buf,
 		      size_t count,
@@ -417,6 +446,7 @@
 	.release = sdio_ctl_release,
 	.read = sdio_ctl_read,
 	.write = sdio_ctl_write,
+	.poll = sdio_ctl_poll,
 	.unlocked_ioctl = sdio_ctl_ioctl,
 };
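
For reference, a user-space sketch of waiting on the new poll support. The
device node path "/dev/sdio_ctl0" is an assumption for illustration and is not
defined by this patch; per sdio_ctl_poll() above, POLLIN/POLLRDNORM mean read
data is available and POLLERR signals a channel reset:

	#include <fcntl.h>
	#include <poll.h>
	#include <unistd.h>

	static int wait_and_read_ctl(char *buf, int len)
	{
		struct pollfd pfd;
		int n = -1;
		int fd = open("/dev/sdio_ctl0", O_RDONLY);

		if (fd < 0)
			return -1;

		pfd.fd = fd;
		pfd.events = POLLIN;

		/* block until data is available or the channel is reset */
		if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN))
			n = read(fd, buf, len);

		close(fd);
		return n;
	}
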
 
diff --git a/arch/arm/mach-msm/smd_pkt.c b/arch/arm/mach-msm/smd_pkt.c
index 8d567f8..f5f76f7 100644
--- a/arch/arm/mach-msm/smd_pkt.c
+++ b/arch/arm/mach-msm/smd_pkt.c
@@ -43,6 +43,7 @@
 #define NUM_SMD_PKT_PORTS 15
 #endif
 
+#define PDRIVER_NAME_MAX_SIZE 32
 #define LOOPBACK_INX (NUM_SMD_PKT_PORTS - 1)
 
 #define DEVICE_NAME "smdpkt"
@@ -52,6 +53,7 @@
 	struct cdev cdev;
 	struct device *devicep;
 	void *pil;
+	char pdriver_name[PDRIVER_NAME_MAX_SIZE];
 	struct platform_driver driver;
 
 	struct smd_channel *ch;
@@ -729,7 +731,10 @@
 	int i;
 
 	for (i = 0; i < NUM_SMD_PKT_PORTS; i++) {
-		if (!strncmp(pdev->name, smd_ch_name[i], SMD_MAX_CH_NAME_LEN)) {
+		if (smd_ch_edge[i] == pdev->id
+		    && !strncmp(pdev->name, smd_ch_name[i],
+				SMD_MAX_CH_NAME_LEN)
+		    && smd_pkt_devp[i]->driver.probe) {
 			complete_all(&smd_pkt_devp[i]->ch_allocated);
 			D_STATUS("%s allocated SMD ch for smd_pkt_dev id:%d\n",
 				 __func__, i);
@@ -772,8 +777,9 @@
 	if (smd_pkt_devp->ch == 0) {
 		init_completion(&smd_pkt_devp->ch_allocated);
 		smd_pkt_devp->driver.probe = smd_pkt_dummy_probe;
-		smd_pkt_devp->driver.driver.name =
-			smd_ch_name[smd_pkt_devp->i];
+		scnprintf(smd_pkt_devp->pdriver_name, PDRIVER_NAME_MAX_SIZE,
+			  "%s", smd_ch_name[smd_pkt_devp->i]);
+		smd_pkt_devp->driver.driver.name = smd_pkt_devp->pdriver_name;
 		smd_pkt_devp->driver.driver.owner = THIS_MODULE;
 		r = platform_driver_register(&smd_pkt_devp->driver);
 		if (r) {
@@ -870,8 +876,10 @@
 		pil_put(smd_pkt_devp->pil);
 
 release_pd:
-	if (r < 0)
+	if (r < 0) {
 		platform_driver_unregister(&smd_pkt_devp->driver);
+		smd_pkt_devp->driver.probe = NULL;
+	}
 out:
 	mutex_unlock(&smd_pkt_devp->ch_lock);
 
@@ -904,6 +912,7 @@
 		smd_pkt_devp->blocking_write = 0;
 		smd_pkt_devp->poll_mode = 0;
 		platform_driver_unregister(&smd_pkt_devp->driver);
+		smd_pkt_devp->driver.probe = NULL;
 		if (smd_pkt_devp->pil)
 			pil_put(smd_pkt_devp->pil);
 	}
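
For reference, the pattern the smd_pkt change follows: each device keeps its
own copy of the name it registers its platform_driver under, so the string
stays valid and per-device for the lifetime of the registration. The structure
and names below are made up for illustration; only the scnprintf()/driver.name
wiring mirrors the hunk above:

	#include <linux/kernel.h>
	#include <linux/module.h>
	#include <linux/platform_device.h>

	#define EXAMPLE_NAME_MAX 32

	struct example_port {
		char pdriver_name[EXAMPLE_NAME_MAX];
		struct platform_driver driver;
	};

	static int example_register(struct example_port *port,
				    const char *ch_name,
				    int (*probe)(struct platform_device *))
	{
		/* keep the registered name in storage owned by this port */
		scnprintf(port->pdriver_name, EXAMPLE_NAME_MAX, "%s", ch_name);
		port->driver.probe = probe;
		port->driver.driver.name = port->pdriver_name;
		port->driver.driver.owner = THIS_MODULE;

		return platform_driver_register(&port->driver);
	}
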
diff --git a/arch/arm/mach-msm/smd_tty.c b/arch/arm/mach-msm/smd_tty.c
index 68e0f41..44ef822 100644
--- a/arch/arm/mach-msm/smd_tty.c
+++ b/arch/arm/mach-msm/smd_tty.c
@@ -144,14 +144,8 @@
 
 		avail = tty_prepare_flip_string(tty, &ptr, avail);
 		if (avail <= 0) {
-			if (!timer_pending(&info->buf_req_timer)) {
-				init_timer(&info->buf_req_timer);
-				info->buf_req_timer.expires = jiffies +
-							((30 * HZ)/1000);
-				info->buf_req_timer.function = buf_req_retry;
-				info->buf_req_timer.data = param;
-				add_timer(&info->buf_req_timer);
-			}
+			mod_timer(&info->buf_req_timer,
+					jiffies + msecs_to_jiffies(30));
 			return;
 		}
 
@@ -572,6 +566,8 @@
 		smd_tty[idx].driver.driver.owner = THIS_MODULE;
 		spin_lock_init(&smd_tty[idx].reset_lock);
 		smd_tty[idx].is_open = 0;
+		setup_timer(&smd_tty[idx].buf_req_timer, buf_req_retry,
+				(unsigned long)&smd_tty[idx]);
 		init_waitqueue_head(&smd_tty[idx].ch_opened_wait_queue);
 		ret = platform_driver_register(&smd_tty[idx].driver);
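
For reference, a compressed sketch of the timer pattern the smd_tty change
switches to: bind the callback once with setup_timer() at init time, then use
mod_timer() in the data path instead of open-coding init_timer()/add_timer().
The names below are made up for illustration:

	#include <linux/jiffies.h>
	#include <linux/timer.h>

	static struct timer_list example_retry_timer;

	static void example_retry(unsigned long data)
	{
		/* deferred retry work would run here */
	}

	static void example_init(void)
	{
		/* bind callback and argument once, at init time */
		setup_timer(&example_retry_timer, example_retry, 0UL);
	}

	static void example_kick_retry(void)
	{
		/* (re)arm the timer; mod_timer() also starts an inactive one */
		mod_timer(&example_retry_timer,
			  jiffies + msecs_to_jiffies(30));
	}
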
 
diff --git a/arch/arm/mach-msm/socinfo.c b/arch/arm/mach-msm/socinfo.c
index b047cf4..c37943c 100644
--- a/arch/arm/mach-msm/socinfo.c
+++ b/arch/arm/mach-msm/socinfo.c
@@ -209,6 +209,7 @@
 	[101] = MSM_CPU_7X27AA,
 	[102] = MSM_CPU_7X27AA,
 	[103] = MSM_CPU_7X27AA,
+	[136] = MSM_CPU_7X27AA,
 
 	/* 9x15 ID */
 	[104] = MSM_CPU_9615,
@@ -238,8 +239,8 @@
 	/* 8060A ID */
 	[124] = MSM_CPU_8960,
 
-	/* Copper IDs */
-	[126] = MSM_CPU_COPPER,
+	/* 8974 IDs */
+	[126] = MSM_CPU_8974,
 
 	/* 8625 IDs */
 	[127] = MSM_CPU_8625,
@@ -253,6 +254,7 @@
 	[131] = MSM_CPU_7X25AB,
 	[132] = MSM_CPU_7X25AB,
 	[133] = MSM_CPU_7X25AB,
+	[135] = MSM_CPU_7X25AB,
 
 	/* 9625 IDs */
 	[134] = MSM_CPU_9625,
@@ -622,9 +624,9 @@
 		dummy_socinfo.id = 109;
 	else if (machine_is_msm9615_mtp() || machine_is_msm9615_cdp())
 		dummy_socinfo.id = 104;
-	else if (early_machine_is_copper()) {
+	else if (early_machine_is_msm8974()) {
 		dummy_socinfo.id = 126;
-		strlcpy(dummy_socinfo.build_id, "copper - ",
+		strlcpy(dummy_socinfo.build_id, "msm8974 - ",
 			sizeof(dummy_socinfo.build_id));
 	} else if (early_machine_is_msm9625()) {
 		dummy_socinfo.id = 134;
diff --git a/arch/arm/mach-msm/spm.c b/arch/arm/mach-msm/spm.c
index 4654fba..3d90678 100644
--- a/arch/arm/mach-msm/spm.c
+++ b/arch/arm/mach-msm/spm.c
@@ -72,8 +72,6 @@
 };
 
 static DEFINE_PER_CPU_SHARED_ALIGNED(struct msm_spm_device, msm_spm_devices);
-static atomic_t msm_spm_set_vdd_x_cpu_allowed = ATOMIC_INIT(1);
-
 /******************************************************************************
  * Internal helper functions
  *****************************************************************************/
@@ -189,20 +187,9 @@
 
 int msm_spm_set_vdd(unsigned int cpu, unsigned int vlevel)
 {
-	unsigned long flags;
 	struct msm_spm_device *dev;
 	uint32_t timeout_us;
 
-	local_irq_save(flags);
-
-	if (!atomic_read(&msm_spm_set_vdd_x_cpu_allowed) &&
-				unlikely(smp_processor_id() != cpu)) {
-		if (msm_spm_debug_mask & MSM_SPM_DEBUG_VCTL)
-			pr_info("%s: attempting to set vdd of cpu %u from "
-				"cpu %u\n", __func__, cpu, smp_processor_id());
-		goto set_vdd_x_cpu_bail;
-	}
-
 	dev = &per_cpu(msm_spm_devices, cpu);
 
 	if (msm_spm_debug_mask & MSM_SPM_DEBUG_VCTL)
@@ -239,15 +226,12 @@
 		pr_info("%s: cpu %u done, remaining timeout %uus\n",
 			__func__, cpu, timeout_us);
 
-	local_irq_restore(flags);
 	return 0;
 
 set_vdd_bail:
 	pr_err("%s: cpu %u failed, remaining timeout %uus, vlevel 0x%x\n",
 	       __func__, cpu, timeout_us, msm_spm_get_sts_curr_pmic_data(dev));
 
-set_vdd_x_cpu_bail:
-	local_irq_restore(flags);
 	return -EIO;
 }
 
@@ -263,11 +247,6 @@
 	mb();
 }
 
-void msm_spm_allow_x_cpu_set_vdd(bool allowed)
-{
-	atomic_set(&msm_spm_set_vdd_x_cpu_allowed, allowed ? 1 : 0);
-}
-
 int __init msm_spm_init(struct msm_spm_platform_data *data, int nr_devs)
 {
 	unsigned int cpu;
diff --git a/arch/arm/mach-msm/spm.h b/arch/arm/mach-msm/spm.h
index 154303b..e81e335 100644
--- a/arch/arm/mach-msm/spm.h
+++ b/arch/arm/mach-msm/spm.h
@@ -146,16 +146,9 @@
  */
 int msm_spm_turn_on_cpu_rail(unsigned int cpu);
 
-
 /* Internal low power management specific functions */
 
 /**
- * msm_spm_allow_x_cpu_set_vdd(): Turn on/off cross calling to set voltage
- * @allowed: boolean to indicate on/off.
- */
-void msm_spm_allow_x_cpu_set_vdd(bool allowed);
-
-/**
  * msm_spm_reinit(): Reinitialize SPM registers
  */
 void msm_spm_reinit(void);
@@ -251,11 +244,6 @@
 	/* empty */
 }
 
-static inline void msm_spm_allow_x_cpu_set_vdd(bool allowed)
-{
-	/* empty */
-}
-
 static inline int msm_spm_turn_on_cpu_rail(unsigned int cpu)
 {
 	return -ENOSYS;
diff --git a/arch/arm/mach-msm/spm_devices.c b/arch/arm/mach-msm/spm_devices.c
index 6e81be6..fb560ba 100644
--- a/arch/arm/mach-msm/spm_devices.c
+++ b/arch/arm/mach-msm/spm_devices.c
@@ -40,31 +40,15 @@
 
 static struct msm_spm_device msm_spm_l2_device;
 static DEFINE_PER_CPU_SHARED_ALIGNED(struct msm_spm_device, msm_cpu_spm_device);
-static atomic_t msm_spm_set_vdd_x_cpu_allowed = ATOMIC_INIT(1);
-
-void msm_spm_allow_x_cpu_set_vdd(bool allowed)
-{
-	atomic_set(&msm_spm_set_vdd_x_cpu_allowed, allowed ? 1 : 0);
-}
-EXPORT_SYMBOL(msm_spm_allow_x_cpu_set_vdd);
 
 int msm_spm_set_vdd(unsigned int cpu, unsigned int vlevel)
 {
-	unsigned long flags;
 	struct msm_spm_device *dev;
 	int ret = -EIO;
 
-	local_irq_save(flags);
-	if (!atomic_read(&msm_spm_set_vdd_x_cpu_allowed) &&
-				unlikely(smp_processor_id() != cpu)) {
-		goto set_vdd_x_cpu_bail;
-	}
-
 	dev = &per_cpu(msm_cpu_spm_device, cpu);
 	ret = msm_spm_drv_set_vdd(&dev->reg_data, vlevel);
 
-set_vdd_x_cpu_bail:
-	local_irq_restore(flags);
 	return ret;
 }
 EXPORT_SYMBOL(msm_spm_set_vdd);
@@ -152,7 +136,8 @@
 
 	reg = saw_bases[cpu];
 
-	if (cpu_is_msm8960() || cpu_is_msm8930() || cpu_is_apq8064()) {
+	if (cpu_is_msm8960() || cpu_is_msm8930() || cpu_is_apq8064() ||
+	    cpu_is_msm8627()) {
 		val = 0xA4;
 		reg += 0x14;
 		timeout = 512;
diff --git a/arch/arm/mach-msm/subsystem_map.c b/arch/arm/mach-msm/subsystem_map.c
index 5f5a02b..fcb8517 100644
--- a/arch/arm/mach-msm/subsystem_map.c
+++ b/arch/arm/mach-msm/subsystem_map.c
@@ -38,10 +38,8 @@
 	VIDEO_DOMAIN,
 	VIDEO_DOMAIN,
 	CAMERA_DOMAIN,
-	DISPLAY_READ_DOMAIN,
-	DISPLAY_WRITE_DOMAIN,
-	ROTATOR_SRC_DOMAIN,
-	ROTATOR_DST_DOMAIN,
+	DISPLAY_DOMAIN,
+	ROTATOR_DOMAIN,
 	0xFFFFFFFF
 };
 
diff --git a/arch/arm/mach-msm/subsystem_restart.c b/arch/arm/mach-msm/subsystem_restart.c
index c98a672..03db126 100644
--- a/arch/arm/mach-msm/subsystem_restart.c
+++ b/arch/arm/mach-msm/subsystem_restart.c
@@ -92,6 +92,9 @@
 
 /* MSM 8960 restart ordering info */
 static const char * const order_8960[] = {"modem", "lpass"};
+/* SGLTE restart ordering info */
+static const char * const order_8960_sglte[] = {"external_modem",
+						"modem"};
 
 static struct subsys_soc_restart_order restart_orders_8960_one = {
 	.subsystem_list = order_8960,
@@ -99,9 +102,19 @@
 	.subsys_ptrs = {[ARRAY_SIZE(order_8960)] = NULL}
 	};
 
+static struct subsys_soc_restart_order restart_orders_8960_fusion_sglte = {
+	.subsystem_list = order_8960_sglte,
+	.count = ARRAY_SIZE(order_8960_sglte),
+	.subsys_ptrs = {[ARRAY_SIZE(order_8960_sglte)] = NULL}
+	};
+
 static struct subsys_soc_restart_order *restart_orders_8960[] = {
 	&restart_orders_8960_one,
-};
+	};
+
+static struct subsys_soc_restart_order *restart_orders_8960_sglte[] = {
+	&restart_orders_8960_fusion_sglte,
+	};
 
 /* These will be assigned to one of the sets above after
  * runtime SoC identification.
@@ -556,9 +569,19 @@
 	}
 
 	if (cpu_is_msm8960() || cpu_is_msm8930() || cpu_is_msm9615() ||
-			cpu_is_apq8064()) {
-		restart_orders = restart_orders_8960;
-		n_restart_orders = ARRAY_SIZE(restart_orders_8960);
+			cpu_is_apq8064() || cpu_is_msm8627()) {
+		if (socinfo_get_platform_subtype() == PLATFORM_SUBTYPE_SGLTE) {
+			restart_orders = restart_orders_8960_sglte;
+			n_restart_orders =
+				ARRAY_SIZE(restart_orders_8960_sglte);
+		} else {
+			restart_orders = restart_orders_8960;
+			n_restart_orders = ARRAY_SIZE(restart_orders_8960);
+		}
+		for (i = 0; i < n_restart_orders; i++) {
+			mutex_init(&restart_orders[i]->powerup_lock);
+			mutex_init(&restart_orders[i]->shutdown_lock);
+		}
 	}
 
 	if (restart_orders == NULL || n_restart_orders < 1) {
diff --git a/arch/arm/mach-msm/timer.c b/arch/arm/mach-msm/timer.c
index 912ae6e..fcbd432 100644
--- a/arch/arm/mach-msm/timer.c
+++ b/arch/arm/mach-msm/timer.c
@@ -965,7 +965,7 @@
 		return 0;
 
 	if (cpu_is_msm8x60() || cpu_is_msm8960() || cpu_is_apq8064()
-			|| cpu_is_msm8930())
+			|| cpu_is_msm8930() || cpu_is_msm8627())
 		__raw_writel(DGT_CLK_CTL_DIV_4, MSM_TMR_BASE + DGT_CLK_CTL);
 
 	if (__get_cpu_var(first_boot)) {
@@ -1061,7 +1061,8 @@
 		sclk_hz = 32765;
 		gpt->flags |= MSM_CLOCK_FLAGS_UNSTABLE_COUNT;
 		dgt->flags |= MSM_CLOCK_FLAGS_UNSTABLE_COUNT;
-	} else if (cpu_is_msm8960() || cpu_is_apq8064() || cpu_is_msm8930()) {
+	} else if (cpu_is_msm8960() || cpu_is_apq8064() || cpu_is_msm8930() ||
+		   cpu_is_msm8627()) {
 		global_timer_offset = MSM_TMR0_BASE - MSM_TMR_BASE;
 		dgt->freq = 6750000;
 		__raw_writel(DGT_CLK_CTL_DIV_4, MSM_TMR_BASE + DGT_CLK_CTL);
@@ -1070,7 +1071,7 @@
 		gpt->freq = 32765;
 		gpt_hz = 32765;
 		sclk_hz = 32765;
-		if (!cpu_is_msm8930()) {
+		if (!cpu_is_msm8930() && !cpu_is_msm8627()) {
 			gpt->flags |= MSM_CLOCK_FLAGS_UNSTABLE_COUNT;
 			dgt->flags |= MSM_CLOCK_FLAGS_UNSTABLE_COUNT;
 		}
@@ -1122,7 +1123,7 @@
 		ce->irq = clock->irq;
 		if (cpu_is_msm8x60() || cpu_is_msm8960() || cpu_is_apq8064() ||
 				cpu_is_msm8930() || cpu_is_msm9615() ||
-				cpu_is_msm8625()) {
+				cpu_is_msm8625() || cpu_is_msm8627()) {
 			clock->percpu_evt = alloc_percpu(struct clock_event_device *);
 			if (!clock->percpu_evt) {
 				pr_err("msm_timer_init: memory allocation "
diff --git a/arch/arm/mach-msm/tz_log.c b/arch/arm/mach-msm/tz_log.c
index 7426bb2..db797cd 100644
--- a/arch/arm/mach-msm/tz_log.c
+++ b/arch/arm/mach-msm/tz_log.c
@@ -536,12 +536,19 @@
 	return 0;
 }
 
+static struct of_device_id tzlog_match[] = {
+	{	.compatible = "qcom,tz-log",
+	},
+	{}
+};
+
 static struct platform_driver tz_log_driver = {
 	.probe		= tz_log_probe,
 	.remove		= __devexit_p(tz_log_remove),
 	.driver		= {
 		.name = "tz_log",
 		.owner = THIS_MODULE,
+		.of_match_table = tzlog_match,
 	},
 };
 
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 702408c..b1911c4 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -17,7 +17,9 @@
 #include <linux/init.h>
 #include <linux/device.h>
 #include <linux/dma-mapping.h>
+#include <linux/dma-contiguous.h>
 #include <linux/highmem.h>
+#include <linux/memblock.h>
 #include <linux/slab.h>
 
 #include <asm/memory.h>
@@ -26,6 +28,9 @@
 #include <asm/tlbflush.h>
 #include <asm/sizes.h>
 #include <asm/mach/arch.h>
+#include <asm/mach/map.h>
+#include <asm/system_info.h>
+#include <asm/dma-contiguous.h>
 
 #include "mm.h"
 
@@ -56,6 +61,19 @@
 	return mask;
 }
 
+static void __dma_clear_buffer(struct page *page, size_t size)
+{
+	void *ptr;
+	/*
+	 * Ensure that the allocated pages are zeroed, and that any data
+	 * lurking in the kernel direct-mapped region is invalidated.
+	 */
+	ptr = page_address(page);
+	memset(ptr, 0, size);
+	dmac_flush_range(ptr, ptr + size);
+	outer_flush_range(__pa(ptr), __pa(ptr) + size);
+}
+
 /*
  * Allocate a DMA buffer for 'dev' of size 'size' using the
  * specified gfp mask.  Note that 'size' must be page aligned.
@@ -64,23 +82,6 @@
 {
 	unsigned long order = get_order(size);
 	struct page *page, *p, *e;
-	void *ptr;
-	u64 mask = get_coherent_dma_mask(dev);
-
-#ifdef CONFIG_DMA_API_DEBUG
-	u64 limit = (mask + 1) & ~mask;
-	if (limit && size >= limit) {
-		dev_warn(dev, "coherent allocation too big (requested %#x mask %#llx)\n",
-			size, mask);
-		return NULL;
-	}
-#endif
-
-	if (!mask)
-		return NULL;
-
-	if (mask < 0xffffffffULL)
-		gfp |= GFP_DMA;
 
 	page = alloc_pages(gfp, order);
 	if (!page)
@@ -93,14 +94,7 @@
 	for (p = page + (size >> PAGE_SHIFT), e = page + (1 << order); p < e; p++)
 		__free_page(p);
 
-	/*
-	 * Ensure that the allocated pages are zeroed, and that any data
-	 * lurking in the kernel direct-mapped region is invalidated.
-	 */
-	ptr = page_address(page);
-	memset(ptr, 0, size);
-	dmac_flush_range(ptr, ptr + size);
-	outer_flush_range(__pa(ptr), __pa(ptr) + size);
+	__dma_clear_buffer(page, size);
 
 	return page;
 }
@@ -170,6 +164,9 @@
 	unsigned long base = consistent_base;
 	unsigned long num_ptes = (CONSISTENT_END - base) >> PMD_SHIFT;
 
+	if (IS_ENABLED(CONFIG_CMA) && !IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU))
+		return 0;
+
 	consistent_pte = kmalloc(num_ptes * sizeof(pte_t), GFP_KERNEL);
 	if (!consistent_pte) {
 		pr_err("%s: no memory\n", __func__);
@@ -210,9 +207,101 @@
 
 	return ret;
 }
-
 core_initcall(consistent_init);
 
+static void *__alloc_from_contiguous(struct device *dev, size_t size,
+				     pgprot_t prot, struct page **ret_page);
+
+static struct arm_vmregion_head coherent_head = {
+	.vm_lock	= __SPIN_LOCK_UNLOCKED(&coherent_head.vm_lock),
+	.vm_list	= LIST_HEAD_INIT(coherent_head.vm_list),
+};
+
+size_t coherent_pool_size = DEFAULT_CONSISTENT_DMA_SIZE / 8;
+
+static int __init early_coherent_pool(char *p)
+{
+	coherent_pool_size = memparse(p, &p);
+	return 0;
+}
+early_param("coherent_pool", early_coherent_pool);
+
+/*
+ * Initialise the coherent pool for atomic allocations.
+ */
+static int __init coherent_init(void)
+{
+	pgprot_t prot = pgprot_dmacoherent(pgprot_kernel);
+	size_t size = coherent_pool_size;
+	struct page *page;
+	void *ptr;
+
+	if (!IS_ENABLED(CONFIG_CMA))
+		return 0;
+
+	ptr = __alloc_from_contiguous(NULL, size, prot, &page);
+	if (ptr) {
+		coherent_head.vm_start = (unsigned long) ptr;
+		coherent_head.vm_end = (unsigned long) ptr + size;
+		printk(KERN_INFO "DMA: preallocated %u KiB pool for atomic coherent allocations\n",
+		       (unsigned)size / 1024);
+		return 0;
+	}
+	printk(KERN_ERR "DMA: failed to allocate %u KiB pool for atomic coherent allocation\n",
+	       (unsigned)size / 1024);
+	return -ENOMEM;
+}
+/*
+ * CMA is activated by core_initcall, so we must be called after it.
+ */
+postcore_initcall(coherent_init);
+
+struct dma_contig_early_reserve {
+	phys_addr_t base;
+	unsigned long size;
+};
+
+static struct dma_contig_early_reserve dma_mmu_remap[MAX_CMA_AREAS] __initdata;
+
+static int dma_mmu_remap_num __initdata;
+
+void __init dma_contiguous_early_fixup(phys_addr_t base, unsigned long size)
+{
+	dma_mmu_remap[dma_mmu_remap_num].base = base;
+	dma_mmu_remap[dma_mmu_remap_num].size = size;
+	dma_mmu_remap_num++;
+}
+
+void __init dma_contiguous_remap(void)
+{
+	int i;
+	for (i = 0; i < dma_mmu_remap_num; i++) {
+		phys_addr_t start = dma_mmu_remap[i].base;
+		phys_addr_t end = start + dma_mmu_remap[i].size;
+		struct map_desc map;
+		unsigned long addr;
+
+		if (end > arm_lowmem_limit)
+			end = arm_lowmem_limit;
+		if (start >= end)
+			return;
+
+		map.pfn = __phys_to_pfn(start);
+		map.virtual = __phys_to_virt(start);
+		map.length = end - start;
+		map.type = MT_MEMORY_DMA_READY;
+
+		/*
+		 * Clear previous low-memory mapping
+		 */
+		for (addr = __phys_to_virt(start); addr < __phys_to_virt(end);
+		     addr += PGDIR_SIZE)
+			pmd_clear(pmd_off_k(addr));
+
+		iotable_init(&map, 1);
+	}
+}
+
 static void *
 __dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot,
 	const void *caller)
@@ -319,20 +408,173 @@
 	arm_vmregion_free(&consistent_head, c);
 }
 
+static int __dma_update_pte(pte_t *pte, pgtable_t token, unsigned long addr,
+			    void *data)
+{
+	struct page *page = virt_to_page(addr);
+	pgprot_t prot = *(pgprot_t *)data;
+
+	set_pte_ext(pte, mk_pte(page, prot), 0);
+	return 0;
+}
+
+static void __dma_remap(struct page *page, size_t size, pgprot_t prot)
+{
+	unsigned long start = (unsigned long) page_address(page);
+	unsigned end = start + size;
+
+	apply_to_page_range(&init_mm, start, size, __dma_update_pte, &prot);
+	dsb();
+	flush_tlb_kernel_range(start, end);
+}
+
+static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp,
+				 pgprot_t prot, struct page **ret_page,
+				 const void *caller)
+{
+	struct page *page;
+	void *ptr;
+	page = __dma_alloc_buffer(dev, size, gfp);
+	if (!page)
+		return NULL;
+
+	ptr = __dma_alloc_remap(page, size, gfp, prot, caller);
+	if (!ptr) {
+		__dma_free_buffer(page, size);
+		return NULL;
+	}
+
+	*ret_page = page;
+	return ptr;
+}
+
+static void *__alloc_from_pool(struct device *dev, size_t size,
+			       struct page **ret_page, const void *caller)
+{
+	struct arm_vmregion *c;
+	size_t align;
+
+	if (!coherent_head.vm_start) {
+		printk(KERN_ERR "%s: coherent pool not initialised!\n",
+		       __func__);
+		dump_stack();
+		return NULL;
+	}
+
+	/*
+	 * Align the region allocation - allocations from pool are rather
+	 * small, so align them to their order in pages, minimum is a page
+	 * size. This helps reduce fragmentation of the DMA space.
+	 */
+	align = PAGE_SIZE << get_order(size);
+	c = arm_vmregion_alloc(&coherent_head, align, size, 0, caller);
+	if (c) {
+		void *ptr = (void *)c->vm_start;
+		struct page *page = virt_to_page(ptr);
+		*ret_page = page;
+		return ptr;
+	}
+	return NULL;
+}
+
+static int __free_from_pool(void *cpu_addr, size_t size)
+{
+	unsigned long start = (unsigned long)cpu_addr;
+	unsigned long end = start + size;
+	struct arm_vmregion *c;
+
+	if (start < coherent_head.vm_start || end > coherent_head.vm_end)
+		return 0;
+
+	c = arm_vmregion_find_remove(&coherent_head, (unsigned long)start);
+
+	if ((c->vm_end - c->vm_start) != size) {
+		printk(KERN_ERR "%s: freeing wrong coherent size (%ld != %d)\n",
+		       __func__, c->vm_end - c->vm_start, size);
+		dump_stack();
+		size = c->vm_end - c->vm_start;
+	}
+
+	arm_vmregion_free(&coherent_head, c);
+	return 1;
+}
+
+static void *__alloc_from_contiguous(struct device *dev, size_t size,
+				     pgprot_t prot, struct page **ret_page)
+{
+	unsigned long order = get_order(size);
+	size_t count = size >> PAGE_SHIFT;
+	struct page *page;
+
+	page = dma_alloc_from_contiguous(dev, count, order);
+	if (!page)
+		return NULL;
+
+	__dma_clear_buffer(page, size);
+	__dma_remap(page, size, prot);
+
+	*ret_page = page;
+	return page_address(page);
+}
+
+static void __free_from_contiguous(struct device *dev, struct page *page,
+				   size_t size)
+{
+	__dma_remap(page, size, pgprot_kernel);
+	dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT);
+}
+
+#define nommu() 0
+
 #else	/* !CONFIG_MMU */
 
-#define __dma_alloc_remap(page, size, gfp, prot, c)	page_address(page)
-#define __dma_free_remap(addr, size)			do { } while (0)
+#define nommu() 1
+
+#define __alloc_remap_buffer(dev, size, gfp, prot, ret, c)	NULL
+#define __alloc_from_pool(dev, size, ret_page, c)		NULL
+#define __alloc_from_contiguous(dev, size, prot, ret)		NULL
+#define __free_from_pool(cpu_addr, size)			0
+#define __free_from_contiguous(dev, page, size)			do { } while (0)
+#define __dma_free_remap(cpu_addr, size)			do { } while (0)
 
 #endif	/* CONFIG_MMU */
 
-static void *
-__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp,
-	    pgprot_t prot, const void *caller)
+static void *__alloc_simple_buffer(struct device *dev, size_t size, gfp_t gfp,
+				   struct page **ret_page)
 {
 	struct page *page;
+	page = __dma_alloc_buffer(dev, size, gfp);
+	if (!page)
+		return NULL;
+
+	*ret_page = page;
+	return page_address(page);
+}
+
+
+
+static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
+			 gfp_t gfp, pgprot_t prot, const void *caller)
+{
+	u64 mask = get_coherent_dma_mask(dev);
+	struct page *page;
 	void *addr;
 
+#ifdef CONFIG_DMA_API_DEBUG
+	u64 limit = (mask + 1) & ~mask;
+	if (limit && size >= limit) {
+		dev_warn(dev, "coherent allocation too big (requested %#x mask %#llx)\n",
+			size, mask);
+		return NULL;
+	}
+#endif
+
+	if (!mask)
+		return NULL;
+
+	if (mask < 0xffffffffULL)
+		gfp |= GFP_DMA;
+
 	/*
 	 * Following is a work-around (a.k.a. hack) to prevent pages
 	 * with __GFP_COMP being passed to split_page() which cannot
@@ -345,19 +587,17 @@
 	*handle = ~0;
 	size = PAGE_ALIGN(size);
 
-	page = __dma_alloc_buffer(dev, size, gfp);
-	if (!page)
-		return NULL;
-
-	if (!arch_is_coherent())
-		addr = __dma_alloc_remap(page, size, gfp, prot, caller);
+	if (arch_is_coherent() || nommu())
+		addr = __alloc_simple_buffer(dev, size, gfp, &page);
+	else if (!IS_ENABLED(CONFIG_CMA))
+		addr = __alloc_remap_buffer(dev, size, gfp, prot, &page, caller);
+	else if (gfp & GFP_ATOMIC)
+		addr = __alloc_from_pool(dev, size, &page, caller);
 	else
-		addr = page_address(page);
+		addr = __alloc_from_contiguous(dev, size, prot, &page);
 
 	if (addr)
 		*handle = pfn_to_dma(dev, page_to_pfn(page));
-	else
-		__dma_free_buffer(page, size);
 
 	return addr;
 }
@@ -366,8 +606,8 @@
  * Allocate DMA-coherent memory space and return both the kernel remapped
  * virtual and bus address for that space.
  */
-void *
-dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp)
+void *dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *handle,
+			 gfp_t gfp)
 {
 	void *memory;
 
@@ -398,25 +638,11 @@
 {
 	int ret = -ENXIO;
 #ifdef CONFIG_MMU
-	unsigned long user_size, kern_size;
-	struct arm_vmregion *c;
-
-	user_size = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
-
-	c = arm_vmregion_find(&consistent_head, (unsigned long)cpu_addr);
-	if (c) {
-		unsigned long off = vma->vm_pgoff;
-
-		kern_size = (c->vm_end - c->vm_start) >> PAGE_SHIFT;
-
-		if (off < kern_size &&
-		    user_size <= (kern_size - off)) {
-			ret = remap_pfn_range(vma, vma->vm_start,
-					      page_to_pfn(c->vm_pages) + off,
-					      user_size << PAGE_SHIFT,
-					      vma->vm_page_prot);
-		}
-	}
+	unsigned long pfn = dma_to_pfn(dev, dma_addr);
+	ret = remap_pfn_range(vma, vma->vm_start,
+			      pfn + vma->vm_pgoff,
+			      vma->vm_end - vma->vm_start,
+			      vma->vm_page_prot);
 #endif	/* CONFIG_MMU */
 
 	return ret;
@@ -438,23 +664,33 @@
 }
 EXPORT_SYMBOL(dma_mmap_writecombine);
 
+
 /*
- * free a page as defined by the above mapping.
- * Must not be called with IRQs disabled.
+ * Free a buffer as defined by the above mapping.
  */
 void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, dma_addr_t handle)
 {
-	WARN_ON(irqs_disabled());
+	struct page *page = pfn_to_page(dma_to_pfn(dev, handle));
 
 	if (dma_release_from_coherent(dev, get_order(size), cpu_addr))
 		return;
 
 	size = PAGE_ALIGN(size);
 
-	if (!arch_is_coherent())
+	if (arch_is_coherent() || nommu()) {
+		__dma_free_buffer(page, size);
+	} else if (!IS_ENABLED(CONFIG_CMA)) {
 		__dma_free_remap(cpu_addr, size);
-
-	__dma_free_buffer(pfn_to_page(dma_to_pfn(dev, handle)), size);
+		__dma_free_buffer(page, size);
+	} else {
+		if (__free_from_pool(cpu_addr, size))
+			return;
+		/*
+		 * Non-atomic allocations cannot be freed with IRQs disabled
+		 */
+		WARN_ON(irqs_disabled());
+		__free_from_contiguous(dev, page, size);
+	}
 }
 EXPORT_SYMBOL(dma_free_coherent);
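
For reference, a driver-side sketch of the interface reworked above. Whether
the buffer comes from the remapped-buffer path, the atomic coherent pool or
CMA is decided inside __dma_alloc() from CONFIG_CMA and the gfp flags; callers
keep using dma_alloc_coherent()/dma_free_coherent() unchanged. The function
names below are made up for illustration:

	#include <linux/dma-mapping.h>
	#include <linux/gfp.h>

	static void *example_get_dma_buf(struct device *dev, size_t size,
					 dma_addr_t *dma)
	{
		/* non-atomic allocations may now be served from CMA */
		return dma_alloc_coherent(dev, size, dma, GFP_KERNEL);
	}

	static void example_put_dma_buf(struct device *dev, size_t size,
					void *cpu, dma_addr_t dma)
	{
		/* freeing a CMA-backed buffer may sleep, so IRQs must be on */
		dma_free_coherent(dev, size, cpu, dma);
	}
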
 
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 59e252b..32f61f5 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -22,6 +22,7 @@
 #include <linux/gfp.h>
 #include <linux/memblock.h>
 #include <linux/sort.h>
+#include <linux/dma-contiguous.h>
 
 #include <asm/mach-types.h>
 #include <asm/memblock.h>
@@ -235,6 +236,17 @@
 }
 #endif
 
+void __init setup_dma_zone(struct machine_desc *mdesc)
+{
+#ifdef CONFIG_ZONE_DMA
+	if (mdesc->dma_zone_size) {
+		arm_dma_zone_size = mdesc->dma_zone_size;
+		arm_dma_limit = PHYS_OFFSET + arm_dma_zone_size - 1;
+	} else
+		arm_dma_limit = 0xffffffff;
+#endif
+}
+
 #ifdef CONFIG_ARCH_POPULATES_NODE_MAP
 static void __init arm_bootmem_free_apnm(unsigned long max_low,
 	unsigned long max_high)
@@ -305,12 +317,9 @@
 	 * Adjust the sizes according to any special requirements for
 	 * this machine type.
 	 */
-	if (arm_dma_zone_size) {
+	if (arm_dma_zone_size)
 		arm_adjust_dma_zone(zone_size, zhole_size,
 			arm_dma_zone_size >> PAGE_SHIFT);
-		arm_dma_limit = PHYS_OFFSET + arm_dma_zone_size - 1;
-	} else
-		arm_dma_limit = 0xffffffff;
 #endif
 
 	free_area_init_node(0, zone_size, min, zhole_size);
@@ -425,6 +434,12 @@
 	if (mdesc->reserve)
 		mdesc->reserve();
 
+	/*
+	 * Reserve memory for DMA contiguous allocations; it must come
+	 * from the DMA area inside low memory.
+	 */
+	dma_contiguous_reserve(min(arm_dma_limit, arm_lowmem_limit));
+
 	arm_memblock_steal_permitted = false;
 	memblock_allow_resize();
 	memblock_dump_all();
diff --git a/arch/arm/mm/mm.h b/arch/arm/mm/mm.h
index 411fbd9..bd41abc 100644
--- a/arch/arm/mm/mm.h
+++ b/arch/arm/mm/mm.h
@@ -68,6 +68,8 @@
 #endif
 
 struct map_desc;
+extern phys_addr_t arm_lowmem_limit;
 
 void __init bootmem_init(void);
 void arm_mm_memblock_reserve(void);
+void dma_contiguous_remap(void);
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index e6b733b..a20b6cb 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -304,6 +304,11 @@
 				PMD_SECT_UNCACHED | PMD_SECT_XN,
 		.domain    = DOMAIN_KERNEL,
 	},
+	[MT_MEMORY_DMA_READY] = {
+		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
+		.prot_l1   = PMD_TYPE_TABLE,
+		.domain    = DOMAIN_KERNEL,
+	},
 };
 
 const struct mem_type *get_mem_type(unsigned int type)
@@ -445,6 +450,7 @@
 	if (arch_is_coherent() && cpu_is_xsc3()) {
 		mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
 		mem_types[MT_MEMORY].prot_pte |= L_PTE_SHARED;
+		mem_types[MT_MEMORY_DMA_READY].prot_pte |= L_PTE_SHARED;
 		mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S;
 		mem_types[MT_MEMORY_NONCACHED].prot_pte |= L_PTE_SHARED;
 	}
@@ -478,6 +484,7 @@
 			mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_SHARED;
 			mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
 			mem_types[MT_MEMORY].prot_pte |= L_PTE_SHARED;
+			mem_types[MT_MEMORY_DMA_READY].prot_pte |= L_PTE_SHARED;
 			mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S;
 			mem_types[MT_MEMORY_R].prot_sect |= PMD_SECT_S;
 			mem_types[MT_MEMORY_RW].prot_sect |= PMD_SECT_S;
@@ -533,6 +540,7 @@
 	mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask;
 	mem_types[MT_MEMORY].prot_sect |= ecc_mask | cp->pmd;
 	mem_types[MT_MEMORY].prot_pte |= kern_pgprot;
+	mem_types[MT_MEMORY_DMA_READY].prot_pte |= kern_pgprot;
 	mem_types[MT_MEMORY_NONCACHED].prot_sect |= ecc_mask;
 	mem_types[MT_MEMORY_R].prot_sect |= ecc_mask | cp->pmd;
 	mem_types[MT_MEMORY_RW].prot_sect |= ecc_mask | cp->pmd;
@@ -643,7 +651,7 @@
 	 * L1 entries, whereas PGDs refer to a group of L1 entries making
 	 * up one logical pointer to an L2 table.
 	 */
-	if (((addr | end | phys) & ~SECTION_MASK) == 0 && !force_pages) {
+	if (type->prot_sect && ((addr | end | phys) & ~SECTION_MASK) == 0 && !force_pages) {
 		pmd_t *p = pmd;
 
 #ifndef CONFIG_ARM_LPAE
@@ -862,7 +870,7 @@
 }
 early_param("vmalloc", early_vmalloc);
 
-static phys_addr_t lowmem_limit __initdata = 0;
+phys_addr_t arm_lowmem_limit __initdata = 0;
 
 void __init sanity_check_meminfo(void)
 {
@@ -953,8 +961,8 @@
 			bank->size = newsize;
 		}
 #endif
-		if (!bank->highmem && bank->start + bank->size > lowmem_limit)
-			lowmem_limit = bank->start + bank->size;
+		if (!bank->highmem && bank->start + bank->size > arm_lowmem_limit)
+			arm_lowmem_limit = bank->start + bank->size;
 
 		j++;
 	}
@@ -979,8 +987,8 @@
 	}
 #endif
 	meminfo.nr_banks = j;
-	high_memory = __va(lowmem_limit - 1) + 1;
-	memblock_set_current_limit(lowmem_limit);
+	high_memory = __va(arm_lowmem_limit - 1) + 1;
+	memblock_set_current_limit(arm_lowmem_limit);
 }
 
 static inline void prepare_page_table(void)
@@ -1005,8 +1013,8 @@
 	 * Find the end of the first block of lowmem.
 	 */
 	end = memblock.memory.regions[0].base + memblock.memory.regions[0].size;
-	if (end >= lowmem_limit)
-		end = lowmem_limit;
+	if (end >= arm_lowmem_limit)
+		end = arm_lowmem_limit;
 
 	/*
 	 * Clear out all the kernel space mappings, except for the first
@@ -1251,8 +1259,8 @@
 		start = reg->base;
 		end = start + reg->size;
 
-		if (end > lowmem_limit)
-			end = lowmem_limit;
+		if (end > arm_lowmem_limit)
+			end = arm_lowmem_limit;
 		if (start >= end)
 			break;
 
@@ -1323,11 +1331,12 @@
 {
 	void *zero_page;
 
-	memblock_set_current_limit(lowmem_limit);
+	memblock_set_current_limit(arm_lowmem_limit);
 
 	build_mem_type_table();
 	prepare_page_table();
 	map_lowmem();
+	dma_contiguous_remap();
 	devicemaps_init(mdesc);
 	kmap_init();
 
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index c9866b0..7cbdfda 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -31,6 +31,7 @@
 	select ARCH_WANT_OPTIONAL_GPIOLIB
 	select ARCH_WANT_FRAME_POINTERS
 	select HAVE_DMA_ATTRS
+	select HAVE_DMA_CONTIGUOUS if !SWIOTLB
 	select HAVE_KRETPROBES
 	select HAVE_OPTPROBES
 	select HAVE_FTRACE_MCOUNT_RECORD
diff --git a/arch/x86/include/asm/dma-contiguous.h b/arch/x86/include/asm/dma-contiguous.h
new file mode 100644
index 0000000..c092416
--- /dev/null
+++ b/arch/x86/include/asm/dma-contiguous.h
@@ -0,0 +1,13 @@
+#ifndef ASMX86_DMA_CONTIGUOUS_H
+#define ASMX86_DMA_CONTIGUOUS_H
+
+#ifdef __KERNEL__
+
+#include <linux/types.h>
+#include <asm-generic/dma-contiguous.h>
+
+static inline void
+dma_contiguous_early_fixup(phys_addr_t base, unsigned long size) { }
+
+#endif
+#endif
diff --git a/arch/x86/include/asm/dma-mapping.h b/arch/x86/include/asm/dma-mapping.h
index 4b4331d..7b9227b 100644
--- a/arch/x86/include/asm/dma-mapping.h
+++ b/arch/x86/include/asm/dma-mapping.h
@@ -13,6 +13,7 @@
 #include <asm/io.h>
 #include <asm/swiotlb.h>
 #include <asm-generic/dma-coherent.h>
+#include <linux/dma-contiguous.h>
 
 #ifdef CONFIG_ISA
 # define ISA_DMA_BIT_MASK DMA_BIT_MASK(24)
@@ -62,6 +63,10 @@
 					dma_addr_t *dma_addr, gfp_t flag,
 					struct dma_attrs *attrs);
 
+extern void dma_generic_free_coherent(struct device *dev, size_t size,
+				      void *vaddr, dma_addr_t dma_addr,
+				      struct dma_attrs *attrs);
+
 static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
 {
 	if (!dev->dma_mask)
diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c
index 3003250..62c9457 100644
--- a/arch/x86/kernel/pci-dma.c
+++ b/arch/x86/kernel/pci-dma.c
@@ -100,14 +100,18 @@
 				 struct dma_attrs *attrs)
 {
 	unsigned long dma_mask;
-	struct page *page;
+	struct page *page = NULL;
+	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
 	dma_addr_t addr;
 
 	dma_mask = dma_alloc_coherent_mask(dev, flag);
 
 	flag |= __GFP_ZERO;
 again:
-	page = alloc_pages_node(dev_to_node(dev), flag, get_order(size));
+	if (!(flag & GFP_ATOMIC))
+		page = dma_alloc_from_contiguous(dev, count, get_order(size));
+	if (!page)
+		page = alloc_pages_node(dev_to_node(dev), flag, get_order(size));
 	if (!page)
 		return NULL;
 
@@ -127,6 +131,16 @@
 	return page_address(page);
 }
 
+void dma_generic_free_coherent(struct device *dev, size_t size, void *vaddr,
+			       dma_addr_t dma_addr, struct dma_attrs *attrs)
+{
+	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
+	struct page *page = virt_to_page(vaddr);
+
+	if (!dma_release_from_contiguous(dev, page, count))
+		free_pages((unsigned long)vaddr, get_order(size));
+}
+
 /*
  * See <Documentation/x86/x86_64/boot-options.txt> for the iommu kernel
  * parameter documentation.
diff --git a/arch/x86/kernel/pci-nommu.c b/arch/x86/kernel/pci-nommu.c
index f960506..871be4a 100644
--- a/arch/x86/kernel/pci-nommu.c
+++ b/arch/x86/kernel/pci-nommu.c
@@ -74,12 +74,6 @@
 	return nents;
 }
 
-static void nommu_free_coherent(struct device *dev, size_t size, void *vaddr,
-				dma_addr_t dma_addr, struct dma_attrs *attrs)
-{
-	free_pages((unsigned long)vaddr, get_order(size));
-}
-
 static void nommu_sync_single_for_device(struct device *dev,
 			dma_addr_t addr, size_t size,
 			enum dma_data_direction dir)
@@ -97,7 +91,7 @@
 
 struct dma_map_ops nommu_dma_ops = {
 	.alloc			= dma_generic_alloc_coherent,
-	.free			= nommu_free_coherent,
+	.free			= dma_generic_free_coherent,
 	.map_sg			= nommu_map_sg,
 	.map_page		= nommu_map_page,
 	.sync_single_for_device = nommu_sync_single_for_device,
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index 1a29015..d6c956e 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -50,6 +50,7 @@
 #include <asm/pci-direct.h>
 #include <linux/init_ohci1394_dma.h>
 #include <linux/kvm_para.h>
+#include <linux/dma-contiguous.h>
 
 #include <linux/errno.h>
 #include <linux/kernel.h>
@@ -934,6 +935,7 @@
 	}
 #endif
 	memblock.current_limit = get_max_mapped();
+	dma_contiguous_reserve(0);
 
 	/*
 	 * NOTE: On x86-32, only from this point on, fixmaps are ready for use.
diff --git a/block/Kconfig.iosched b/block/Kconfig.iosched
index 3199b76..4142d91 100644
--- a/block/Kconfig.iosched
+++ b/block/Kconfig.iosched
@@ -12,6 +12,17 @@
 	  that do their own scheduling and require only minimal assistance from
 	  the kernel.
 
+config IOSCHED_TEST
+	tristate "Test I/O scheduler"
+	depends on DEBUG_FS
+	default m
+	---help---
+	  The test I/O scheduler is a duplicate of the noop scheduler with
+	  the addition of a test utility.
+	  It allows testing a block device by dispatching specific requests
+	  according to the test case and declaring PASS/FAIL according to
+	  the request completion error code.
+
 config IOSCHED_DEADLINE
 	tristate "Deadline I/O scheduler"
 	default y
diff --git a/block/Makefile b/block/Makefile
index 39b76ba..436b220 100644
--- a/block/Makefile
+++ b/block/Makefile
@@ -15,6 +15,7 @@
 obj-$(CONFIG_IOSCHED_NOOP)	+= noop-iosched.o
 obj-$(CONFIG_IOSCHED_DEADLINE)	+= deadline-iosched.o
 obj-$(CONFIG_IOSCHED_CFQ)	+= cfq-iosched.o
+obj-$(CONFIG_IOSCHED_TEST)	+= test-iosched.o
 
 obj-$(CONFIG_BLOCK_COMPAT)	+= compat_ioctl.o
 obj-$(CONFIG_BLK_DEV_INTEGRITY)	+= blk-integrity.o
diff --git a/block/blk-core.c b/block/blk-core.c
index 038d11f..68d7158 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -977,8 +977,6 @@
 {
 	struct request *rq;
 
-	BUG_ON(rw != READ && rw != WRITE);
-
 	spin_lock_irq(q->queue_lock);
 	if (gfp_mask & __GFP_WAIT)
 		rq = get_request_wait(q, rw, NULL);
@@ -1311,6 +1309,7 @@
 	req->ioprio = bio_prio(bio);
 	blk_rq_bio_prep(req->q, req, bio);
 }
+EXPORT_SYMBOL(init_request_from_bio);
 
 void blk_queue_bio(struct request_queue *q, struct bio *bio)
 {
diff --git a/block/test-iosched.c b/block/test-iosched.c
new file mode 100644
index 0000000..3c38734
--- /dev/null
+++ b/block/test-iosched.c
@@ -0,0 +1,1019 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * The test scheduler allows testing the block device by dispatching
+ * specific requests according to the test case and declaring PASS/FAIL
+ * according to the request completion error code.
+ * Each test is exposed via debugfs and can be triggered by writing to
+ * the debugfs file.
+ *
+ */
+
+/* elevator test iosched */
+#include <linux/blkdev.h>
+#include <linux/elevator.h>
+#include <linux/bio.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/debugfs.h>
+#include <linux/test-iosched.h>
+#include <linux/delay.h>
+#include "blk.h"
+
+#define MODULE_NAME "test-iosched"
+#define WR_RD_START_REQ_ID 1234
+#define UNIQUE_START_REQ_ID 5678
+#define TIMEOUT_TIMER_MS 40000
+#define TEST_MAX_TESTCASE_ROUNDS 15
+
+#define test_pr_debug(fmt, args...) pr_debug("%s: "fmt"\n", MODULE_NAME, args)
+#define test_pr_info(fmt, args...) pr_info("%s: "fmt"\n", MODULE_NAME, args)
+#define test_pr_err(fmt, args...) pr_err("%s: "fmt"\n", MODULE_NAME, args)
+
+static DEFINE_SPINLOCK(blk_dev_test_list_lock);
+static LIST_HEAD(blk_dev_test_list);
+static struct test_data *ptd;
+
+/* Get the request after `test_rq' in the test requests list */
+static struct test_request *
+latter_test_request(struct request_queue *q,
+				 struct test_request *test_rq)
+{
+	struct test_data *td = q->elevator->elevator_data;
+
+	if (test_rq->queuelist.next == &td->test_queue)
+		return NULL;
+	return list_entry(test_rq->queuelist.next, struct test_request,
+			  queuelist);
+}
+
+/**
+ * test_iosched_get_req_queue() - returns the request queue
+ * served by the scheduler
+ */
+struct request_queue *test_iosched_get_req_queue(void)
+{
+	if (!ptd)
+		return NULL;
+
+	return ptd->req_q;
+}
+EXPORT_SYMBOL(test_iosched_get_req_queue);
+
+/**
+ * test_iosched_mark_test_completion() - Wake up the debugfs
+ * thread waiting on the test completion
+ */
+void test_iosched_mark_test_completion(void)
+{
+	if (!ptd)
+		return;
+
+	ptd->test_state = TEST_COMPLETED;
+	wake_up(&ptd->wait_q);
+}
+EXPORT_SYMBOL(test_iosched_mark_test_completion);
+
+/* Check if all the queued test requests were completed */
+static void check_test_completion(void)
+{
+	struct test_request *test_rq;
+	struct request *rq;
+
+	list_for_each_entry(test_rq, &ptd->test_queue, queuelist) {
+		rq = test_rq->rq;
+		if (!test_rq->req_completed)
+			return;
+	}
+
+	test_pr_info("%s: Test is completed", __func__);
+
+	test_iosched_mark_test_completion();
+}
+
+/*
+ * A callback to be called per bio completion.
+ * Frees the bio memory.
+ */
+static void end_test_bio(struct bio *bio, int err)
+{
+	if (err)
+		clear_bit(BIO_UPTODATE, &bio->bi_flags);
+
+	bio_put(bio);
+}
+
+/*
+ * A callback to be called per request completion.
+ * The request memory is not freed here; it is freed later, after the test
+ * results are checked.
+ */
+static void end_test_req(struct request *rq, int err)
+{
+	struct test_request *test_rq;
+
+	test_rq = (struct test_request *)rq->elv.priv[0];
+	BUG_ON(!test_rq);
+
+	test_pr_info("%s: request %d completed, err=%d",
+	       __func__, test_rq->req_id, err);
+
+	test_rq->req_completed = 1;
+	test_rq->req_result = err;
+
+	check_test_completion();
+}
+
+/**
+ * test_iosched_add_unique_test_req() - Create and queue a
+ * non-read/write request (such as FLUSH/DISCARD/SANITIZE).
+ * @is_err_expcted:	A flag to indicate if this request
+ *			should succeed or not
+ * @req_unique:		The type of request to add
+ * @start_sec:		start address of the first bio
+ * @nr_sects:		number of sectors in the request
+ * @end_req_io:		specific completion callback. When not
+ *			set, the default callback will be used
+ */
+int test_iosched_add_unique_test_req(int is_err_expcted,
+			enum req_unique_type req_unique,
+			int start_sec, int nr_sects, rq_end_io_fn *end_req_io)
+{
+	struct bio *bio;
+	struct request *rq;
+	int rw_flags;
+	struct test_request *test_rq;
+
+	if (!ptd)
+		return -ENODEV;
+
+	bio = bio_alloc(GFP_KERNEL, 0);
+	if (!bio) {
+		test_pr_err("%s: Failed to allocate a bio", __func__);
+		return -ENODEV;
+	}
+	bio_get(bio);
+	bio->bi_end_io = end_test_bio;
+
+	switch (req_unique) {
+	case REQ_UNIQUE_FLUSH:
+		bio->bi_rw = WRITE_FLUSH;
+		break;
+	case REQ_UNIQUE_DISCARD:
+		bio->bi_rw = REQ_WRITE | REQ_DISCARD;
+		bio->bi_size = nr_sects << 9;
+		bio->bi_sector = start_sec;
+		break;
+	default:
+		test_pr_err("%s: Invalid request type %d", __func__,
+			    req_unique);
+		bio_put(bio);
+		return -ENODEV;
+	}
+
+	rw_flags = bio_data_dir(bio);
+	if (bio->bi_rw & REQ_SYNC)
+		rw_flags |= REQ_SYNC;
+
+	rq = blk_get_request(ptd->req_q, rw_flags, GFP_KERNEL);
+	if (!rq) {
+		test_pr_err("%s: Failed to allocate a request", __func__);
+		bio_put(bio);
+		return -ENODEV;
+	}
+
+	init_request_from_bio(rq, bio);
+	if (end_req_io)
+		rq->end_io = end_req_io;
+	else
+		rq->end_io = end_test_req;
+
+	test_rq = kzalloc(sizeof(struct test_request), GFP_KERNEL);
+	if (!test_rq) {
+		test_pr_err("%s: Failed to allocate a test request", __func__);
+		bio_put(bio);
+		blk_put_request(rq);
+		return -ENODEV;
+	}
+	test_rq->req_completed = 0;
+	test_rq->req_result = -1;
+	test_rq->rq = rq;
+	test_rq->is_err_expected = is_err_expcted;
+	rq->elv.priv[0] = (void *)test_rq;
+	test_rq->req_id = ptd->unique_next_req_id++;
+
+	test_pr_debug(
+		"%s: added request %d to the test requests list, type = %d",
+		__func__, test_rq->req_id, req_unique);
+
+	list_add_tail(&test_rq->queuelist, &ptd->test_queue);
+
+	return 0;
+}
+EXPORT_SYMBOL(test_iosched_add_unique_test_req);
+
+/*
+ * Fill the request data buffer with the given pattern:
+ * TEST_PATTERN_SEQUENTIAL fills it with sequential numbers, TEST_NO_PATTERN
+ * leaves it untouched, and any other value fills it with that value.
+ */
+static void fill_buf_with_pattern(int *buf, int num_bytes, int pattern)
+{
+	int i = 0;
+	int num_of_dwords = num_bytes/sizeof(int);
+
+	if (pattern == TEST_NO_PATTERN)
+		return;
+
+	/* num_bytes should be aligned to sizeof(int) */
+	BUG_ON((num_bytes % sizeof(int)) != 0);
+
+	if (pattern == TEST_PATTERN_SEQUENTIAL) {
+		for (i = 0; i < num_of_dwords; i++)
+			buf[i] = i;
+	} else {
+		for (i = 0; i < num_of_dwords; i++)
+			buf[i] = pattern;
+	}
+}
+
+/**
+ * test_iosched_add_wr_rd_test_req() - Create and queue a
+ * read/write request.
+ * @is_err_expcted:	A flag to indicate if this request
+ *			should succeed or not
+ * @direction:		READ/WRITE
+ * @start_sec:		start address of the first bio
+ * @num_bios:		number of BIOs to be allocated for the
+ *			request
+ * @pattern:		A pattern, to be written into the write
+ *			requests data buffer. In case of READ
+ *			request, the given pattern is kept as
+ *			the expected pattern. The expected
+ *			pattern will be compared in the test
+ *			check result function. If no comparison
+ *			is required, set pattern to
+ *			TEST_NO_PATTERN.
+ * @end_req_io:		specific completion callback. When not
+ *			set, the default callback will be used
+ *
+ * This function allocates the test request and the block
+ * request and calls blk_rq_map_kern which allocates the
+ * required BIO. The allocated test request and the block
+ * request memory is freed at the end of the test and the
+ * allocated BIO memory is freed by end_test_bio.
+ */
+int test_iosched_add_wr_rd_test_req(int is_err_expcted,
+		      int direction, int start_sec,
+		      int num_bios, int pattern, rq_end_io_fn *end_req_io)
+{
+	struct request *rq = NULL;
+	struct test_request *test_rq = NULL;
+	int rw_flags = 0;
+	int buf_size = 0;
+	int ret = 0, i = 0;
+	unsigned int *bio_ptr = NULL;
+	struct bio *bio = NULL;
+
+	if (!ptd)
+		return -ENODEV;
+
+	rw_flags = direction;
+
+	rq = blk_get_request(ptd->req_q, rw_flags, GFP_KERNEL);
+	if (!rq) {
+		test_pr_err("%s: Failed to allocate a request", __func__);
+		return -ENODEV;
+	}
+
+	test_rq = kzalloc(sizeof(struct test_request), GFP_KERNEL);
+	if (!test_rq) {
+		test_pr_err("%s: Failed to allocate test request", __func__);
+		blk_put_request(rq);
+		return -ENODEV;
+	}
+
+	buf_size = sizeof(unsigned int) * BIO_U32_SIZE * num_bios;
+	test_rq->bios_buffer = kzalloc(buf_size, GFP_KERNEL);
+	if (!test_rq->bios_buffer) {
+		test_pr_err("%s: Failed to allocate the data buf", __func__);
+		goto err;
+	}
+	test_rq->buf_size = buf_size;
+
+	if (direction == WRITE)
+		fill_buf_with_pattern(test_rq->bios_buffer,
+						   buf_size, pattern);
+	test_rq->wr_rd_data_pattern = pattern;
+
+	bio_ptr = test_rq->bios_buffer;
+	for (i = 0; i < num_bios; ++i) {
+		ret = blk_rq_map_kern(ptd->req_q, rq,
+				      (void *)bio_ptr,
+				      sizeof(unsigned int)*BIO_U32_SIZE,
+				      GFP_KERNEL);
+		if (ret) {
+			test_pr_err("%s: blk_rq_map_kern returned error %d",
+				    __func__, ret);
+			goto err;
+		}
+		bio_ptr += BIO_U32_SIZE;
+	}
+
+	if (end_req_io)
+		rq->end_io = end_req_io;
+	else
+		rq->end_io = end_test_req;
+	rq->__sector = start_sec;
+	rq->cmd_type |= REQ_TYPE_FS;
+
+	if (rq->bio) {
+		rq->bio->bi_sector = start_sec;
+		rq->bio->bi_end_io = end_test_bio;
+		bio = rq->bio;
+		while ((bio = bio->bi_next) != NULL)
+			bio->bi_end_io = end_test_bio;
+	}
+
+	ptd->num_of_write_bios += num_bios;
+	test_rq->req_id = ptd->wr_rd_next_req_id++;
+
+	test_rq->req_completed = 0;
+	test_rq->req_result = -1;
+	test_rq->rq = rq;
+	test_rq->is_err_expected = is_err_expcted;
+	rq->elv.priv[0] = (void *)test_rq;
+
+	test_pr_debug(
+		"%s: added request %d to the test requests list, buf_size=%d",
+		__func__, test_rq->req_id, buf_size);
+
+	list_add_tail(&test_rq->queuelist, &ptd->test_queue);
+
+	return 0;
+err:
+	blk_put_request(rq);
+	kfree(test_rq->bios_buffer);
+	return -ENODEV;
+}
+EXPORT_SYMBOL(test_iosched_add_wr_rd_test_req);
+
+/* Converts the testcase number into a string */
+static char *get_test_case_str(struct test_data *td)
+{
+	if (td->test_info.get_test_case_str_fn)
+		return td->test_info.get_test_case_str_fn(td);
+
+	return "Unknown testcase";
+}
+
+/*
+ * Verify that the test request data buffer includes the expected
+ * pattern
+ */
+static int compare_buffer_to_pattern(struct test_request *test_rq)
+{
+	int i = 0;
+	int num_of_dwords = test_rq->buf_size/sizeof(int);
+
+	/* num_bytes should be aligned to sizeof(int) */
+	BUG_ON((test_rq->buf_size % sizeof(int)) != 0);
+	BUG_ON(test_rq->bios_buffer == NULL);
+
+	if (test_rq->wr_rd_data_pattern == TEST_NO_PATTERN)
+		return 0;
+
+	if (test_rq->wr_rd_data_pattern == TEST_PATTERN_SEQUENTIAL) {
+		for (i = 0; i < num_of_dwords; i++) {
+			if (test_rq->bios_buffer[i] != i) {
+				test_pr_err(
+					"%s: wrong pattern 0x%x in index %d",
+					__func__, test_rq->bios_buffer[i], i);
+				return -EINVAL;
+			}
+		}
+	} else {
+		for (i = 0; i < num_of_dwords; i++) {
+			if (test_rq->bios_buffer[i] !=
+			    test_rq->wr_rd_data_pattern) {
+				test_pr_err(
+					"%s: wrong pattern 0x%x in index %d",
+					__func__, test_rq->bios_buffer[i], i);
+				return -EINVAL;
+			}
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Determine if the test passed or failed.
+ * The function checks each test request's completion value and calls
+ * the test case's check_test_result_fn callback for result checks that
+ * are specific to that test case.
+ */
+static int check_test_result(struct test_data *td)
+{
+	struct test_request *test_rq;
+	struct request *rq;
+	int res = 0;
+	static int run;
+
+	list_for_each_entry(test_rq, &ptd->test_queue, queuelist) {
+		rq = test_rq->rq;
+		if (!test_rq->req_completed) {
+			test_pr_err("%s: rq %d not completed", __func__,
+				    test_rq->req_id);
+			res = -EINVAL;
+			goto err;
+		}
+
+		if ((test_rq->req_result < 0) && !test_rq->is_err_expected) {
+			test_pr_err(
+				"%s: rq %d completed with err, not as expected",
+				__func__, test_rq->req_id);
+			res = -EINVAL;
+			goto err;
+		}
+		if ((test_rq->req_result == 0) && test_rq->is_err_expected) {
+			test_pr_err("%s: rq %d succeeded, not as expected",
+				    __func__, test_rq->req_id);
+			res = -EINVAL;
+			goto err;
+		}
+		if (rq_data_dir(test_rq->rq) == READ) {
+			res = compare_buffer_to_pattern(test_rq);
+			if (res) {
+				test_pr_err("%s: read pattern not as expected",
+					    __func__);
+				res = -EINVAL;
+				goto err;
+			}
+		}
+	}
+
+	if (td->test_info.check_test_result_fn) {
+		res = td->test_info.check_test_result_fn(td);
+		if (res)
+			goto err;
+	}
+
+	test_pr_info("%s: %s, run# %03d, PASSED",
+			    __func__, get_test_case_str(td), ++run);
+	td->test_result = TEST_PASSED;
+
+	return 0;
+err:
+	test_pr_err("%s: %s, run# %03d, FAILED",
+		    __func__, get_test_case_str(td), ++run);
+	td->test_result = TEST_FAILED;
+	return res;
+}
+
+/* Create and queue the required requests according to the test case */
+static int prepare_test(struct test_data *td)
+{
+	int ret = 0;
+
+	if (td->test_info.prepare_test_fn) {
+		ret = td->test_info.prepare_test_fn(td);
+		return ret;
+	}
+
+	return 0;
+}
+
+/* Run the test */
+static int run_test(struct test_data *td)
+{
+	int ret = 0;
+
+	if (td->test_info.run_test_fn) {
+		ret = td->test_info.run_test_fn(td);
+		return ret;
+	}
+
+	/*
+	 * Set the next_req pointer to the first request in the test requests
+	 * list
+	 */
+	if (!list_empty(&td->test_queue))
+		td->next_req = list_entry(td->test_queue.next,
+					  struct test_request, queuelist);
+	__blk_run_queue(td->req_q);
+
+	return 0;
+}
+
+/* Free the allocated test requests, their block layer requests and BIO buffers */
+static void free_test_requests(struct test_data *td)
+{
+	struct test_request *test_rq;
+	while (!list_empty(&td->test_queue)) {
+		test_rq = list_entry(td->test_queue.next, struct test_request,
+				     queuelist);
+		list_del_init(&test_rq->queuelist);
+		blk_put_request(test_rq->rq);
+		kfree(test_rq->bios_buffer);
+		kfree(test_rq);
+	}
+}
+
+/*
+ * Do post-test operations.
+ * Free the allocated test requests, their block layer requests and BIO buffers.
+ */
+static int post_test(struct test_data *td)
+{
+	int ret = 0;
+
+	if (td->test_info.post_test_fn)
+		ret = td->test_info.post_test_fn(td);
+
+	ptd->test_info.testcase = 0;
+	ptd->test_state = TEST_IDLE;
+
+	free_test_requests(td);
+
+	return ret;
+}
+
+/*
+ * The timer verifies that the test will be completed even if we don't get
+ * the completion callback for all the requests.
+ */
+static void test_timeout_handler(unsigned long data)
+{
+	struct test_data *td = (struct test_data *)data;
+
+	test_pr_info("%s: TIMEOUT timer expired", __func__);
+	td->test_state = TEST_COMPLETED;
+	wake_up(&td->wait_q);
+	return;
+}
+
+static unsigned int get_timeout_msec(struct test_data *td)
+{
+	if (td->test_info.timeout_msec)
+		return td->test_info.timeout_msec;
+	else
+		return TIMEOUT_TIMER_MS;
+}
+
+/**
+ * test_iosched_start_test() - Prepares and runs the test.
+ * @t_info:	the current test's testcase and callback
+ *		functions
+ *
+ * The function also checks the test result upon test completion
+ */
+int test_iosched_start_test(struct test_info *t_info)
+{
+	int ret = 0;
+	unsigned timeout_msec;
+	int counter = 0;
+	char *test_name = NULL;
+
+	if (!ptd)
+		return -ENODEV;
+
+	if (!t_info) {
+		ptd->test_result = TEST_FAILED;
+		return -EINVAL;
+	}
+
+	do {
+		if (ptd->ignore_round)
+			/*
+			 * We ignored the last run due to FS write requests.
+			 * Sleep to allow those requests to be issued
+			 */
+			msleep(2000);
+
+		spin_lock(&ptd->lock);
+
+		if (ptd->test_state != TEST_IDLE) {
+			test_pr_info(
+				"%s: Another test is running, try again later",
+				__func__);
+			spin_unlock(&ptd->lock);
+			return -EINVAL;
+		}
+
+		if (ptd->start_sector == 0) {
+			test_pr_err("%s: Invalid start sector", __func__);
+			ptd->test_result = TEST_FAILED;
+			spin_unlock(&ptd->lock);
+			return -EINVAL;
+		}
+
+		memcpy(&ptd->test_info, t_info, sizeof(struct test_info));
+
+		ptd->next_req = NULL;
+		ptd->test_result = TEST_NO_RESULT;
+		ptd->num_of_write_bios = 0;
+
+		ptd->unique_next_req_id = UNIQUE_START_REQ_ID;
+		ptd->wr_rd_next_req_id = WR_RD_START_REQ_ID;
+
+		ptd->ignore_round = false;
+		ptd->fs_wr_reqs_during_test = false;
+
+		ptd->test_state = TEST_RUNNING;
+
+		spin_unlock(&ptd->lock);
+
+		timeout_msec = get_timeout_msec(ptd);
+		mod_timer(&ptd->timeout_timer, jiffies +
+			  msecs_to_jiffies(timeout_msec));
+
+		if (ptd->test_info.get_test_case_str_fn)
+			test_name = ptd->test_info.get_test_case_str_fn(ptd);
+		else
+			test_name = "Unknown testcase";
+		test_pr_info("%s: Starting test %s\n", __func__, test_name);
+
+		ret = prepare_test(ptd);
+		if (ret) {
+			test_pr_err("%s: failed to prepare the test\n",
+				    __func__);
+			goto error;
+		}
+
+		ret = run_test(ptd);
+		if (ret) {
+			test_pr_err("%s: failed to run the test\n", __func__);
+			goto error;
+		}
+
+		test_pr_info("%s: Waiting for the test completion", __func__);
+
+		wait_event(ptd->wait_q, ptd->test_state == TEST_COMPLETED);
+		del_timer_sync(&ptd->timeout_timer);
+
+		ret = check_test_result(ptd);
+		if (ret) {
+			test_pr_err("%s: check_test_result failed\n",
+				    __func__);
+			goto error;
+		}
+
+		ret = post_test(ptd);
+		if (ret) {
+			test_pr_err("%s: post_test failed\n", __func__);
+			goto error;
+		}
+
+		/*
+		 * Wake up the queue thread to fetch FS requests that might have
+		 * been postponed due to the test
+		 */
+		__blk_run_queue(ptd->req_q);
+
+		if (ptd->ignore_round)
+			test_pr_info(
+			"%s: Round canceled (Got wr reqs in the middle)",
+			__func__);
+
+		if (++counter == TEST_MAX_TESTCASE_ROUNDS) {
+			test_pr_info("%s: Too many rounds, did not succeed...",
+			     __func__);
+			ptd->test_result = TEST_FAILED;
+		}
+
+	} while ((ptd->ignore_round) && (counter < TEST_MAX_TESTCASE_ROUNDS));
+
+	if (ptd->test_result == TEST_PASSED)
+		return 0;
+	else
+		return -EINVAL;
+
+error:
+	ptd->test_result = TEST_FAILED;
+	ptd->test_info.testcase = 0;
+	post_test(ptd);
+	return ret;
+}
+EXPORT_SYMBOL(test_iosched_start_test);
+
+/**
+ * test_iosched_register() - register a block device test
+ * utility.
+ * @bdt:	the block device test type to register
+ */
+void test_iosched_register(struct blk_dev_test_type *bdt)
+{
+	spin_lock(&blk_dev_test_list_lock);
+	list_add_tail(&bdt->list, &blk_dev_test_list);
+	spin_unlock(&blk_dev_test_list_lock);
+}
+EXPORT_SYMBOL_GPL(test_iosched_register);
+
+/**
+ * test_iosched_unregister() - unregister a block device test
+ * utility.
+ * @bdt:	the block device test type to unregister
+ */
+void test_iosched_unregister(struct blk_dev_test_type *bdt)
+{
+	spin_lock(&blk_dev_test_list_lock);
+	list_del_init(&bdt->list);
+	spin_unlock(&blk_dev_test_list_lock);
+}
+EXPORT_SYMBOL_GPL(test_iosched_unregister);
+
+/**
+ * test_iosched_set_test_result() - Set the test result (PASS/FAIL)
+ * @test_result:	the test result
+ */
+void test_iosched_set_test_result(int test_result)
+{
+	if (!ptd)
+		return;
+
+	ptd->test_result = test_result;
+}
+EXPORT_SYMBOL(test_iosched_set_test_result);
+
+
+/**
+ * test_iosched_set_ignore_round() - Set the ignore_round flag
+ * @ignore_round:	A flag to indicate if this test round
+ * should be ignored and re-run
+ */
+void test_iosched_set_ignore_round(bool ignore_round)
+{
+	if (!ptd)
+		return;
+
+	ptd->ignore_round = ignore_round;
+}
+EXPORT_SYMBOL(test_iosched_set_ignore_round);
+
+/**
+ * test_iosched_get_debugfs_tests_root() - returns the root
+ * debugfs directory for the test_iosched tests
+ */
+struct dentry *test_iosched_get_debugfs_tests_root(void)
+{
+	if (!ptd)
+		return NULL;
+
+	return ptd->debug.debug_tests_root;
+}
+EXPORT_SYMBOL(test_iosched_get_debugfs_tests_root);
+
+/**
+ * test_iosched_get_debugfs_utils_root() - returns the root
+ * debugfs directory for the test_iosched utils
+ */
+struct dentry *test_iosched_get_debugfs_utils_root(void)
+{
+	if (!ptd)
+		return NULL;
+
+	return ptd->debug.debug_utils_root;
+}
+EXPORT_SYMBOL(test_iosched_get_debugfs_utils_root);
+
+static int test_debugfs_init(struct test_data *td)
+{
+	td->debug.debug_root = debugfs_create_dir("test-iosched", NULL);
+	if (!td->debug.debug_root)
+		return -ENOENT;
+
+	td->debug.debug_tests_root = debugfs_create_dir("tests",
+							td->debug.debug_root);
+	if (!td->debug.debug_tests_root)
+		goto err;
+
+	td->debug.debug_utils_root = debugfs_create_dir("utils",
+							td->debug.debug_root);
+	if (!td->debug.debug_utils_root)
+		goto err;
+
+	td->debug.debug_test_result = debugfs_create_u32(
+					"test_result",
+					S_IRUGO | S_IWUGO,
+					td->debug.debug_utils_root,
+					&td->test_result);
+	if (!td->debug.debug_test_result)
+		goto err;
+
+	td->debug.start_sector = debugfs_create_u32(
+					"start_sector",
+					S_IRUGO | S_IWUGO,
+					td->debug.debug_utils_root,
+					&td->start_sector);
+	if (!td->debug.start_sector)
+		goto err;
+
+	return 0;
+
+err:
+	debugfs_remove_recursive(td->debug.debug_root);
+	return -ENOENT;
+}
+
+static void test_debugfs_cleanup(struct test_data *td)
+{
+	debugfs_remove_recursive(td->debug.debug_root);
+}
+
+static void print_req(struct request *req)
+{
+	struct bio *bio;
+	struct test_request *test_rq;
+
+	if (!req)
+		return;
+
+	test_rq = (struct test_request *)req->elv.priv[0];
+
+	if (test_rq) {
+		test_pr_debug("%s: Dispatch request %d: __sector=0x%lx",
+		       __func__, test_rq->req_id, (unsigned long)req->__sector);
+		test_pr_debug("%s: nr_phys_segments=%d, num_of_sectors=%d",
+		       __func__, req->nr_phys_segments, blk_rq_sectors(req));
+		bio = req->bio;
+		test_pr_debug("%s: bio: bi_size=%d, bi_sector=0x%lx",
+			      __func__, bio->bi_size,
+			      (unsigned long)bio->bi_sector);
+		while ((bio = bio->bi_next) != NULL) {
+			test_pr_debug("%s: bio: bi_size=%d, bi_sector=0x%lx",
+				      __func__, bio->bi_size,
+				      (unsigned long)bio->bi_sector);
+		}
+	}
+}
+
+static void test_merged_requests(struct request_queue *q,
+			 struct request *rq, struct request *next)
+{
+	list_del_init(&next->queuelist);
+}
+
+/*
+ * Dispatch a test request in case there is a running test. Otherwise, dispatch
+ * a request that was queued by the FS to keep the card functional.
+ */
+static int test_dispatch_requests(struct request_queue *q, int force)
+{
+	struct test_data *td = q->elevator->elevator_data;
+	struct request *rq = NULL;
+
+	switch (td->test_state) {
+	case TEST_IDLE:
+		if (!list_empty(&td->queue)) {
+			rq = list_entry(td->queue.next, struct request,
+					queuelist);
+			list_del_init(&rq->queuelist);
+			elv_dispatch_sort(q, rq);
+			return 1;
+		}
+		break;
+	case TEST_RUNNING:
+		if (td->next_req) {
+			rq = td->next_req->rq;
+			td->next_req =
+				latter_test_request(td->req_q, td->next_req);
+			if (!rq)
+				return 0;
+			print_req(rq);
+			elv_dispatch_sort(q, rq);
+			return 1;
+		}
+		break;
+	case TEST_COMPLETED:
+	default:
+		return 0;
+	}
+
+	return 0;
+}
+
+static void test_add_request(struct request_queue *q, struct request *rq)
+{
+	struct test_data *td = q->elevator->elevator_data;
+
+	list_add_tail(&rq->queuelist, &td->queue);
+
+	/*
+	 * Write requests can be followed by a FLUSH request, which might
+	 * cause unexpected test results.
+	 */
+	if ((rq_data_dir(rq) == WRITE) && (td->test_state == TEST_RUNNING)) {
+		test_pr_debug("%s: got WRITE req in the middle of the test",
+			__func__);
+		td->fs_wr_reqs_during_test = true;
+	}
+}
+
+static struct request *
+test_former_request(struct request_queue *q, struct request *rq)
+{
+	struct test_data *td = q->elevator->elevator_data;
+
+	if (rq->queuelist.prev == &td->queue)
+		return NULL;
+	return list_entry(rq->queuelist.prev, struct request, queuelist);
+}
+
+static struct request *
+test_latter_request(struct request_queue *q, struct request *rq)
+{
+	struct test_data *td = q->elevator->elevator_data;
+
+	if (rq->queuelist.next == &td->queue)
+		return NULL;
+	return list_entry(rq->queuelist.next, struct request, queuelist);
+}
+
+static void *test_init_queue(struct request_queue *q)
+{
+	struct blk_dev_test_type *__bdt;
+
+	ptd = kmalloc_node(sizeof(struct test_data), GFP_KERNEL,
+			     q->node);
+	if (!ptd) {
+		test_pr_err("%s: failed to allocate test data", __func__);
+		return NULL;
+	}
+	memset((void *)ptd, 0, sizeof(struct test_data));
+	INIT_LIST_HEAD(&ptd->queue);
+	INIT_LIST_HEAD(&ptd->test_queue);
+	init_waitqueue_head(&ptd->wait_q);
+	ptd->req_q = q;
+
+	setup_timer(&ptd->timeout_timer, test_timeout_handler,
+		    (unsigned long)ptd);
+
+	spin_lock_init(&ptd->lock);
+
+	if (test_debugfs_init(ptd)) {
+		test_pr_err("%s: Failed to create debugfs files", __func__);
+		kfree(ptd);
+		return NULL;
+	}
+
+	list_for_each_entry(__bdt, &blk_dev_test_list, list)
+		__bdt->init_fn();
+
+	return ptd;
+}
+
+static void test_exit_queue(struct elevator_queue *e)
+{
+	struct test_data *td = e->elevator_data;
+	struct blk_dev_test_type *__bdt;
+
+	BUG_ON(!list_empty(&td->queue));
+
+	list_for_each_entry(__bdt, &blk_dev_test_list, list)
+		__bdt->exit_fn();
+
+	test_debugfs_cleanup(td);
+
+	kfree(td);
+}
+
+static struct elevator_type elevator_test_iosched = {
+	.ops = {
+		.elevator_merge_req_fn = test_merged_requests,
+		.elevator_dispatch_fn = test_dispatch_requests,
+		.elevator_add_req_fn = test_add_request,
+		.elevator_former_req_fn = test_former_request,
+		.elevator_latter_req_fn = test_latter_request,
+		.elevator_init_fn = test_init_queue,
+		.elevator_exit_fn = test_exit_queue,
+	},
+	.elevator_name = "test-iosched",
+	.elevator_owner = THIS_MODULE,
+};
+
+static int __init test_init(void)
+{
+	elv_register(&elevator_test_iosched);
+
+	return 0;
+}
+
+static void __exit test_exit(void)
+{
+	elv_unregister(&elevator_test_iosched);
+}
+
+module_init(test_init);
+module_exit(test_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Test IO scheduler");
diff --git a/drivers/Kconfig b/drivers/Kconfig
index 286a4d4..a73d713 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -148,4 +148,6 @@
 
 source "drivers/gud/Kconfig"
 
+source "drivers/coresight/Kconfig"
+
 endmenu
diff --git a/drivers/Makefile b/drivers/Makefile
index bea505c..bd18a62 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -140,3 +140,5 @@
 
 #MobiCore
 obj-$(CONFIG_MOBICORE_SUPPORT)  += gud/
+
+obj-$(CONFIG_MSM_QDSS)		+= coresight/
diff --git a/drivers/base/Kconfig b/drivers/base/Kconfig
index 4201aba..0b92897 100644
--- a/drivers/base/Kconfig
+++ b/drivers/base/Kconfig
@@ -232,4 +232,94 @@
          Provides a user space API to the sw sync object.
          *WARNING* improper use of this can result in deadlocking kernel
 	 drivers from userspace.
+
+config CMA
+	bool "Contiguous Memory Allocator (EXPERIMENTAL)"
+	depends on HAVE_DMA_CONTIGUOUS && HAVE_MEMBLOCK && EXPERIMENTAL
+	select MIGRATION
+	help
+	  This enables the Contiguous Memory Allocator which allows drivers
+	  to allocate big physically-contiguous blocks of memory for use with
+	  hardware components that do not support I/O mapping or scatter-gather.
+
+	  For more information see <include/linux/dma-contiguous.h>.
+	  If unsure, say "n".
+
+if CMA
+
+config CMA_DEBUG
+	bool "CMA debug messages (DEVELOPMENT)"
+	depends on DEBUG_KERNEL
+	help
+	  Turns on debug messages in CMA.  This produces KERN_DEBUG
+	  messages for every CMA call as well as various messages while
+	  processing calls such as dma_alloc_from_contiguous().
+	  This option does not affect warning and error messages.
+
+comment "Default contiguous memory area size:"
+
+config CMA_SIZE_MBYTES
+	int "Size in Mega Bytes"
+	depends on !CMA_SIZE_SEL_PERCENTAGE
+	default 16
+	help
+	  Defines the size (in MiB) of the default memory area for Contiguous
+	  Memory Allocator.
+
+config CMA_SIZE_PERCENTAGE
+	int "Percentage of total memory"
+	depends on !CMA_SIZE_SEL_MBYTES
+	default 10
+	help
+	  Defines the size of the default memory area for Contiguous Memory
+	  Allocator as a percentage of the total memory in the system.
+
+choice
+	prompt "Selected region size"
+	default CMA_SIZE_SEL_ABSOLUTE
+
+config CMA_SIZE_SEL_MBYTES
+	bool "Use mega bytes value only"
+
+config CMA_SIZE_SEL_PERCENTAGE
+	bool "Use percentage value only"
+
+config CMA_SIZE_SEL_MIN
+	bool "Use lower value (minimum)"
+
+config CMA_SIZE_SEL_MAX
+	bool "Use higher value (maximum)"
+
+endchoice
+
+config CMA_ALIGNMENT
+	int "Maximum PAGE_SIZE order of alignment for contiguous buffers"
+	range 4 9
+	default 8
+	help
+	  DMA mapping framework by default aligns all buffers to the smallest
+	  PAGE_SIZE order which is greater than or equal to the requested buffer
+	  size. This works well for buffers up to a few hundred kilobytes, but
+	  for larger buffers it is just a memory waste. With this parameter you can
+	  specify the maximum PAGE_SIZE order for contiguous buffers. Larger
+	  buffers will be aligned only to this specified order. The order is
+	  expressed as a power of two multiplied by the PAGE_SIZE.
+
+	  For example, if your system defaults to 4KiB pages, the order value
+	  of 8 means that the buffers will be aligned up to 1MiB only.
+
+	  If unsure, leave the default value "8".
+
+config CMA_AREAS
+	int "Maximum count of the CMA device-private areas"
+	default 7
+	help
+	  CMA allows creating CMA areas for particular devices. This parameter
+	  sets the maximum number of such device private CMA areas in the
+	  system.
+
+	  If unsure, leave the default value "7".
+
+endif
+
 endmenu
diff --git a/drivers/base/Makefile b/drivers/base/Makefile
index f81ab90..cb4528f 100644
--- a/drivers/base/Makefile
+++ b/drivers/base/Makefile
@@ -6,6 +6,7 @@
 			   attribute_container.o transport_class.o \
 			   topology.o sys.o
 obj-$(CONFIG_DEVTMPFS)	+= devtmpfs.o
+obj-$(CONFIG_CMA) += dma-contiguous.o
 obj-y			+= power/
 obj-$(CONFIG_HAS_DMA)	+= dma-mapping.o
 obj-$(CONFIG_HAVE_GENERIC_DMA_COHERENT) += dma-coherent.o
diff --git a/drivers/base/dma-contiguous.c b/drivers/base/dma-contiguous.c
new file mode 100644
index 0000000..78efb03
--- /dev/null
+++ b/drivers/base/dma-contiguous.c
@@ -0,0 +1,401 @@
+/*
+ * Contiguous Memory Allocator for DMA mapping framework
+ * Copyright (c) 2010-2011 by Samsung Electronics.
+ * Written by:
+ *	Marek Szyprowski <m.szyprowski@samsung.com>
+ *	Michal Nazarewicz <mina86@mina86.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License or (at your option) any later version of the license.
+ */
+
+#define pr_fmt(fmt) "cma: " fmt
+
+#ifdef CONFIG_CMA_DEBUG
+#ifndef DEBUG
+#  define DEBUG
+#endif
+#endif
+
+#include <asm/page.h>
+#include <asm/dma-contiguous.h>
+
+#include <linux/memblock.h>
+#include <linux/err.h>
+#include <linux/mm.h>
+#include <linux/mutex.h>
+#include <linux/page-isolation.h>
+#include <linux/slab.h>
+#include <linux/swap.h>
+#include <linux/mm_types.h>
+#include <linux/dma-contiguous.h>
+
+#ifndef SZ_1M
+#define SZ_1M (1 << 20)
+#endif
+
+struct cma {
+	unsigned long	base_pfn;
+	unsigned long	count;
+	unsigned long	*bitmap;
+};
+
+struct cma *dma_contiguous_default_area;
+
+#ifdef CONFIG_CMA_SIZE_MBYTES
+#define CMA_SIZE_MBYTES CONFIG_CMA_SIZE_MBYTES
+#else
+#define CMA_SIZE_MBYTES 0
+#endif
+
+/*
+ * Default global CMA area size can be defined in kernel's .config.
+ * This is useful mainly for distro maintainers to create a kernel
+ * that works correctly for most supported systems.
+ * The size can be set in bytes or as a percentage of the total memory
+ * in the system.
+ *
+ * Users who want to set the size of the global CMA area for their system
+ * should use the cma= kernel parameter.
+ */
+static const unsigned long size_bytes = CMA_SIZE_MBYTES * SZ_1M;
+static long size_cmdline = -1;
+
+static int __init early_cma(char *p)
+{
+	pr_debug("%s(%s)\n", __func__, p);
+	size_cmdline = memparse(p, &p);
+	return 0;
+}
+early_param("cma", early_cma);
+
+#ifdef CONFIG_CMA_SIZE_PERCENTAGE
+
+static unsigned long __init __maybe_unused cma_early_percent_memory(void)
+{
+	struct memblock_region *reg;
+	unsigned long total_pages = 0;
+
+	/*
+	 * We cannot use memblock_phys_mem_size() here, because
+	 * memblock_analyze() has not been called yet.
+	 */
+	for_each_memblock(memory, reg)
+		total_pages += memblock_region_memory_end_pfn(reg) -
+			       memblock_region_memory_base_pfn(reg);
+
+	return (total_pages * CONFIG_CMA_SIZE_PERCENTAGE / 100) << PAGE_SHIFT;
+}
+
+#else
+
+static inline __maybe_unused unsigned long cma_early_percent_memory(void)
+{
+	return 0;
+}
+
+#endif
+
+/**
+ * dma_contiguous_reserve() - reserve area for contiguous memory handling
+ * @limit: End address of the reserved memory (optional, 0 for any).
+ *
+ * This function reserves memory from the early allocator. It should be
+ * called by arch specific code once the early allocator (memblock or bootmem)
+ * has been activated and all other subsystems have already allocated/reserved
+ * memory.
+ */
+void __init dma_contiguous_reserve(phys_addr_t limit)
+{
+	unsigned long selected_size = 0;
+
+	pr_debug("%s(limit %08lx)\n", __func__, (unsigned long)limit);
+
+	if (size_cmdline != -1) {
+		selected_size = size_cmdline;
+	} else {
+#ifdef CONFIG_CMA_SIZE_SEL_MBYTES
+		selected_size = size_bytes;
+#elif defined(CONFIG_CMA_SIZE_SEL_PERCENTAGE)
+		selected_size = cma_early_percent_memory();
+#elif defined(CONFIG_CMA_SIZE_SEL_MIN)
+		selected_size = min(size_bytes, cma_early_percent_memory());
+#elif defined(CONFIG_CMA_SIZE_SEL_MAX)
+		selected_size = max(size_bytes, cma_early_percent_memory());
+#endif
+	}
+
+	if (selected_size) {
+		pr_debug("%s: reserving %ld MiB for global area\n", __func__,
+			 selected_size / SZ_1M);
+
+		dma_declare_contiguous(NULL, selected_size, 0, limit);
+	}
+}
+
+static DEFINE_MUTEX(cma_mutex);
+
+static __init int cma_activate_area(unsigned long base_pfn, unsigned long count)
+{
+	unsigned long pfn = base_pfn;
+	unsigned i = count >> pageblock_order;
+	struct zone *zone;
+
+	WARN_ON_ONCE(!pfn_valid(pfn));
+	zone = page_zone(pfn_to_page(pfn));
+
+	do {
+		unsigned j;
+		base_pfn = pfn;
+		for (j = pageblock_nr_pages; j; --j, pfn++) {
+			WARN_ON_ONCE(!pfn_valid(pfn));
+			if (page_zone(pfn_to_page(pfn)) != zone)
+				return -EINVAL;
+		}
+		init_cma_reserved_pageblock(pfn_to_page(base_pfn));
+	} while (--i);
+	return 0;
+}
+
+static __init struct cma *cma_create_area(unsigned long base_pfn,
+				     unsigned long count)
+{
+	int bitmap_size = BITS_TO_LONGS(count) * sizeof(long);
+	struct cma *cma;
+	int ret = -ENOMEM;
+
+	pr_debug("%s(base %08lx, count %lx)\n", __func__, base_pfn, count);
+
+	cma = kmalloc(sizeof *cma, GFP_KERNEL);
+	if (!cma)
+		return ERR_PTR(-ENOMEM);
+
+	cma->base_pfn = base_pfn;
+	cma->count = count;
+	cma->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
+
+	if (!cma->bitmap)
+		goto no_mem;
+
+	ret = cma_activate_area(base_pfn, count);
+	if (ret)
+		goto error;
+
+	pr_debug("%s: returned %p\n", __func__, (void *)cma);
+	return cma;
+
+error:
+	kfree(cma->bitmap);
+no_mem:
+	kfree(cma);
+	return ERR_PTR(ret);
+}
+
+static struct cma_reserved {
+	phys_addr_t start;
+	unsigned long size;
+	struct device *dev;
+} cma_reserved[MAX_CMA_AREAS] __initdata;
+static unsigned cma_reserved_count __initdata;
+
+static int __init cma_init_reserved_areas(void)
+{
+	struct cma_reserved *r = cma_reserved;
+	unsigned i = cma_reserved_count;
+
+	pr_debug("%s()\n", __func__);
+
+	for (; i; --i, ++r) {
+		struct cma *cma;
+		cma = cma_create_area(PFN_DOWN(r->start),
+				      r->size >> PAGE_SHIFT);
+		if (!IS_ERR(cma))
+			dev_set_cma_area(r->dev, cma);
+	}
+	return 0;
+}
+core_initcall(cma_init_reserved_areas);
+
+/**
+ * dma_declare_contiguous() - reserve area for contiguous memory handling
+ *			      for particular device
+ * @dev:   Pointer to device structure.
+ * @size:  Size of the reserved memory.
+ * @base:  Start address of the reserved memory (optional, 0 for any).
+ * @limit: End address of the reserved memory (optional, 0 for any).
+ *
+ * This function reserves memory for the specified device. It should be
+ * called by board specific code while the early allocator (memblock or
+ * bootmem) is still active.
+ */
+int __init dma_declare_contiguous(struct device *dev, unsigned long size,
+				  phys_addr_t base, phys_addr_t limit)
+{
+	struct cma_reserved *r = &cma_reserved[cma_reserved_count];
+	unsigned long alignment;
+
+	pr_debug("%s(size %lx, base %08lx, limit %08lx)\n", __func__,
+		 (unsigned long)size, (unsigned long)base,
+		 (unsigned long)limit);
+
+	/* Sanity checks */
+	if (cma_reserved_count == ARRAY_SIZE(cma_reserved)) {
+		pr_err("Not enough slots for CMA reserved regions!\n");
+		return -ENOSPC;
+	}
+
+	if (!size)
+		return -EINVAL;
+
+	/* Sanitise input arguments */
+	alignment = PAGE_SIZE << max(MAX_ORDER, pageblock_order);
+	base = ALIGN(base, alignment);
+	size = ALIGN(size, alignment);
+	limit &= ~(alignment - 1);
+
+	/* Reserve memory */
+	if (base) {
+		if (memblock_is_region_reserved(base, size) ||
+		    memblock_reserve(base, size) < 0) {
+			base = -EBUSY;
+			goto err;
+		}
+	} else {
+		/*
+		 * Use __memblock_alloc_base() since
+		 * memblock_alloc_base() panic()s.
+		 */
+		phys_addr_t addr = __memblock_alloc_base(size, alignment, limit);
+		if (!addr) {
+			base = -ENOMEM;
+			goto err;
+		} else if (addr + size > ~(unsigned long)0) {
+			memblock_free(addr, size);
+			base = -EINVAL;
+			goto err;
+		} else {
+			base = addr;
+		}
+	}
+
+	/*
+	 * Each reserved area must be initialised later, when more kernel
+	 * subsystems (like slab allocator) are available.
+	 */
+	r->start = base;
+	r->size = size;
+	r->dev = dev;
+	cma_reserved_count++;
+	pr_info("CMA: reserved %ld MiB at %08lx\n", size / SZ_1M,
+		(unsigned long)base);
+
+	/* Architecture specific contiguous memory fixup. */
+	dma_contiguous_early_fixup(base, size);
+	return 0;
+err:
+	pr_err("CMA: failed to reserve %ld MiB\n", size / SZ_1M);
+	return base;
+}
+
+/**
+ * dma_alloc_from_contiguous() - allocate pages from contiguous area
+ * @dev:   Pointer to device for which the allocation is performed.
+ * @count: Requested number of pages.
+ * @align: Requested alignment of pages (in PAGE_SIZE order).
+ *
+ * This function allocates a memory buffer for the specified device. It uses
+ * the device-specific contiguous memory area if available, or the default
+ * global one. Requires architecture specific get_dev_cma_area() helper
+ * function.
+ */
+struct page *dma_alloc_from_contiguous(struct device *dev, int count,
+				       unsigned int align)
+{
+	unsigned long mask, pfn, pageno, start = 0;
+	struct cma *cma = dev_get_cma_area(dev);
+	int ret;
+
+	if (!cma || !cma->count)
+		return NULL;
+
+	if (align > CONFIG_CMA_ALIGNMENT)
+		align = CONFIG_CMA_ALIGNMENT;
+
+	pr_debug("%s(cma %p, count %d, align %d)\n", __func__, (void *)cma,
+		 count, align);
+
+	if (!count)
+		return NULL;
+
+	mask = (1 << align) - 1;
+
+	mutex_lock(&cma_mutex);
+
+	for (;;) {
+		pageno = bitmap_find_next_zero_area(cma->bitmap, cma->count,
+						    start, count, mask);
+		if (pageno >= cma->count) {
+			ret = -ENOMEM;
+			goto error;
+		}
+
+		pfn = cma->base_pfn + pageno;
+		ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA);
+		if (ret == 0) {
+			bitmap_set(cma->bitmap, pageno, count);
+			break;
+		} else if (ret != -EBUSY) {
+			goto error;
+		}
+		pr_debug("%s(): memory range at %p is busy, retrying\n",
+			 __func__, pfn_to_page(pfn));
+		/* try again with a bit different memory target */
+		start = pageno + mask + 1;
+	}
+
+	mutex_unlock(&cma_mutex);
+
+	pr_debug("%s(): returned %p\n", __func__, pfn_to_page(pfn));
+	return pfn_to_page(pfn);
+error:
+	mutex_unlock(&cma_mutex);
+	return NULL;
+}
+
+/**
+ * dma_release_from_contiguous() - release allocated pages
+ * @dev:   Pointer to device for which the pages were allocated.
+ * @pages: Allocated pages.
+ * @count: Number of allocated pages.
+ *
+ * This function releases memory allocated by dma_alloc_from_contiguous().
+ * It returns false when the provided pages do not belong to the
+ * contiguous area, and true otherwise.
+ */
+bool dma_release_from_contiguous(struct device *dev, struct page *pages,
+				 int count)
+{
+	struct cma *cma = dev_get_cma_area(dev);
+	unsigned long pfn;
+
+	if (!cma || !pages)
+		return false;
+
+	pr_debug("%s(page %p)\n", __func__, (void *)pages);
+
+	pfn = page_to_pfn(pages);
+
+	if (pfn < cma->base_pfn || pfn >= cma->base_pfn + cma->count)
+		return false;
+
+	VM_BUG_ON(pfn + count > cma->base_pfn + cma->count);
+
+	mutex_lock(&cma_mutex);
+	bitmap_clear(cma->bitmap, pfn - cma->base_pfn, count);
+	free_contig_range(pfn, count);
+	mutex_unlock(&cma_mutex);
+
+	return true;
+}
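
For reference, a minimal sketch of how the two halves of this API fit
together: board code reserves a region while memblock is still active (or
the global area is sized via the cma= kernel parameter parsed by early_cma()
above), and a driver later allocates pages from that region. The device,
sizes and call sites below are illustrative assumptions only.

/*
 * Illustrative sketch only -- not part of this patch.
 */
#include <linux/init.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/dma-contiguous.h>

/*
 * 1) Board/arch code, called while memblock is still active (typically
 *    from the machine's reserve callback): reserve an 8 MiB area for the
 *    default (global) CMA region.
 */
static void __init example_reserve(void)
{
	dma_declare_contiguous(NULL, 8 * 1024 * 1024, 0, 0);
}

/*
 * 2) Driver code: allocate and release a 1 MiB physically contiguous
 *    buffer (256 pages with 4 KiB pages), requesting order-8 alignment
 *    (capped by CONFIG_CMA_ALIGNMENT).
 */
static struct page *example_pages;

static int example_alloc(struct device *dev)
{
	example_pages = dma_alloc_from_contiguous(dev, 256, 8);
	if (!example_pages)
		return -ENOMEM;
	return 0;
}

static void example_free(struct device *dev)
{
	if (example_pages)
		dma_release_from_contiguous(dev, example_pages, 256);
}

dma_alloc_from_contiguous() uses the device-specific area set up by
dma_declare_contiguous(dev, ...) when one exists, and falls back to the
global area otherwise.
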
diff --git a/drivers/char/diag/diagfwd.c b/drivers/char/diag/diagfwd.c
index 69aa411..1c0f14b 100644
--- a/drivers/char/diag/diagfwd.c
+++ b/drivers/char/diag/diagfwd.c
@@ -120,7 +120,7 @@
 		return 0;
 
 	if (driver->use_device_tree) {
-		if (machine_is_copper())
+		if (machine_is_msm8974())
 			return MSM8974_TOOLS_ID;
 		else
 			return 0;
@@ -134,7 +134,7 @@
 			return APQ8064_TOOLS_ID;
 		case MSM_CPU_8930:
 			return MSM8930_TOOLS_ID;
-		case MSM_CPU_COPPER:
+		case MSM_CPU_8974:
 			return MSM8974_TOOLS_ID;
 		case MSM_CPU_8625:
 			return MSM8625_TOOLS_ID;
@@ -159,7 +159,7 @@
 	case MSM_CPU_8930:
 	case MSM_CPU_8627:
 	case MSM_CPU_9615:
-	case MSM_CPU_COPPER:
+	case MSM_CPU_8974:
 		return 1;
 	default:
 		return 0;
diff --git a/drivers/char/msm_rotator.c b/drivers/char/msm_rotator.c
index 9c8f7ee..8650e83 100644
--- a/drivers/char/msm_rotator.c
+++ b/drivers/char/msm_rotator.c
@@ -170,35 +170,26 @@
 	CLK_SUSPEND,
 };
 
-int msm_rotator_iommu_map_buf(int mem_id, int domain,
+int msm_rotator_iommu_map_buf(int mem_id, unsigned char src,
 	unsigned long *start, unsigned long *len,
-	struct ion_handle **pihdl, unsigned int secure)
+	struct ion_handle **pihdl)
 {
 	if (!msm_rotator_dev->client)
 		return -EINVAL;
 
-	*pihdl = ion_import_fd(msm_rotator_dev->client, mem_id);
+	*pihdl = ion_import_dma_buf(msm_rotator_dev->client, mem_id);
 	if (IS_ERR_OR_NULL(*pihdl)) {
-		pr_err("ion_import_fd() failed\n");
+		pr_err("ion_import_dma_buf() failed\n");
 		return PTR_ERR(*pihdl);
 	}
-	pr_debug("%s(): ion_hdl %p, ion_buf %p\n", __func__, *pihdl,
-		ion_share(msm_rotator_dev->client, *pihdl));
+	pr_debug("%s(): ion_hdl %p, ion_fd %d\n", __func__, *pihdl,
+		ion_share_dma_buf(msm_rotator_dev->client, *pihdl));
 
-	if (secure) {
-		if (ion_phys(msm_rotator_dev->client,
-			*pihdl, start, (unsigned *)len)) {
-			pr_err("%s:%d: ion_phys map failed\n",
-				 __func__, __LINE__);
-			return -ENOMEM;
-		}
-	} else {
-		if (ion_map_iommu(msm_rotator_dev->client,
-			*pihdl,	domain, GEN_POOL,
-			SZ_4K, 0, start, len, 0, ION_IOMMU_UNMAP_DELAYED)) {
-			pr_err("ion_map_iommu() failed\n");
-			return -EINVAL;
-		}
+	if (ion_map_iommu(msm_rotator_dev->client,
+		*pihdl,	ROTATOR_DOMAIN, GEN_POOL,
+		SZ_4K, 0, start, len, 0, ION_IOMMU_UNMAP_DELAYED)) {
+		pr_err("ion_map_iommu() failed\n");
+		return -EINVAL;
 	}
 
 	pr_debug("%s(): mem_id %d, start 0x%lx, len 0x%lx\n",
@@ -815,9 +806,9 @@
 	return 0;
 }
 
-static int get_img(struct msmfb_data *fbd, int domain,
+static int get_img(struct msmfb_data *fbd, unsigned char src,
 	unsigned long *start, unsigned long *len, struct file **p_file,
-	int *p_need, struct ion_handle **p_ihdl, unsigned int secure)
+	int *p_need, struct ion_handle **p_ihdl)
 {
 	int ret = 0;
 #ifdef CONFIG_FB
@@ -859,8 +850,8 @@
 #endif
 
 #ifdef CONFIG_MSM_MULTIMEDIA_USE_ION
-	return msm_rotator_iommu_map_buf(fbd->memory_id, domain, start,
-		len, p_ihdl, secure);
+	return msm_rotator_iommu_map_buf(fbd->memory_id, src, start,
+		len, p_ihdl);
 #endif
 #ifdef CONFIG_ANDROID_PMEM
 	if (!get_pmem_file(fbd->memory_id, start, &vstart, len, p_file))
@@ -871,20 +862,17 @@
 
 }
 
-static void put_img(struct file *p_file, struct ion_handle *p_ihdl,
-	int domain, unsigned int secure)
+static void put_img(struct file *p_file, struct ion_handle *p_ihdl)
 {
 #ifdef CONFIG_ANDROID_PMEM
 	if (p_file != NULL)
 		put_pmem_file(p_file);
 #endif
-
 #ifdef CONFIG_MSM_MULTIMEDIA_USE_ION
 	if (!IS_ERR_OR_NULL(p_ihdl)) {
 		pr_debug("%s(): p_ihdl %p\n", __func__, p_ihdl);
-		if (!secure)
-			ion_unmap_iommu(msm_rotator_dev->client,
-				p_ihdl, domain, GEN_POOL);
+		ion_unmap_iommu(msm_rotator_dev->client,
+			p_ihdl, ROTATOR_DOMAIN, GEN_POOL);
 
 		ion_free(msm_rotator_dev->client, p_ihdl);
 	}
@@ -951,18 +939,18 @@
 		goto do_rotate_unlock_mutex;
 	}
 
-	rc = get_img(&info.src, ROTATOR_SRC_DOMAIN, (unsigned long *)&in_paddr,
+	rc = get_img(&info.src, 1, (unsigned long *)&in_paddr,
 			(unsigned long *)&src_len, &srcp0_file, &ps0_need,
-			&srcp0_ihdl, 0);
+			&srcp0_ihdl);
 	if (rc) {
 		pr_err("%s: in get_img() failed id=0x%08x\n",
 			DRIVER_NAME, info.src.memory_id);
 		goto do_rotate_unlock_mutex;
 	}
 
-	rc = get_img(&info.dst, ROTATOR_DST_DOMAIN, (unsigned long *)&out_paddr,
+	rc = get_img(&info.dst, 0, (unsigned long *)&out_paddr,
 			(unsigned long *)&dst_len, &dstp0_file, &p_need,
-			&dstp0_ihdl, img_info->secure);
+			&dstp0_ihdl);
 	if (rc) {
 		pr_err("%s: out get_img() failed id=0x%08x\n",
 		       DRIVER_NAME, info.dst.memory_id);
@@ -990,20 +978,20 @@
 			goto do_rotate_unlock_mutex;
 		}
 
-		rc = get_img(&info.src_chroma, ROTATOR_SRC_DOMAIN,
+		rc = get_img(&info.src_chroma, 1,
 				(unsigned long *)&in_chroma_paddr,
 				(unsigned long *)&src_len, &srcp1_file, &p_need,
-				&srcp1_ihdl, 0);
+				&srcp1_ihdl);
 		if (rc) {
 			pr_err("%s: in chroma get_img() failed id=0x%08x\n",
 				DRIVER_NAME, info.src_chroma.memory_id);
 			goto do_rotate_unlock_mutex;
 		}
 
-		rc = get_img(&info.dst_chroma, ROTATOR_DST_DOMAIN,
+		rc = get_img(&info.dst_chroma, 0,
 				(unsigned long *)&out_chroma_paddr,
 				(unsigned long *)&dst_len, &dstp1_file, &p_need,
-				&dstp1_ihdl, img_info->secure);
+				&dstp1_ihdl);
 		if (rc) {
 			pr_err("%s: out chroma get_img() failed id=0x%08x\n",
 				DRIVER_NAME, info.dst_chroma.memory_id);
@@ -1174,17 +1162,15 @@
 #endif
 	schedule_delayed_work(&msm_rotator_dev->rot_clk_work, HZ);
 do_rotate_unlock_mutex:
-	put_img(dstp1_file, dstp1_ihdl, ROTATOR_DST_DOMAIN,
-		msm_rotator_dev->img_info[s]->secure);
-	put_img(srcp1_file, srcp1_ihdl, ROTATOR_SRC_DOMAIN, 0);
-	put_img(dstp0_file, dstp0_ihdl, ROTATOR_DST_DOMAIN,
-		msm_rotator_dev->img_info[s]->secure);
+	put_img(dstp1_file, dstp1_ihdl);
+	put_img(srcp1_file, srcp1_ihdl);
+	put_img(dstp0_file, dstp0_ihdl);
 
 	/* only source may use frame buffer */
 	if (info.src.flags & MDP_MEMORY_ID_TYPE_FB)
 		fput_light(srcp0_file, ps0_need);
 	else
-		put_img(srcp0_file, srcp0_ihdl, ROTATOR_SRC_DOMAIN, 0);
+		put_img(srcp0_file, srcp0_ihdl);
 	mutex_unlock(&msm_rotator_dev->rotator_lock);
 	dev_dbg(msm_rotator_dev->device, "%s() returning rc = %d\n",
 		__func__, rc);
diff --git a/drivers/coresight/Kconfig b/drivers/coresight/Kconfig
new file mode 100644
index 0000000..1219af1
--- /dev/null
+++ b/drivers/coresight/Kconfig
@@ -0,0 +1,32 @@
+config MSM_QDSS
+	bool "CoreSight tracing"
+	help
+	  Enables support for CoreSight tracing. This uses CoreSight trace
+	  components and buses to support both hardware (e.g. processor ETM)
+	  and hardware-assisted software instrumentation based (e.g. STM)
+	  tracing.
+
+	  For production builds, you should probably say 'N' here to avoid
+	  potential power, performance and memory penalty.
+
+config MSM_QDSS_STM_DEFAULT_ENABLE
+	bool "Turn on CoreSight STM tracing by default"
+	depends on MSM_QDSS
+	help
+	  Turns on CoreSight STM tracing (hardware assisted software
+	  instrumentation based tracing) by default. Otherwise, tracing is
+	  disabled by default but can be enabled via sysfs.
+
+	  For production builds, you should probably say 'N' here to avoid
+	  potential power, performance and memory penalty.
+
+config MSM_QDSS_ETM_DEFAULT_ENABLE
+	bool "Turn on CoreSight ETM tracing by default"
+	depends on MSM_QDSS
+	help
+	  Turns on CoreSight ETM tracing (processor tracing) by default.
+	  Otherwise, tracing is disabled by default but can be enabled via
+	  sysfs.
+
+	  For production builds, you should probably say 'N' here to avoid
+	  potential power, performance and memory penalty.
diff --git a/drivers/coresight/Makefile b/drivers/coresight/Makefile
new file mode 100644
index 0000000..2ee2093
--- /dev/null
+++ b/drivers/coresight/Makefile
@@ -0,0 +1,2 @@
+
+obj-$(CONFIG_MSM_QDSS) += coresight.o coresight-etb.o coresight-tpiu.o coresight-funnel.o coresight-stm.o coresight-etm.o
diff --git a/drivers/coresight/coresight-etb.c b/drivers/coresight/coresight-etb.c
new file mode 100644
index 0000000..2bffae5
--- /dev/null
+++ b/drivers/coresight/coresight-etb.c
@@ -0,0 +1,451 @@
+/* Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/err.h>
+#include <linux/fs.h>
+#include <linux/miscdevice.h>
+#include <linux/uaccess.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/mutex.h>
+#include <linux/clk.h>
+#include <linux/coresight.h>
+
+#include "coresight-priv.h"
+
+#define etb_writel(drvdata, val, off)	__raw_writel((val), drvdata->base + off)
+#define etb_readl(drvdata, off)		__raw_readl(drvdata->base + off)
+
+#define ETB_RAM_DEPTH_REG	(0x004)
+#define ETB_STATUS_REG		(0x00C)
+#define ETB_RAM_READ_DATA_REG	(0x010)
+#define ETB_RAM_READ_POINTER	(0x014)
+#define ETB_RAM_WRITE_POINTER	(0x018)
+#define ETB_TRG			(0x01C)
+#define ETB_CTL_REG		(0x020)
+#define ETB_RWD_REG		(0x024)
+#define ETB_FFSR		(0x300)
+#define ETB_FFCR		(0x304)
+#define ETB_ITMISCOP0		(0xEE0)
+#define ETB_ITTRFLINACK		(0xEE4)
+#define ETB_ITTRFLIN		(0xEE8)
+#define ETB_ITATBDATA0		(0xEEC)
+#define ETB_ITATBCTR2		(0xEF0)
+#define ETB_ITATBCTR1		(0xEF4)
+#define ETB_ITATBCTR0		(0xEF8)
+
+
+#define BYTES_PER_WORD		4
+#define ETB_SIZE_WORDS		4096
+#define FRAME_SIZE_WORDS	4
+
+#define ETB_LOCK()							\
+do {									\
+	mb();								\
+	etb_writel(drvdata, 0x0, CORESIGHT_LAR);			\
+} while (0)
+#define ETB_UNLOCK()							\
+do {									\
+	etb_writel(drvdata, CORESIGHT_UNLOCK, CORESIGHT_LAR);		\
+	mb();								\
+} while (0)
+
+struct etb_drvdata {
+	uint8_t		*buf;
+	void __iomem	*base;
+	bool		enabled;
+	bool		reading;
+	spinlock_t	spinlock;
+	atomic_t	in_use;
+	struct device	*dev;
+	struct kobject	*kobj;
+	struct clk	*clk;
+	uint32_t	trigger_cntr;
+};
+
+static struct etb_drvdata *drvdata;
+
+static void __etb_enable(void)
+{
+	int i;
+
+	ETB_UNLOCK();
+
+	etb_writel(drvdata, 0x0, ETB_RAM_WRITE_POINTER);
+	for (i = 0; i < ETB_SIZE_WORDS; i++)
+		etb_writel(drvdata, 0x0, ETB_RWD_REG);
+
+	etb_writel(drvdata, 0x0, ETB_RAM_WRITE_POINTER);
+	etb_writel(drvdata, 0x0, ETB_RAM_READ_POINTER);
+
+	etb_writel(drvdata, drvdata->trigger_cntr, ETB_TRG);
+	etb_writel(drvdata, BIT(13) | BIT(0), ETB_FFCR);
+	etb_writel(drvdata, BIT(0), ETB_CTL_REG);
+
+	ETB_LOCK();
+}
+
+int etb_enable(void)
+{
+	int ret;
+	unsigned long flags;
+
+	ret = clk_prepare_enable(drvdata->clk);
+	if (ret)
+		return ret;
+
+	spin_lock_irqsave(&drvdata->spinlock, flags);
+	__etb_enable();
+	drvdata->enabled = true;
+	dev_info(drvdata->dev, "ETB enabled\n");
+	spin_unlock_irqrestore(&drvdata->spinlock, flags);
+
+	return 0;
+}
+
+static void __etb_disable(void)
+{
+	int count;
+	uint32_t ffcr;
+
+	ETB_UNLOCK();
+
+	ffcr = etb_readl(drvdata, ETB_FFCR);
+	ffcr |= (BIT(12) | BIT(6));
+	etb_writel(drvdata, ffcr, ETB_FFCR);
+
+	for (count = TIMEOUT_US; BVAL(etb_readl(drvdata, ETB_FFCR), 6) != 0
+				&& count > 0; count--)
+		udelay(1);
+	WARN(count == 0, "timeout while flushing DRVDATA, ETB_FFCR: %#x\n",
+	     etb_readl(drvdata, ETB_FFCR));
+
+	etb_writel(drvdata, 0x0, ETB_CTL_REG);
+
+	for (count = TIMEOUT_US; BVAL(etb_readl(drvdata, ETB_FFSR), 1) != 1
+				&& count > 0; count--)
+		udelay(1);
+	WARN(count == 0, "timeout while disabling DRVDATA, ETB_FFSR: %#x\n",
+	     etb_readl(drvdata, ETB_FFSR));
+
+	ETB_LOCK();
+}
+
+void etb_disable(void)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&drvdata->spinlock, flags);
+	__etb_disable();
+	drvdata->enabled = false;
+	dev_info(drvdata->dev, "ETB disabled\n");
+	spin_unlock_irqrestore(&drvdata->spinlock, flags);
+
+	clk_disable_unprepare(drvdata->clk);
+}
+
+static void __etb_dump(void)
+{
+	int i;
+	uint8_t *buf_ptr;
+	uint32_t read_data;
+	uint32_t read_ptr;
+	uint32_t write_ptr;
+	uint32_t frame_off;
+	uint32_t frame_endoff;
+
+	ETB_UNLOCK();
+
+	read_ptr = etb_readl(drvdata, ETB_RAM_READ_POINTER);
+	write_ptr = etb_readl(drvdata, ETB_RAM_WRITE_POINTER);
+
+	frame_off = write_ptr % FRAME_SIZE_WORDS;
+	frame_endoff = FRAME_SIZE_WORDS - frame_off;
+	if (frame_off) {
+		dev_err(drvdata->dev, "write_ptr: %lu not aligned to formatter "
+				"frame size\n", (unsigned long)write_ptr);
+		dev_err(drvdata->dev, "frameoff: %lu, frame_endoff: %lu\n",
+			(unsigned long)frame_off, (unsigned long)frame_endoff);
+		write_ptr += frame_endoff;
+	}
+
+	if ((etb_readl(drvdata, ETB_STATUS_REG) & BIT(0)) == 0)
+		etb_writel(drvdata, 0x0, ETB_RAM_READ_POINTER);
+	else
+		etb_writel(drvdata, write_ptr, ETB_RAM_READ_POINTER);
+
+	buf_ptr = drvdata->buf;
+	for (i = 0; i < ETB_SIZE_WORDS; i++) {
+		read_data = etb_readl(drvdata, ETB_RAM_READ_DATA_REG);
+		*buf_ptr++ = read_data >> 0;
+		*buf_ptr++ = read_data >> 8;
+		*buf_ptr++ = read_data >> 16;
+		*buf_ptr++ = read_data >> 24;
+	}
+
+	if (frame_off) {
+		buf_ptr -= (frame_endoff * BYTES_PER_WORD);
+		for (i = 0; i < frame_endoff; i++) {
+			*buf_ptr++ = 0x0;
+			*buf_ptr++ = 0x0;
+			*buf_ptr++ = 0x0;
+			*buf_ptr++ = 0x0;
+		}
+	}
+
+	etb_writel(drvdata, read_ptr, ETB_RAM_READ_POINTER);
+
+	ETB_LOCK();
+}
+
+void etb_dump(void)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&drvdata->spinlock, flags);
+	if (drvdata->enabled) {
+		__etb_disable();
+		__etb_dump();
+		__etb_enable();
+
+		dev_info(drvdata->dev, "ETB dumped\n");
+	}
+	spin_unlock_irqrestore(&drvdata->spinlock, flags);
+}
+
+static int etb_open(struct inode *inode, struct file *file)
+{
+	if (atomic_cmpxchg(&drvdata->in_use, 0, 1))
+		return -EBUSY;
+
+	dev_dbg(drvdata->dev, "%s: successfully opened\n", __func__);
+	return 0;
+}
+
+static ssize_t etb_read(struct file *file, char __user *data,
+				size_t len, loff_t *ppos)
+{
+	if (drvdata->reading == false) {
+		etb_dump();
+		drvdata->reading = true;
+	}
+
+	if (*ppos + len > ETB_SIZE_WORDS * BYTES_PER_WORD)
+		len = ETB_SIZE_WORDS * BYTES_PER_WORD - *ppos;
+
+	if (copy_to_user(data, drvdata->buf + *ppos, len)) {
+		dev_dbg(drvdata->dev, "%s: copy_to_user failed\n", __func__);
+		return -EFAULT;
+	}
+
+	*ppos += len;
+
+	dev_dbg(drvdata->dev, "%s: %d bytes copied, %d bytes left\n",
+		__func__, len, (int) (ETB_SIZE_WORDS * BYTES_PER_WORD - *ppos));
+
+	return len;
+}
+
+static int etb_release(struct inode *inode, struct file *file)
+{
+	drvdata->reading = false;
+
+	atomic_set(&drvdata->in_use, 0);
+
+	dev_dbg(drvdata->dev, "%s: released\n", __func__);
+
+	return 0;
+}
+
+static const struct file_operations etb_fops = {
+	.owner =	THIS_MODULE,
+	.open =		etb_open,
+	.read =		etb_read,
+	.release =	etb_release,
+};
+
+static struct miscdevice etb_misc = {
+	.name =		"msm_etb",
+	.minor =	MISC_DYNAMIC_MINOR,
+	.fops =		&etb_fops,
+};
+
+static ssize_t etb_show_trigger_cntr(struct device *dev,
+				     struct device_attribute *attr, char *buf)
+{
+	unsigned long val = drvdata->trigger_cntr;
+	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+
+static ssize_t etb_store_trigger_cntr(struct device *dev,
+				      struct device_attribute *attr,
+				      const char *buf, size_t size)
+{
+	unsigned long val;
+
+	if (sscanf(buf, "%lx", &val) != 1)
+		return -EINVAL;
+
+	drvdata->trigger_cntr = val;
+	return size;
+}
+static DEVICE_ATTR(trigger_cntr, S_IRUGO | S_IWUSR, etb_show_trigger_cntr,
+		   etb_store_trigger_cntr);
+
+static int __devinit etb_sysfs_init(void)
+{
+	int ret;
+
+	drvdata->kobj = kobject_create_and_add("etb", qdss_get_modulekobj());
+	if (!drvdata->kobj) {
+		dev_err(drvdata->dev, "failed to create ETB sysfs kobject\n");
+		ret = -ENOMEM;
+		goto err_create;
+	}
+
+	ret = sysfs_create_file(drvdata->kobj, &dev_attr_trigger_cntr.attr);
+	if (ret) {
+		dev_err(drvdata->dev, "failed to create ETB sysfs trigger_cntr"
+		" attribute\n");
+		goto err_file;
+	}
+
+	return 0;
+err_file:
+	kobject_put(drvdata->kobj);
+err_create:
+	return ret;
+}
+
+static void __devexit etb_sysfs_exit(void)
+{
+	sysfs_remove_file(drvdata->kobj, &dev_attr_trigger_cntr.attr);
+	kobject_put(drvdata->kobj);
+}
+
+static int __devinit etb_probe(struct platform_device *pdev)
+{
+	int ret;
+	struct resource *res;
+
+	drvdata = kzalloc(sizeof(*drvdata), GFP_KERNEL);
+	if (!drvdata) {
+		ret = -ENOMEM;
+		goto err_kzalloc_drvdata;
+	}
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res) {
+		ret = -EINVAL;
+		goto err_res;
+	}
+
+	drvdata->base = ioremap_nocache(res->start, resource_size(res));
+	if (!drvdata->base) {
+		ret = -EINVAL;
+		goto err_ioremap;
+	}
+
+	drvdata->dev = &pdev->dev;
+
+	spin_lock_init(&drvdata->spinlock);
+
+	drvdata->clk = clk_get(drvdata->dev, "core_clk");
+	if (IS_ERR(drvdata->clk)) {
+		ret = PTR_ERR(drvdata->clk);
+		goto err_clk_get;
+	}
+
+	ret = clk_set_rate(drvdata->clk, CORESIGHT_CLK_RATE_TRACE);
+	if (ret)
+		goto err_clk_rate;
+
+	ret = misc_register(&etb_misc);
+	if (ret)
+		goto err_misc;
+
+	drvdata->buf = kzalloc(ETB_SIZE_WORDS * BYTES_PER_WORD, GFP_KERNEL);
+	if (!drvdata->buf) {
+		ret = -ENOMEM;
+		goto err_alloc;
+	}
+
+	etb_sysfs_init();
+
+	dev_info(drvdata->dev, "ETB initialized\n");
+	return 0;
+
+err_alloc:
+	misc_deregister(&etb_misc);
+err_misc:
+err_clk_rate:
+	clk_put(drvdata->clk);
+err_clk_get:
+	iounmap(drvdata->base);
+err_ioremap:
+err_res:
+	kfree(drvdata);
+err_kzalloc_drvdata:
+	dev_err(drvdata->dev, "ETB init failed\n");
+	return ret;
+}
+
+static int __devexit etb_remove(struct platform_device *pdev)
+{
+	if (drvdata->enabled)
+		etb_disable();
+	etb_sysfs_exit();
+	kfree(drvdata->buf);
+	misc_deregister(&etb_misc);
+	clk_put(drvdata->clk);
+	iounmap(drvdata->base);
+	kfree(drvdata);
+
+	return 0;
+}
+
+static struct of_device_id etb_match[] = {
+	{.compatible = "qcom,msm-etb"},
+	{}
+};
+
+static struct platform_driver etb_driver = {
+	.probe          = etb_probe,
+	.remove         = __devexit_p(etb_remove),
+	.driver         = {
+		.name   = "msm_etb",
+		.owner	= THIS_MODULE,
+		.of_match_table = etb_match,
+	},
+};
+
+static int __init etb_init(void)
+{
+	return platform_driver_register(&etb_driver);
+}
+module_init(etb_init);
+
+static void __exit etb_exit(void)
+{
+	platform_driver_unregister(&etb_driver);
+}
+module_exit(etb_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("CoreSight Embedded Trace Buffer driver");
diff --git a/drivers/coresight/coresight-etm.c b/drivers/coresight/coresight-etm.c
new file mode 100644
index 0000000..b3d2a16
--- /dev/null
+++ b/drivers/coresight/coresight-etm.c
@@ -0,0 +1,1646 @@
+/* Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/err.h>
+#include <linux/fs.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/smp.h>
+#include <linux/wakelock.h>
+#include <linux/pm_qos.h>
+#include <linux/sysfs.h>
+#include <linux/stat.h>
+#include <linux/clk.h>
+#include <linux/coresight.h>
+#include <asm/sections.h>
+#include <mach/socinfo.h>
+
+#include "coresight-priv.h"
+
+#define etm_writel(drvdata, cpu, val, off)	\
+			__raw_writel((val), drvdata->base + (SZ_4K * cpu) + off)
+#define etm_readl(drvdata, cpu, off)	\
+			__raw_readl(drvdata->base + (SZ_4K * cpu) + off)
+
+/*
+ * Device registers:
+ * 0x000 - 0x2FC: Trace		registers
+ * 0x300 - 0x314: Management	registers
+ * 0x318 - 0xEFC: Trace		registers
+ *
+ * Coresight registers
+ * 0xF00 - 0xF9C: Management	registers
+ * 0xFA0 - 0xFA4: Management	registers in PFTv1.0
+ *		  Trace		registers in PFTv1.1
+ * 0xFA8 - 0xFFC: Management	registers
+ */
+
+/* Trace registers (0x000-0x2FC) */
+#define ETMCR			(0x000)
+#define ETMCCR			(0x004)
+#define ETMTRIGGER		(0x008)
+#define ETMSR			(0x010)
+#define ETMSCR			(0x014)
+#define ETMTSSCR		(0x018)
+#define ETMTEEVR		(0x020)
+#define ETMTECR1		(0x024)
+#define ETMFFLR			(0x02C)
+#define ETMACVRn(n)		(0x040 + (n * 4))
+#define ETMACTRn(n)		(0x080 + (n * 4))
+#define ETMCNTRLDVRn(n)		(0x140 + (n * 4))
+#define ETMCNTENRn(n)		(0x150 + (n * 4))
+#define ETMCNTRLDEVRn(n)	(0x160 + (n * 4))
+#define ETMCNTVRn(n)		(0x170 + (n * 4))
+#define ETMSQ12EVR		(0x180)
+#define ETMSQ21EVR		(0x184)
+#define ETMSQ23EVR		(0x188)
+#define ETMSQ31EVR		(0x18C)
+#define ETMSQ32EVR		(0x190)
+#define ETMSQ13EVR		(0x194)
+#define ETMSQR			(0x19C)
+#define ETMEXTOUTEVRn(n)	(0x1A0 + (n * 4))
+#define ETMCIDCVRn(n)		(0x1B0 + (n * 4))
+#define ETMCIDCMR		(0x1BC)
+#define ETMIMPSPEC0		(0x1C0)
+#define ETMIMPSPEC1		(0x1C4)
+#define ETMIMPSPEC2		(0x1C8)
+#define ETMIMPSPEC3		(0x1CC)
+#define ETMIMPSPEC4		(0x1D0)
+#define ETMIMPSPEC5		(0x1D4)
+#define ETMIMPSPEC6		(0x1D8)
+#define ETMIMPSPEC7		(0x1DC)
+#define ETMSYNCFR		(0x1E0)
+#define ETMIDR			(0x1E4)
+#define ETMCCER			(0x1E8)
+#define ETMEXTINSELR		(0x1EC)
+#define ETMTESSEICR		(0x1F0)
+#define ETMEIBCR		(0x1F4)
+#define ETMTSEVR		(0x1F8)
+#define ETMAUXCR		(0x1FC)
+#define ETMTRACEIDR		(0x200)
+#define ETMVMIDCVR		(0x240)
+/* Management registers (0x300-0x314) */
+#define ETMOSLAR		(0x300)
+#define ETMOSLSR		(0x304)
+#define ETMOSSRR		(0x308)
+#define ETMPDCR			(0x310)
+#define ETMPDSR			(0x314)
+
+#define ETM_MAX_ADDR_CMP	(16)
+#define ETM_MAX_CNTR		(4)
+#define ETM_MAX_CTXID_CMP	(3)
+
+#define ETM_MODE_EXCLUDE	BIT(0)
+#define ETM_MODE_CYCACC		BIT(1)
+#define ETM_MODE_STALL		BIT(2)
+#define ETM_MODE_TIMESTAMP	BIT(3)
+#define ETM_MODE_CTXID		BIT(4)
+#define ETM_MODE_ALL		(0x1F)
+
+#define ETM_EVENT_MASK		(0x1FFFF)
+#define ETM_SYNC_MASK		(0xFFF)
+#define ETM_ALL_MASK		(0xFFFFFFFF)
+
+#define ETM_SEQ_STATE_MAX_VAL	(0x2)
+
+enum {
+	ETM_ADDR_TYPE_NONE,
+	ETM_ADDR_TYPE_SINGLE,
+	ETM_ADDR_TYPE_RANGE,
+	ETM_ADDR_TYPE_START,
+	ETM_ADDR_TYPE_STOP,
+};
+
+#define ETM_LOCK(cpu)							\
+do {									\
+	mb();								\
+	etm_writel(drvdata, cpu, 0x0, CORESIGHT_LAR);			\
+} while (0)
+#define ETM_UNLOCK(cpu)							\
+do {									\
+	etm_writel(drvdata, cpu, CORESIGHT_UNLOCK, CORESIGHT_LAR);	\
+	mb();								\
+} while (0)
+
+
+#ifdef MODULE_PARAM_PREFIX
+#undef MODULE_PARAM_PREFIX
+#endif
+#define MODULE_PARAM_PREFIX "coresight."
+
+#ifdef CONFIG_MSM_QDSS_ETM_DEFAULT_ENABLE
+static int etm_boot_enable = 1;
+#else
+static int etm_boot_enable;
+#endif
+module_param_named(
+	etm_boot_enable, etm_boot_enable, int, S_IRUGO
+);
+
+struct etm_drvdata {
+	void __iomem			*base;
+	bool				enabled;
+	struct wake_lock		wake_lock;
+	struct pm_qos_request		qos_req;
+	struct qdss_source		*src;
+	struct mutex			mutex;
+	struct device			*dev;
+	struct kobject			*kobj;
+	struct clk			*clk;
+	uint8_t				arch;
+	uint8_t				nr_addr_cmp;
+	uint8_t				nr_cntr;
+	uint8_t				nr_ext_inp;
+	uint8_t				nr_ext_out;
+	uint8_t				nr_ctxid_cmp;
+	uint8_t				reset;
+	uint32_t			mode;
+	uint32_t			ctrl;
+	uint32_t			trigger_event;
+	uint32_t			startstop_ctrl;
+	uint32_t			enable_event;
+	uint32_t			enable_ctrl1;
+	uint32_t			fifofull_level;
+	uint8_t				addr_idx;
+	uint32_t			addr_val[ETM_MAX_ADDR_CMP];
+	uint32_t			addr_acctype[ETM_MAX_ADDR_CMP];
+	uint32_t			addr_type[ETM_MAX_ADDR_CMP];
+	uint8_t				cntr_idx;
+	uint32_t			cntr_rld_val[ETM_MAX_CNTR];
+	uint32_t			cntr_event[ETM_MAX_CNTR];
+	uint32_t			cntr_rld_event[ETM_MAX_CNTR];
+	uint32_t			cntr_val[ETM_MAX_CNTR];
+	uint32_t			seq_12_event;
+	uint32_t			seq_21_event;
+	uint32_t			seq_23_event;
+	uint32_t			seq_31_event;
+	uint32_t			seq_32_event;
+	uint32_t			seq_13_event;
+	uint32_t			seq_curr_state;
+	uint8_t				ctxid_idx;
+	uint32_t			ctxid_val[ETM_MAX_CTXID_CMP];
+	uint32_t			ctxid_mask;
+	uint32_t			sync_freq;
+	uint32_t			timestamp_event;
+};
+
+static struct etm_drvdata *drvdata;
+
+
+/* ETM clock is derived from the processor clock and gets enabled on a
+ * logical OR of below items on Krait (pass2 onwards):
+ * 1.CPMR[ETMCLKEN] is 1
+ * 2.ETMCR[PD] is 0
+ * 3.ETMPDCR[PU] is 1
+ * 4.Reset is asserted (core or debug)
+ * 5.APB memory mapped requests (eg. EDAP access)
+ *
+ * 1., 2. and 3. above are permanent enables whereas 4. and 5. are temporary
+ * enables
+ *
+ * We rely on 5. to be able to access ETMCR and then use 2. above for ETM
+ * clock vote in the driver and the save-restore code uses 1. above
+ * for its vote
+ */
+static void etm_set_pwrdwn(int cpu)
+{
+	uint32_t etmcr;
+
+	etmcr = etm_readl(drvdata, cpu, ETMCR);
+	etmcr |= BIT(0);
+	etm_writel(drvdata, cpu, etmcr, ETMCR);
+}
+
+static void etm_clr_pwrdwn(int cpu)
+{
+	uint32_t etmcr;
+
+	etmcr = etm_readl(drvdata, cpu, ETMCR);
+	etmcr &= ~BIT(0);
+	etm_writel(drvdata, cpu, etmcr, ETMCR);
+}
+
+static void etm_set_prog(int cpu)
+{
+	uint32_t etmcr;
+	int count;
+
+	etmcr = etm_readl(drvdata, cpu, ETMCR);
+	etmcr |= BIT(10);
+	etm_writel(drvdata, cpu, etmcr, ETMCR);
+
+	for (count = TIMEOUT_US; BVAL(etm_readl(drvdata, cpu, ETMSR), 1) != 1
+				&& count > 0; count--)
+		udelay(1);
+	WARN(count == 0, "timeout while setting prog bit, ETMSR: %#x\n",
+	     etm_readl(drvdata, cpu, ETMSR));
+}
+
+static void etm_clr_prog(int cpu)
+{
+	uint32_t etmcr;
+	int count;
+
+	etmcr = etm_readl(drvdata, cpu, ETMCR);
+	etmcr &= ~BIT(10);
+	etm_writel(drvdata, cpu, etmcr, ETMCR);
+
+	for (count = TIMEOUT_US; BVAL(etm_readl(drvdata, cpu, ETMSR), 1) != 0
+				&& count > 0; count--)
+		udelay(1);
+	WARN(count == 0, "timeout while clearing prog bit, ETMSR: %#x\n",
+	     etm_readl(drvdata, cpu, ETMSR));
+}
+
+static void __etm_enable(int cpu)
+{
+	int i;
+
+	ETM_UNLOCK(cpu);
+	/* Vote for ETM power/clock enable */
+	etm_clr_pwrdwn(cpu);
+	etm_set_prog(cpu);
+
+	etm_writel(drvdata, cpu, drvdata->ctrl | BIT(10), ETMCR);
+	etm_writel(drvdata, cpu, drvdata->trigger_event, ETMTRIGGER);
+	etm_writel(drvdata, cpu, drvdata->startstop_ctrl, ETMTSSCR);
+	etm_writel(drvdata, cpu, drvdata->enable_event, ETMTEEVR);
+	etm_writel(drvdata, cpu, drvdata->enable_ctrl1, ETMTECR1);
+	etm_writel(drvdata, cpu, drvdata->fifofull_level, ETMFFLR);
+	for (i = 0; i < drvdata->nr_addr_cmp; i++) {
+		etm_writel(drvdata, cpu, drvdata->addr_val[i], ETMACVRn(i));
+		etm_writel(drvdata, cpu, drvdata->addr_acctype[i], ETMACTRn(i));
+	}
+	for (i = 0; i < drvdata->nr_cntr; i++) {
+		etm_writel(drvdata, cpu, drvdata->cntr_rld_val[i],
+			   ETMCNTRLDVRn(i));
+		etm_writel(drvdata, cpu, drvdata->cntr_event[i], ETMCNTENRn(i));
+		etm_writel(drvdata, cpu, drvdata->cntr_rld_event[i],
+			   ETMCNTRLDEVRn(i));
+		etm_writel(drvdata, cpu, drvdata->cntr_val[i], ETMCNTVRn(i));
+	}
+	etm_writel(drvdata, cpu, drvdata->seq_12_event, ETMSQ12EVR);
+	etm_writel(drvdata, cpu, drvdata->seq_21_event, ETMSQ21EVR);
+	etm_writel(drvdata, cpu, drvdata->seq_23_event, ETMSQ23EVR);
+	etm_writel(drvdata, cpu, drvdata->seq_31_event, ETMSQ31EVR);
+	etm_writel(drvdata, cpu, drvdata->seq_32_event, ETMSQ32EVR);
+	etm_writel(drvdata, cpu, drvdata->seq_13_event, ETMSQ13EVR);
+	etm_writel(drvdata, cpu, drvdata->seq_curr_state, ETMSQR);
+	for (i = 0; i < drvdata->nr_ext_out; i++)
+		etm_writel(drvdata, cpu, 0x0000406F, ETMEXTOUTEVRn(i));
+	for (i = 0; i < drvdata->nr_ctxid_cmp; i++)
+		etm_writel(drvdata, cpu, drvdata->ctxid_val[i], ETMCIDCVRn(i));
+	etm_writel(drvdata, cpu, drvdata->ctxid_mask, ETMCIDCMR);
+	etm_writel(drvdata, cpu, drvdata->sync_freq, ETMSYNCFR);
+	etm_writel(drvdata, cpu, 0x00000000, ETMEXTINSELR);
+	etm_writel(drvdata, cpu, drvdata->timestamp_event, ETMTSEVR);
+	etm_writel(drvdata, cpu, 0x00000000, ETMAUXCR);
+	etm_writel(drvdata, cpu, cpu+1, ETMTRACEIDR);
+	etm_writel(drvdata, cpu, 0x00000000, ETMVMIDCVR);
+
+	etm_clr_prog(cpu);
+	ETM_LOCK(cpu);
+}
+
+static int etm_enable(void)
+{
+	int ret, cpu;
+
+	if (drvdata->enabled) {
+		dev_err(drvdata->dev, "ETM tracing already enabled\n");
+		ret = -EPERM;
+		goto err;
+	}
+
+	wake_lock(&drvdata->wake_lock);
+	/* 1. causes all online cpus to come out of idle power collapse (PC)
+	 * 2. prevents idle PC until the save-restore flag is enabled
+	 *    atomically
+	 * We rely on the user to prevent hotplug on/off from racing with this
+	 * operation and to ensure that cores on which trace is to be turned
+	 * on are already hotplugged on.
+	 */
+	pm_qos_update_request(&drvdata->qos_req, 0);
+
+	ret = clk_prepare_enable(drvdata->clk);
+	if (ret)
+		goto err_clk;
+
+	ret = qdss_enable(drvdata->src);
+	if (ret)
+		goto err_qdss;
+
+	for_each_online_cpu(cpu)
+		__etm_enable(cpu);
+
+	drvdata->enabled = true;
+
+	pm_qos_update_request(&drvdata->qos_req, PM_QOS_DEFAULT_VALUE);
+	wake_unlock(&drvdata->wake_lock);
+
+	dev_info(drvdata->dev, "ETM tracing enabled\n");
+	return 0;
+
+err_qdss:
+	clk_disable_unprepare(drvdata->clk);
+err_clk:
+	pm_qos_update_request(&drvdata->qos_req, PM_QOS_DEFAULT_VALUE);
+	wake_unlock(&drvdata->wake_lock);
+err:
+	return ret;
+}
+
+static void __etm_disable(int cpu)
+{
+	ETM_UNLOCK(cpu);
+	etm_set_prog(cpu);
+
+	/* Program trace enable to low by using an always-false event */
+	etm_writel(drvdata, cpu, 0x6F | BIT(14), ETMTEEVR);
+
+	/* Vote for ETM power/clock disable */
+	etm_set_pwrdwn(cpu);
+	ETM_LOCK(cpu);
+}
+
+static int etm_disable(void)
+{
+	int ret, cpu;
+
+	if (!drvdata->enabled) {
+		dev_err(drvdata->dev, "ETM tracing already disabled\n");
+		ret = -EPERM;
+		goto err;
+	}
+
+	wake_lock(&drvdata->wake_lock);
+	/* 1. causes all online cpus to come out of idle power collapse (PC)
+	 * 2. prevents idle PC until the save-restore flag is disabled
+	 *    atomically
+	 * We rely on the user to prevent hotplug on/off from racing with this
+	 * operation and to ensure that cores on which trace is to be turned
+	 * off are already hotplugged on.
+	 */
+	pm_qos_update_request(&drvdata->qos_req, 0);
+
+	for_each_online_cpu(cpu)
+		__etm_disable(cpu);
+
+	drvdata->enabled = false;
+
+	qdss_disable(drvdata->src);
+
+	clk_disable_unprepare(drvdata->clk);
+
+	pm_qos_update_request(&drvdata->qos_req, PM_QOS_DEFAULT_VALUE);
+	wake_unlock(&drvdata->wake_lock);
+
+	dev_info(drvdata->dev, "ETM tracing disabled\n");
+	return 0;
+err:
+	return ret;
+}
+
+/* Memory-mapped writes to clear the OS lock are not supported; use CP14 */
+static void etm_os_unlock(void *unused)
+{
+	unsigned long value = 0x0;
+
+	asm("mcr p14, 1, %0, c1, c0, 4\n\t" : : "r" (value));
+	asm("isb\n\t");
+}
+
+static ssize_t etm_show_enabled(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	unsigned long val = drvdata->enabled;
+	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+
+static ssize_t etm_store_enabled(struct device *dev,
+				 struct device_attribute *attr,
+				 const char *buf, size_t size)
+{
+	int ret = 0;
+	unsigned long val;
+
+	if (sscanf(buf, "%lx", &val) != 1)
+		return -EINVAL;
+
+	mutex_lock(&drvdata->mutex);
+	if (val)
+		ret = etm_enable();
+	else
+		ret = etm_disable();
+	mutex_unlock(&drvdata->mutex);
+
+	if (ret)
+		return ret;
+	return size;
+}
+static DEVICE_ATTR(enabled, S_IRUGO | S_IWUSR, etm_show_enabled,
+		   etm_store_enabled);
+
+static ssize_t etm_show_nr_addr_cmp(struct device *dev,
+				    struct device_attribute *attr, char *buf)
+{
+	unsigned long val = drvdata->nr_addr_cmp;
+	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+static DEVICE_ATTR(nr_addr_cmp, S_IRUGO, etm_show_nr_addr_cmp, NULL);
+
+static ssize_t etm_show_nr_cntr(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	unsigned long val = drvdata->nr_cntr;
+	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+static DEVICE_ATTR(nr_cntr, S_IRUGO, etm_show_nr_cntr, NULL);
+
+static ssize_t etm_show_nr_ctxid_cmp(struct device *dev,
+				     struct device_attribute *attr, char *buf)
+{
+	unsigned long val = drvdata->nr_ctxid_cmp;
+	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+static DEVICE_ATTR(nr_ctxid_cmp, S_IRUGO, etm_show_nr_ctxid_cmp, NULL);
+
+static ssize_t etm_show_reset(struct device *dev, struct device_attribute *attr,
+			      char *buf)
+{
+	unsigned long val = drvdata->reset;
+	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+
+/* Reset to trace everything i.e. exclude nothing. */
+static ssize_t etm_store_reset(struct device *dev,
+			       struct device_attribute *attr, const char *buf,
+			       size_t size)
+{
+	int i;
+	unsigned long val;
+
+	if (sscanf(buf, "%lx", &val) != 1)
+		return -EINVAL;
+
+	mutex_lock(&drvdata->mutex);
+	if (val) {
+		drvdata->mode = ETM_MODE_EXCLUDE;
+		drvdata->ctrl = 0x0;
+		if (cpu_is_krait_v1()) {
+			drvdata->mode |= ETM_MODE_CYCACC;
+			drvdata->ctrl |= BIT(12);
+		}
+		drvdata->trigger_event = 0x406F;
+		drvdata->startstop_ctrl = 0x0;
+		drvdata->enable_event = 0x6F;
+		drvdata->enable_ctrl1 = 0x1000000;
+		drvdata->fifofull_level = 0x28;
+		drvdata->addr_idx = 0x0;
+		for (i = 0; i < drvdata->nr_addr_cmp; i++) {
+			drvdata->addr_val[i] = 0x0;
+			drvdata->addr_acctype[i] = 0x0;
+			drvdata->addr_type[i] = ETM_ADDR_TYPE_NONE;
+		}
+		drvdata->cntr_idx = 0x0;
+		for (i = 0; i < drvdata->nr_cntr; i++) {
+			drvdata->cntr_rld_val[i] = 0x0;
+			drvdata->cntr_event[i] = 0x406F;
+			drvdata->cntr_rld_event[i] = 0x406F;
+			drvdata->cntr_val[i] = 0x0;
+		}
+		drvdata->seq_12_event = 0x406F;
+		drvdata->seq_21_event = 0x406F;
+		drvdata->seq_23_event = 0x406F;
+		drvdata->seq_31_event = 0x406F;
+		drvdata->seq_32_event = 0x406F;
+		drvdata->seq_13_event = 0x406F;
+		drvdata->seq_curr_state = 0x0;
+		drvdata->ctxid_idx = 0x0;
+		for (i = 0; i < drvdata->nr_ctxid_cmp; i++)
+			drvdata->ctxid_val[i] = 0x0;
+		drvdata->ctxid_mask = 0x0;
+		drvdata->sync_freq = 0x80;
+		drvdata->timestamp_event = 0x406F;
+	}
+	mutex_unlock(&drvdata->mutex);
+	return size;
+}
+static DEVICE_ATTR(reset, S_IRUGO | S_IWUSR, etm_show_reset, etm_store_reset);
+
+static ssize_t etm_show_mode(struct device *dev, struct device_attribute *attr,
+			      char *buf)
+{
+	unsigned long val = drvdata->mode;
+	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+
+static ssize_t etm_store_mode(struct device *dev, struct device_attribute *attr,
+			      const char *buf, size_t size)
+{
+	unsigned long val;
+
+	if (sscanf(buf, "%lx", &val) != 1)
+		return -EINVAL;
+
+	mutex_lock(&drvdata->mutex);
+	drvdata->mode = val & ETM_MODE_ALL;
+
+	if (drvdata->mode & ETM_MODE_EXCLUDE)
+		drvdata->enable_ctrl1 |= BIT(24);
+	else
+		drvdata->enable_ctrl1 &= ~BIT(24);
+
+	if (drvdata->mode & ETM_MODE_CYCACC)
+		drvdata->ctrl |= BIT(12);
+	else
+		drvdata->ctrl &= ~BIT(12);
+
+	if (drvdata->mode & ETM_MODE_STALL)
+		drvdata->ctrl |= BIT(7);
+	else
+		drvdata->ctrl &= ~BIT(7);
+
+	if (drvdata->mode & ETM_MODE_TIMESTAMP)
+		drvdata->ctrl |= BIT(28);
+	else
+		drvdata->ctrl &= ~BIT(28);
+	if (drvdata->mode & ETM_MODE_CTXID)
+		drvdata->ctrl |= (BIT(14) | BIT(15));
+	else
+		drvdata->ctrl &= ~(BIT(14) | BIT(15));
+	mutex_unlock(&drvdata->mutex);
+
+	return size;
+}
+static DEVICE_ATTR(mode, S_IRUGO | S_IWUSR, etm_show_mode, etm_store_mode);
+
+static ssize_t etm_show_trigger_event(struct device *dev,
+				      struct device_attribute *attr, char *buf)
+{
+	unsigned long val = drvdata->trigger_event;
+	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+
+static ssize_t etm_store_trigger_event(struct device *dev,
+				       struct device_attribute *attr,
+				       const char *buf, size_t size)
+{
+	unsigned long val;
+
+	if (sscanf(buf, "%lx", &val) != 1)
+		return -EINVAL;
+
+	drvdata->trigger_event = val & ETM_EVENT_MASK;
+	return size;
+}
+static DEVICE_ATTR(trigger_event, S_IRUGO | S_IWUSR, etm_show_trigger_event,
+		   etm_store_trigger_event);
+
+static ssize_t etm_show_enable_event(struct device *dev,
+				     struct device_attribute *attr, char *buf)
+{
+	unsigned long val = drvdata->enable_event;
+	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+
+static ssize_t etm_store_enable_event(struct device *dev,
+				      struct device_attribute *attr,
+				      const char *buf, size_t size)
+{
+	unsigned long val;
+
+	if (sscanf(buf, "%lx", &val) != 1)
+		return -EINVAL;
+
+	drvdata->enable_event = val & ETM_EVENT_MASK;
+	return size;
+}
+static DEVICE_ATTR(enable_event, S_IRUGO | S_IWUSR, etm_show_enable_event,
+		   etm_store_enable_event);
+
+static ssize_t etm_show_fifofull_level(struct device *dev,
+				       struct device_attribute *attr, char *buf)
+{
+	unsigned long val = drvdata->fifofull_level;
+	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+
+static ssize_t etm_store_fifofull_level(struct device *dev,
+					struct device_attribute *attr,
+					const char *buf, size_t size)
+{
+	unsigned long val;
+
+	if (sscanf(buf, "%lx", &val) != 1)
+		return -EINVAL;
+
+	drvdata->fifofull_level = val;
+	return size;
+}
+static DEVICE_ATTR(fifofull_level, S_IRUGO | S_IWUSR, etm_show_fifofull_level,
+		   etm_store_fifofull_level);
+
+static ssize_t etm_show_addr_idx(struct device *dev,
+				 struct device_attribute *attr, char *buf)
+{
+	unsigned long val = drvdata->addr_idx;
+	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+
+static ssize_t etm_store_addr_idx(struct device *dev,
+				  struct device_attribute *attr,
+				  const char *buf, size_t size)
+{
+	unsigned long val;
+
+	if (sscanf(buf, "%lx", &val) != 1)
+		return -EINVAL;
+	if (val >= drvdata->nr_addr_cmp)
+		return -EINVAL;
+
+	/* Use mutex to ensure index doesn't change while it gets dereferenced
+	 * multiple times within a mutex block elsewhere.
+	 */
+	mutex_lock(&drvdata->mutex);
+	drvdata->addr_idx = val;
+	mutex_unlock(&drvdata->mutex);
+	return size;
+}
+static DEVICE_ATTR(addr_idx, S_IRUGO | S_IWUSR, etm_show_addr_idx,
+		   etm_store_addr_idx);
+
+static ssize_t etm_show_addr_single(struct device *dev,
+				    struct device_attribute *attr, char *buf)
+{
+	unsigned long val;
+	uint8_t idx;
+
+	mutex_lock(&drvdata->mutex);
+	idx = drvdata->addr_idx;
+	if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
+	      drvdata->addr_type[idx] == ETM_ADDR_TYPE_SINGLE)) {
+		mutex_unlock(&drvdata->mutex);
+		return -EPERM;
+	}
+
+	val = drvdata->addr_val[idx];
+	mutex_unlock(&drvdata->mutex);
+	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+
+static ssize_t etm_store_addr_single(struct device *dev,
+				     struct device_attribute *attr,
+				     const char *buf, size_t size)
+{
+	unsigned long val;
+	uint8_t idx;
+
+	if (sscanf(buf, "%lx", &val) != 1)
+		return -EINVAL;
+
+	mutex_lock(&drvdata->mutex);
+	idx = drvdata->addr_idx;
+	if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
+	      drvdata->addr_type[idx] == ETM_ADDR_TYPE_SINGLE)) {
+		mutex_unlock(&drvdata->mutex);
+		return -EPERM;
+	}
+
+	drvdata->addr_val[idx] = val;
+	drvdata->addr_type[idx] = ETM_ADDR_TYPE_SINGLE;
+	mutex_unlock(&drvdata->mutex);
+	return size;
+}
+static DEVICE_ATTR(addr_single, S_IRUGO | S_IWUSR, etm_show_addr_single,
+		   etm_store_addr_single);
+
+static ssize_t etm_show_addr_range(struct device *dev,
+				   struct device_attribute *attr, char *buf)
+{
+	unsigned long val1, val2;
+	uint8_t idx;
+
+	mutex_lock(&drvdata->mutex);
+	idx = drvdata->addr_idx;
+	if (idx % 2 != 0) {
+		mutex_unlock(&drvdata->mutex);
+		return -EPERM;
+	}
+	if (!((drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE &&
+	       drvdata->addr_type[idx + 1] == ETM_ADDR_TYPE_NONE) ||
+	      (drvdata->addr_type[idx] == ETM_ADDR_TYPE_RANGE &&
+	       drvdata->addr_type[idx + 1] == ETM_ADDR_TYPE_RANGE))) {
+		mutex_unlock(&drvdata->mutex);
+		return -EPERM;
+	}
+
+	val1 = drvdata->addr_val[idx];
+	val2 = drvdata->addr_val[idx + 1];
+	mutex_unlock(&drvdata->mutex);
+	return scnprintf(buf, PAGE_SIZE, "%#lx %#lx\n", val1, val2);
+}
+
+static ssize_t etm_store_addr_range(struct device *dev,
+				    struct device_attribute *attr,
+				    const char *buf, size_t size)
+{
+	unsigned long val1, val2;
+	uint8_t idx;
+
+	if (sscanf(buf, "%lx %lx", &val1, &val2) != 2)
+		return -EINVAL;
+	/* lower address comparator cannot have a higher address value */
+	if (val1 > val2)
+		return -EINVAL;
+
+	mutex_lock(&drvdata->mutex);
+	idx = drvdata->addr_idx;
+	if (idx % 2 != 0) {
+		mutex_unlock(&drvdata->mutex);
+		return -EPERM;
+	}
+	if (!((drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE &&
+	       drvdata->addr_type[idx + 1] == ETM_ADDR_TYPE_NONE) ||
+	      (drvdata->addr_type[idx] == ETM_ADDR_TYPE_RANGE &&
+	       drvdata->addr_type[idx + 1] == ETM_ADDR_TYPE_RANGE))) {
+		mutex_unlock(&drvdata->mutex);
+		return -EPERM;
+	}
+
+	drvdata->addr_val[idx] = val1;
+	drvdata->addr_type[idx] = ETM_ADDR_TYPE_RANGE;
+	drvdata->addr_val[idx + 1] = val2;
+	drvdata->addr_type[idx + 1] = ETM_ADDR_TYPE_RANGE;
+	drvdata->enable_ctrl1 |= (1 << (idx/2));
+	mutex_unlock(&drvdata->mutex);
+	return size;
+}
+static DEVICE_ATTR(addr_range, S_IRUGO | S_IWUSR, etm_show_addr_range,
+		   etm_store_addr_range);
+
+static ssize_t etm_show_addr_start(struct device *dev,
+				   struct device_attribute *attr, char *buf)
+{
+	unsigned long val;
+	uint8_t idx;
+
+	mutex_lock(&drvdata->mutex);
+	idx = drvdata->addr_idx;
+	if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
+	      drvdata->addr_type[idx] == ETM_ADDR_TYPE_START)) {
+		mutex_unlock(&drvdata->mutex);
+		return -EPERM;
+	}
+
+	val = drvdata->addr_val[idx];
+	mutex_unlock(&drvdata->mutex);
+	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+
+static ssize_t etm_store_addr_start(struct device *dev,
+				    struct device_attribute *attr,
+				    const char *buf, size_t size)
+{
+	unsigned long val;
+	uint8_t idx;
+
+	if (sscanf(buf, "%lx", &val) != 1)
+		return -EINVAL;
+
+	mutex_lock(&drvdata->mutex);
+	idx = drvdata->addr_idx;
+	if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
+	      drvdata->addr_type[idx] == ETM_ADDR_TYPE_START)) {
+		mutex_unlock(&drvdata->mutex);
+		return -EPERM;
+	}
+
+	drvdata->addr_val[idx] = val;
+	drvdata->addr_type[idx] = ETM_ADDR_TYPE_START;
+	drvdata->startstop_ctrl |= (1 << idx);
+	drvdata->enable_ctrl1 |= BIT(25);
+	mutex_unlock(&drvdata->mutex);
+	return size;
+}
+static DEVICE_ATTR(addr_start, S_IRUGO | S_IWUSR, etm_show_addr_start,
+		   etm_store_addr_start);
+
+static ssize_t etm_show_addr_stop(struct device *dev,
+				  struct device_attribute *attr, char *buf)
+{
+	unsigned long val;
+	uint8_t idx;
+
+	mutex_lock(&drvdata->mutex);
+	idx = drvdata->addr_idx;
+	if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
+	      drvdata->addr_type[idx] == ETM_ADDR_TYPE_STOP)) {
+		mutex_unlock(&drvdata->mutex);
+		return -EPERM;
+	}
+
+	val = drvdata->addr_val[idx];
+	mutex_unlock(&drvdata->mutex);
+	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+
+static ssize_t etm_store_addr_stop(struct device *dev,
+				  struct device_attribute *attr,
+				  const char *buf, size_t size)
+{
+	unsigned long val;
+	uint8_t idx;
+
+	if (sscanf(buf, "%lx", &val) != 1)
+		return -EINVAL;
+
+	mutex_lock(&drvdata->mutex);
+	idx = drvdata->addr_idx;
+	if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
+	      drvdata->addr_type[idx] == ETM_ADDR_TYPE_STOP)) {
+		mutex_unlock(&drvdata->mutex);
+		return -EPERM;
+	}
+
+	drvdata->addr_val[idx] = val;
+	drvdata->addr_type[idx] = ETM_ADDR_TYPE_STOP;
+	drvdata->startstop_ctrl |= (1 << (idx + 16));
+	drvdata->enable_ctrl1 |= BIT(25);
+	mutex_unlock(&drvdata->mutex);
+	return size;
+}
+static DEVICE_ATTR(addr_stop, S_IRUGO | S_IWUSR, etm_show_addr_stop,
+		   etm_store_addr_stop);
+
+static ssize_t etm_show_addr_acctype(struct device *dev,
+				     struct device_attribute *attr, char *buf)
+{
+	unsigned long val;
+
+	mutex_lock(&drvdata->mutex);
+	val = drvdata->addr_acctype[drvdata->addr_idx];
+	mutex_unlock(&drvdata->mutex);
+	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+
+static ssize_t etm_store_addr_acctype(struct device *dev,
+				      struct device_attribute *attr,
+				      const char *buf, size_t size)
+{
+	unsigned long val;
+
+	if (sscanf(buf, "%lx", &val) != 1)
+		return -EINVAL;
+
+	mutex_lock(&drvdata->mutex);
+	drvdata->addr_acctype[drvdata->addr_idx] = val;
+	mutex_unlock(&drvdata->mutex);
+	return size;
+}
+static DEVICE_ATTR(addr_acctype, S_IRUGO | S_IWUSR, etm_show_addr_acctype,
+		   etm_store_addr_acctype);
+
+static ssize_t etm_show_cntr_idx(struct device *dev,
+				 struct device_attribute *attr, char *buf)
+{
+	unsigned long val = drvdata->cntr_idx;
+	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+
+static ssize_t etm_store_cntr_idx(struct device *dev,
+				  struct device_attribute *attr,
+				  const char *buf, size_t size)
+{
+	unsigned long val;
+
+	if (sscanf(buf, "%lx", &val) != 1)
+		return -EINVAL;
+	if (val >= drvdata->nr_cntr)
+		return -EINVAL;
+
+	/* Use mutex to ensure index doesn't change while it gets dereferenced
+	 * multiple times within a mutex block elsewhere.
+	 */
+	mutex_lock(&drvdata->mutex);
+	drvdata->cntr_idx = val;
+	mutex_unlock(&drvdata->mutex);
+	return size;
+}
+static DEVICE_ATTR(cntr_idx, S_IRUGO | S_IWUSR, etm_show_cntr_idx,
+		   etm_store_cntr_idx);
+
+static ssize_t etm_show_cntr_rld_val(struct device *dev,
+				     struct device_attribute *attr, char *buf)
+{
+	unsigned long val;
+	mutex_lock(&drvdata->mutex);
+	val = drvdata->cntr_rld_val[drvdata->cntr_idx];
+	mutex_unlock(&drvdata->mutex);
+	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+
+static ssize_t etm_store_cntr_rld_val(struct device *dev,
+				      struct device_attribute *attr,
+				      const char *buf, size_t size)
+{
+	unsigned long val;
+
+	if (sscanf(buf, "%lx", &val) != 1)
+		return -EINVAL;
+
+	mutex_lock(&drvdata->mutex);
+	drvdata->cntr_rld_val[drvdata->cntr_idx] = val;
+	mutex_unlock(&drvdata->mutex);
+	return size;
+}
+static DEVICE_ATTR(cntr_rld_val, S_IRUGO | S_IWUSR, etm_show_cntr_rld_val,
+		   etm_store_cntr_rld_val);
+
+static ssize_t etm_show_cntr_event(struct device *dev,
+				     struct device_attribute *attr, char *buf)
+{
+	unsigned long val;
+
+	mutex_lock(&drvdata->mutex);
+	val = drvdata->cntr_event[drvdata->cntr_idx];
+	mutex_unlock(&drvdata->mutex);
+	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+
+static ssize_t etm_store_cntr_event(struct device *dev,
+				    struct device_attribute *attr,
+				    const char *buf, size_t size)
+{
+	unsigned long val;
+
+	if (sscanf(buf, "%lx", &val) != 1)
+		return -EINVAL;
+
+	mutex_lock(&drvdata->mutex);
+	drvdata->cntr_event[drvdata->cntr_idx] = val & ETM_EVENT_MASK;
+	mutex_unlock(&drvdata->mutex);
+	return size;
+}
+static DEVICE_ATTR(cntr_event, S_IRUGO | S_IWUSR, etm_show_cntr_event,
+		   etm_store_cntr_event);
+
+static ssize_t etm_show_cntr_rld_event(struct device *dev,
+				       struct device_attribute *attr, char *buf)
+{
+	unsigned long val;
+
+	mutex_lock(&drvdata->mutex);
+	val = drvdata->cntr_rld_event[drvdata->cntr_idx];
+	mutex_unlock(&drvdata->mutex);
+	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+
+static ssize_t etm_store_cntr_rld_event(struct device *dev,
+					struct device_attribute *attr,
+					const char *buf, size_t size)
+{
+	unsigned long val;
+
+	if (sscanf(buf, "%lx", &val) != 1)
+		return -EINVAL;
+
+	mutex_lock(&drvdata->mutex);
+	drvdata->cntr_rld_event[drvdata->cntr_idx] = val & ETM_EVENT_MASK;
+	mutex_unlock(&drvdata->mutex);
+	return size;
+}
+static DEVICE_ATTR(cntr_rld_event, S_IRUGO | S_IWUSR, etm_show_cntr_rld_event,
+		   etm_store_cntr_rld_event);
+
+static ssize_t etm_show_cntr_val(struct device *dev,
+				 struct device_attribute *attr, char *buf)
+{
+	unsigned long val;
+
+	mutex_lock(&drvdata->mutex);
+	val = drvdata->cntr_val[drvdata->cntr_idx];
+	mutex_unlock(&drvdata->mutex);
+	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+
+static ssize_t etm_store_cntr_val(struct device *dev,
+				  struct device_attribute *attr,
+				  const char *buf, size_t size)
+{
+	unsigned long val;
+
+	if (sscanf(buf, "%lx", &val) != 1)
+		return -EINVAL;
+
+	mutex_lock(&drvdata->mutex);
+	drvdata->cntr_val[drvdata->cntr_idx] = val;
+	mutex_unlock(&drvdata->mutex);
+	return size;
+}
+static DEVICE_ATTR(cntr_val, S_IRUGO | S_IWUSR, etm_show_cntr_val,
+		   etm_store_cntr_val);
+
+static ssize_t etm_show_seq_12_event(struct device *dev,
+				     struct device_attribute *attr, char *buf)
+{
+	unsigned long val = drvdata->seq_12_event;
+	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+
+static ssize_t etm_store_seq_12_event(struct device *dev,
+				      struct device_attribute *attr,
+				      const char *buf, size_t size)
+{
+	unsigned long val;
+
+	if (sscanf(buf, "%lx", &val) != 1)
+		return -EINVAL;
+
+	drvdata->seq_12_event = val & ETM_EVENT_MASK;
+	return size;
+}
+static DEVICE_ATTR(seq_12_event, S_IRUGO | S_IWUSR, etm_show_seq_12_event,
+		   etm_store_seq_12_event);
+
+static ssize_t etm_show_seq_21_event(struct device *dev,
+				     struct device_attribute *attr, char *buf)
+{
+	unsigned long val = drvdata->seq_21_event;
+	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+
+static ssize_t etm_store_seq_21_event(struct device *dev,
+				      struct device_attribute *attr,
+				      const char *buf, size_t size)
+{
+	unsigned long val;
+
+	if (sscanf(buf, "%lx", &val) != 1)
+		return -EINVAL;
+
+	drvdata->seq_21_event = val & ETM_EVENT_MASK;
+	return size;
+}
+static DEVICE_ATTR(seq_21_event, S_IRUGO | S_IWUSR, etm_show_seq_21_event,
+		   etm_store_seq_21_event);
+
+static ssize_t etm_show_seq_23_event(struct device *dev,
+				     struct device_attribute *attr, char *buf)
+{
+	unsigned long val = drvdata->seq_23_event;
+	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+
+static ssize_t etm_store_seq_23_event(struct device *dev,
+				      struct device_attribute *attr,
+				      const char *buf, size_t size)
+{
+	unsigned long val;
+
+	if (sscanf(buf, "%lx", &val) != 1)
+		return -EINVAL;
+
+	drvdata->seq_23_event = val & ETM_EVENT_MASK;
+	return size;
+}
+static DEVICE_ATTR(seq_23_event, S_IRUGO | S_IWUSR, etm_show_seq_23_event,
+		   etm_store_seq_23_event);
+
+static ssize_t etm_show_seq_31_event(struct device *dev,
+				     struct device_attribute *attr, char *buf)
+{
+	unsigned long val = drvdata->seq_31_event;
+	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+
+static ssize_t etm_store_seq_31_event(struct device *dev,
+				      struct device_attribute *attr,
+				      const char *buf, size_t size)
+{
+	unsigned long val;
+
+	if (sscanf(buf, "%lx", &val) != 1)
+		return -EINVAL;
+
+	drvdata->seq_31_event = val & ETM_EVENT_MASK;
+	return size;
+}
+static DEVICE_ATTR(seq_31_event, S_IRUGO | S_IWUSR, etm_show_seq_31_event,
+		   etm_store_seq_31_event);
+
+static ssize_t etm_show_seq_32_event(struct device *dev,
+				     struct device_attribute *attr, char *buf)
+{
+	unsigned long val = drvdata->seq_32_event;
+	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+
+static ssize_t etm_store_seq_32_event(struct device *dev,
+				      struct device_attribute *attr,
+				      const char *buf, size_t size)
+{
+	unsigned long val;
+
+	if (sscanf(buf, "%lx", &val) != 1)
+		return -EINVAL;
+
+	drvdata->seq_32_event = val & ETM_EVENT_MASK;
+	return size;
+}
+static DEVICE_ATTR(seq_32_event, S_IRUGO | S_IWUSR, etm_show_seq_32_event,
+		   etm_store_seq_32_event);
+
+static ssize_t etm_show_seq_13_event(struct device *dev,
+				     struct device_attribute *attr, char *buf)
+{
+	unsigned long val = drvdata->seq_13_event;
+	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+
+static ssize_t etm_store_seq_13_event(struct device *dev,
+				      struct device_attribute *attr,
+				      const char *buf, size_t size)
+{
+	unsigned long val;
+
+	if (sscanf(buf, "%lx", &val) != 1)
+		return -EINVAL;
+
+	drvdata->seq_13_event = val & ETM_EVENT_MASK;
+	return size;
+}
+static DEVICE_ATTR(seq_13_event, S_IRUGO | S_IWUSR, etm_show_seq_13_event,
+		   etm_store_seq_13_event);
+
+static ssize_t etm_show_seq_curr_state(struct device *dev,
+				       struct device_attribute *attr, char *buf)
+{
+	unsigned long val = drvdata->seq_curr_state;
+	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+
+static ssize_t etm_store_seq_curr_state(struct device *dev,
+					struct device_attribute *attr,
+					const char *buf, size_t size)
+{
+	unsigned long val;
+
+	if (sscanf(buf, "%lx", &val) != 1)
+		return -EINVAL;
+	if (val > ETM_SEQ_STATE_MAX_VAL)
+		return -EINVAL;
+
+	drvdata->seq_curr_state = val;
+	return size;
+}
+static DEVICE_ATTR(seq_curr_state, S_IRUGO | S_IWUSR, etm_show_seq_curr_state,
+		   etm_store_seq_curr_state);
+
+static ssize_t etm_show_ctxid_idx(struct device *dev,
+				  struct device_attribute *attr, char *buf)
+{
+	unsigned long val = drvdata->ctxid_idx;
+	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+
+static ssize_t etm_store_ctxid_idx(struct device *dev,
+				   struct device_attribute *attr,
+				   const char *buf, size_t size)
+{
+	unsigned long val;
+
+	if (sscanf(buf, "%lx", &val) != 1)
+		return -EINVAL;
+	if (val >= drvdata->nr_ctxid_cmp)
+		return -EINVAL;
+
+	/* Use mutex to ensure index doesn't change while it gets dereferenced
+	 * multiple times within a mutex block elsewhere.
+	 */
+	mutex_lock(&drvdata->mutex);
+	drvdata->ctxid_idx = val;
+	mutex_unlock(&drvdata->mutex);
+	return size;
+}
+static DEVICE_ATTR(ctxid_idx, S_IRUGO | S_IWUSR, etm_show_ctxid_idx,
+		   etm_store_ctxid_idx);
+
+static ssize_t etm_show_ctxid_val(struct device *dev,
+				  struct device_attribute *attr, char *buf)
+{
+	unsigned long val;
+
+	mutex_lock(&drvdata->mutex);
+	val = drvdata->ctxid_val[drvdata->ctxid_idx];
+	mutex_unlock(&drvdata->mutex);
+	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+
+static ssize_t etm_store_ctxid_val(struct device *dev,
+				   struct device_attribute *attr,
+				   const char *buf, size_t size)
+{
+	unsigned long val;
+
+	if (sscanf(buf, "%lx", &val) != 1)
+		return -EINVAL;
+
+	mutex_lock(&drvdata->mutex);
+	drvdata->ctxid_val[drvdata->ctxid_idx] = val;
+	mutex_unlock(&drvdata->mutex);
+	return size;
+}
+static DEVICE_ATTR(ctxid_val, S_IRUGO | S_IWUSR, etm_show_ctxid_val,
+		   etm_store_ctxid_val);
+
+static ssize_t etm_show_ctxid_mask(struct device *dev,
+				   struct device_attribute *attr, char *buf)
+{
+	unsigned long val = drvdata->ctxid_mask;
+	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+
+static ssize_t etm_store_ctxid_mask(struct device *dev,
+				    struct device_attribute *attr,
+				    const char *buf, size_t size)
+{
+	unsigned long val;
+
+	if (sscanf(buf, "%lx", &val) != 1)
+		return -EINVAL;
+
+	drvdata->ctxid_mask = val;
+	return size;
+}
+static DEVICE_ATTR(ctxid_mask, S_IRUGO | S_IWUSR, etm_show_ctxid_mask,
+		   etm_store_ctxid_mask);
+
+static ssize_t etm_show_sync_freq(struct device *dev,
+				  struct device_attribute *attr, char *buf)
+{
+	unsigned long val = drvdata->sync_freq;
+	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+
+static ssize_t etm_store_sync_freq(struct device *dev,
+				   struct device_attribute *attr,
+				   const char *buf, size_t size)
+{
+	unsigned long val;
+
+	if (sscanf(buf, "%lx", &val) != 1)
+		return -EINVAL;
+
+	drvdata->sync_freq = val & ETM_SYNC_MASK;
+	return size;
+}
+static DEVICE_ATTR(sync_freq, S_IRUGO | S_IWUSR, etm_show_sync_freq,
+		   etm_store_sync_freq);
+
+static ssize_t etm_show_timestamp_event(struct device *dev,
+					struct device_attribute *attr,
+					char *buf)
+{
+	unsigned long val = drvdata->timestamp_event;
+	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+
+static ssize_t etm_store_timestamp_event(struct device *dev,
+					 struct device_attribute *attr,
+					 const char *buf, size_t size)
+{
+	unsigned long val;
+
+	if (sscanf(buf, "%lx", &val) != 1)
+		return -EINVAL;
+
+	drvdata->timestamp_event = val & ETM_EVENT_MASK;
+	return size;
+}
+static DEVICE_ATTR(timestamp_event, S_IRUGO | S_IWUSR, etm_show_timestamp_event,
+		   etm_store_timestamp_event);
+
+static struct attribute *etm_attrs[] = {
+	&dev_attr_nr_addr_cmp.attr,
+	&dev_attr_nr_cntr.attr,
+	&dev_attr_nr_ctxid_cmp.attr,
+	&dev_attr_reset.attr,
+	&dev_attr_mode.attr,
+	&dev_attr_trigger_event.attr,
+	&dev_attr_enable_event.attr,
+	&dev_attr_fifofull_level.attr,
+	&dev_attr_addr_idx.attr,
+	&dev_attr_addr_single.attr,
+	&dev_attr_addr_range.attr,
+	&dev_attr_addr_start.attr,
+	&dev_attr_addr_stop.attr,
+	&dev_attr_addr_acctype.attr,
+	&dev_attr_cntr_idx.attr,
+	&dev_attr_cntr_rld_val.attr,
+	&dev_attr_cntr_event.attr,
+	&dev_attr_cntr_rld_event.attr,
+	&dev_attr_cntr_val.attr,
+	&dev_attr_seq_12_event.attr,
+	&dev_attr_seq_21_event.attr,
+	&dev_attr_seq_23_event.attr,
+	&dev_attr_seq_31_event.attr,
+	&dev_attr_seq_32_event.attr,
+	&dev_attr_seq_13_event.attr,
+	&dev_attr_seq_curr_state.attr,
+	&dev_attr_ctxid_idx.attr,
+	&dev_attr_ctxid_val.attr,
+	&dev_attr_ctxid_mask.attr,
+	&dev_attr_sync_freq.attr,
+	&dev_attr_timestamp_event.attr,
+	NULL,
+};
+
+static struct attribute_group etm_attr_grp = {
+	.attrs = etm_attrs,
+};
+
+static int __devinit etm_sysfs_init(void)
+{
+	int ret;
+
+	drvdata->kobj = kobject_create_and_add("etm", qdss_get_modulekobj());
+	if (!drvdata->kobj) {
+		dev_err(drvdata->dev, "failed to create ETM sysfs kobject\n");
+		ret = -ENOMEM;
+		goto err_create;
+	}
+
+	ret = sysfs_create_file(drvdata->kobj, &dev_attr_enabled.attr);
+	if (ret) {
+		dev_err(drvdata->dev, "failed to create ETM sysfs enabled"
+		" attribute\n");
+		goto err_file;
+	}
+
+	if (sysfs_create_group(drvdata->kobj, &etm_attr_grp))
+		dev_err(drvdata->dev, "failed to create ETM sysfs group\n");
+
+	return 0;
+err_file:
+	kobject_put(drvdata->kobj);
+err_create:
+	return ret;
+}
+
+static void __devexit etm_sysfs_exit(void)
+{
+	sysfs_remove_group(drvdata->kobj, &etm_attr_grp);
+	sysfs_remove_file(drvdata->kobj, &dev_attr_enabled.attr);
+	kobject_put(drvdata->kobj);
+}
+
+static bool __devinit etm_arch_supported(uint8_t arch)
+{
+	switch (arch) {
+	case PFT_ARCH_V1_1:
+		break;
+	default:
+		return false;
+	}
+	return true;
+}
+
+static int __devinit etm_init_arch_data(void)
+{
+	int ret;
+	/* use cpu 0 for setup */
+	int cpu = 0;
+	uint32_t etmidr;
+	uint32_t etmccr;
+
+	/* Unlock OS lock first to allow memory mapped reads and writes */
+	etm_os_unlock(NULL);
+	smp_call_function(etm_os_unlock, NULL, 1);
+	ETM_UNLOCK(cpu);
+	/* Vote for ETM power/clock enable */
+	etm_clr_pwrdwn(cpu);
+	/* Set the prog bit. It should already be set out of reset, but set
+	 * it explicitly here to be sure.
+	 */
+	etm_set_prog(cpu);
+
+	/* find all capabilities */
+	etmidr = etm_readl(drvdata, cpu, ETMIDR);
+	drvdata->arch = BMVAL(etmidr, 4, 11);
+	if (etm_arch_supported(drvdata->arch) == false) {
+		ret = -EINVAL;
+		goto err;
+	}
+
+	etmccr = etm_readl(drvdata, cpu, ETMCCR);
+	drvdata->nr_addr_cmp = BMVAL(etmccr, 0, 3) * 2;
+	drvdata->nr_cntr = BMVAL(etmccr, 13, 15);
+	drvdata->nr_ext_inp = BMVAL(etmccr, 17, 19);
+	drvdata->nr_ext_out = BMVAL(etmccr, 20, 22);
+	drvdata->nr_ctxid_cmp = BMVAL(etmccr, 24, 25);
+
+	/* Vote for ETM power/clock disable */
+	etm_set_pwrdwn(cpu);
+	ETM_LOCK(cpu);
+
+	return 0;
+err:
+	return ret;
+}
+
+static void __devinit etm_init_default_data(void)
+{
+	int i;
+
+	drvdata->trigger_event = 0x406F;
+	drvdata->enable_event = 0x6F;
+	drvdata->enable_ctrl1 = 0x1;
+	drvdata->fifofull_level	= 0x28;
+	if (drvdata->nr_addr_cmp >= 2) {
+		drvdata->addr_val[0] = (uint32_t) _stext;
+		drvdata->addr_val[1] = (uint32_t) _etext;
+		drvdata->addr_type[0] = ETM_ADDR_TYPE_RANGE;
+		drvdata->addr_type[1] = ETM_ADDR_TYPE_RANGE;
+	}
+	for (i = 0; i < drvdata->nr_cntr; i++) {
+		drvdata->cntr_event[i] = 0x406F;
+		drvdata->cntr_rld_event[i] = 0x406F;
+	}
+	drvdata->seq_12_event = 0x406F;
+	drvdata->seq_21_event = 0x406F;
+	drvdata->seq_23_event = 0x406F;
+	drvdata->seq_31_event = 0x406F;
+	drvdata->seq_32_event = 0x406F;
+	drvdata->seq_13_event = 0x406F;
+	drvdata->sync_freq = 0x80;
+	drvdata->timestamp_event = 0x406F;
+
+	/* Overrides for Krait pass1 */
+	if (cpu_is_krait_v1()) {
+		/* Krait pass1 doesn't support include filtering and non-cycle
+		 * accurate tracing
+		 */
+		drvdata->mode = (ETM_MODE_EXCLUDE | ETM_MODE_CYCACC);
+		drvdata->ctrl = 0x1000;
+		drvdata->enable_ctrl1 = 0x1000000;
+		for (i = 0; i < drvdata->nr_addr_cmp; i++) {
+			drvdata->addr_val[i] = 0x0;
+			drvdata->addr_acctype[i] = 0x0;
+			drvdata->addr_type[i] = ETM_ADDR_TYPE_NONE;
+		}
+	}
+}
+
+static int __devinit etm_probe(struct platform_device *pdev)
+{
+	int ret;
+	struct resource *res;
+
+	drvdata = kzalloc(sizeof(*drvdata), GFP_KERNEL);
+	if (!drvdata) {
+		ret = -ENOMEM;
+		goto err_kzalloc_drvdata;
+	}
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res) {
+		ret = -EINVAL;
+		goto err_res;
+	}
+
+	drvdata->base = ioremap_nocache(res->start, resource_size(res));
+	if (!drvdata->base) {
+		ret = -EINVAL;
+		goto err_ioremap;
+	}
+
+	drvdata->dev = &pdev->dev;
+
+	mutex_init(&drvdata->mutex);
+	wake_lock_init(&drvdata->wake_lock, WAKE_LOCK_SUSPEND, "msm_etm");
+	pm_qos_add_request(&drvdata->qos_req, PM_QOS_CPU_DMA_LATENCY,
+			   PM_QOS_DEFAULT_VALUE);
+	drvdata->src = qdss_get("msm_etm");
+	if (IS_ERR(drvdata->src)) {
+		ret = PTR_ERR(drvdata->src);
+		goto err_qdssget;
+	}
+
+	drvdata->clk = clk_get(drvdata->dev, "core_clk");
+	if (IS_ERR(drvdata->clk)) {
+		ret = PTR_ERR(drvdata->clk);
+		goto err_clk_get;
+	}
+
+	ret = clk_set_rate(drvdata->clk, CORESIGHT_CLK_RATE_TRACE);
+	if (ret)
+		goto err_clk_rate;
+
+	ret = clk_prepare_enable(drvdata->clk);
+	if (ret)
+		goto err_clk_enable;
+
+	ret = etm_init_arch_data();
+	if (ret)
+		goto err_arch;
+
+	etm_init_default_data();
+
+	ret = etm_sysfs_init();
+	if (ret)
+		goto err_sysfs;
+
+	drvdata->enabled = false;
+
+	clk_disable_unprepare(drvdata->clk);
+
+	dev_info(drvdata->dev, "ETM initialized\n");
+
+	if (etm_boot_enable)
+		etm_enable();
+
+	return 0;
+
+err_sysfs:
+err_arch:
+	clk_disable_unprepare(drvdata->clk);
+err_clk_enable:
+err_clk_rate:
+	clk_put(drvdata->clk);
+err_clk_get:
+	qdss_put(drvdata->src);
+err_qdssget:
+	pm_qos_remove_request(&drvdata->qos_req);
+	wake_lock_destroy(&drvdata->wake_lock);
+	mutex_destroy(&drvdata->mutex);
+	iounmap(drvdata->base);
+err_ioremap:
+err_res:
+	kfree(drvdata);
+err_kzalloc_drvdata:
+	dev_err(drvdata->dev, "ETM init failed\n");
+	return ret;
+}
+
+static int __devexit etm_remove(struct platform_device *pdev)
+{
+	if (drvdata->enabled)
+		etm_disable();
+	etm_sysfs_exit();
+	clk_put(drvdata->clk);
+	qdss_put(drvdata->src);
+	pm_qos_remove_request(&drvdata->qos_req);
+	wake_lock_destroy(&drvdata->wake_lock);
+	mutex_destroy(&drvdata->mutex);
+	iounmap(drvdata->base);
+	kfree(drvdata);
+
+	return 0;
+}
+
+static struct of_device_id etm_match[] = {
+	{.compatible = "qcom,msm-etm"},
+	{}
+};
+
+static struct platform_driver etm_driver = {
+	.probe          = etm_probe,
+	.remove         = __devexit_p(etm_remove),
+	.driver         = {
+		.name   = "msm_etm",
+		.owner	= THIS_MODULE,
+		.of_match_table = etm_match,
+	},
+};
+
+int __init etm_init(void)
+{
+	return platform_driver_register(&etm_driver);
+}
+module_init(etm_init);
+
+void __exit etm_exit(void)
+{
+	platform_driver_unregister(&etm_driver);
+}
+module_exit(etm_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("CoreSight Program Flow Trace driver");
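
For context, the ETM driver above exposes its configuration through sysfs attributes created by etm_sysfs_init() under a kobject parented to the QDSS module kobject. The user-space sketch below is illustrative only: it toggles tracing by writing to the "enabled" attribute, and the sysfs path is an assumption that depends on where qdss_get_modulekobj() places the "etm" directory on a given target.

	/* toggle_etm.c - user-space sketch, not part of this patch */
	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	static int etm_set_enabled(int on)
	{
		/* Assumed path; check where the "etm" kobject lands on the target */
		const char *path = "/sys/module/qdss/etm/enabled";
		int fd = open(path, O_WRONLY);

		if (fd < 0) {
			perror("open");
			return -1;
		}
		/* The store handler parses the value with sscanf(buf, "%lx", ...) */
		if (write(fd, on ? "1" : "0", 1) != 1) {
			perror("write");
			close(fd);
			return -1;
		}
		close(fd);
		return 0;
	}

	int main(void)
	{
		return etm_set_enabled(1) ? 1 : 0;
	}
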
diff --git a/drivers/coresight/coresight-funnel.c b/drivers/coresight/coresight-funnel.c
new file mode 100644
index 0000000..79a27f4
--- /dev/null
+++ b/drivers/coresight/coresight-funnel.c
@@ -0,0 +1,275 @@
+/* Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/clk.h>
+#include <linux/coresight.h>
+
+#include "coresight-priv.h"
+
+#define funnel_writel(drvdata, id, val, off)	\
+			__raw_writel((val), drvdata->base + (SZ_4K * id) + off)
+#define funnel_readl(drvdata, id, off)		\
+			__raw_readl(drvdata->base + (SZ_4K * id) + off)
+
+#define FUNNEL_FUNCTL			(0x000)
+#define FUNNEL_PRICTL			(0x004)
+#define FUNNEL_ITATBDATA0		(0xEEC)
+#define FUNNEL_ITATBCTR2		(0xEF0)
+#define FUNNEL_ITATBCTR1		(0xEF4)
+#define FUNNEL_ITATBCTR0		(0xEF8)
+
+
+#define FUNNEL_LOCK(id)							\
+do {									\
+	mb();								\
+	funnel_writel(drvdata, id, 0x0, CORESIGHT_LAR);			\
+} while (0)
+#define FUNNEL_UNLOCK(id)						\
+do {									\
+	funnel_writel(drvdata, id, CORESIGHT_UNLOCK, CORESIGHT_LAR);	\
+	mb();								\
+} while (0)
+
+#define FUNNEL_HOLDTIME_MASK		(0xF00)
+#define FUNNEL_HOLDTIME_SHFT		(0x8)
+#define FUNNEL_HOLDTIME			(0x7 << FUNNEL_HOLDTIME_SHFT)
+
+struct funnel_drvdata {
+	void __iomem	*base;
+	bool		enabled;
+	struct mutex	mutex;
+	struct device	*dev;
+	struct kobject	*kobj;
+	struct clk	*clk;
+	uint32_t	priority;
+};
+
+static struct funnel_drvdata *drvdata;
+
+static void __funnel_enable(uint8_t id, uint32_t port_mask)
+{
+	uint32_t functl;
+
+	FUNNEL_UNLOCK(id);
+
+	functl = funnel_readl(drvdata, id, FUNNEL_FUNCTL);
+	functl &= ~FUNNEL_HOLDTIME_MASK;
+	functl |= FUNNEL_HOLDTIME;
+	functl |= port_mask;
+	funnel_writel(drvdata, id, functl, FUNNEL_FUNCTL);
+	funnel_writel(drvdata, id, drvdata->priority, FUNNEL_PRICTL);
+
+	FUNNEL_LOCK(id);
+}
+
+int funnel_enable(uint8_t id, uint32_t port_mask)
+{
+	int ret;
+
+	ret = clk_prepare_enable(drvdata->clk);
+	if (ret)
+		return ret;
+
+	mutex_lock(&drvdata->mutex);
+	__funnel_enable(id, port_mask);
+	drvdata->enabled = true;
+	dev_info(drvdata->dev, "FUNNEL port mask 0x%lx enabled\n",
+					(unsigned long) port_mask);
+	mutex_unlock(&drvdata->mutex);
+
+	return 0;
+}
+
+static void __funnel_disable(uint8_t id, uint32_t port_mask)
+{
+	uint32_t functl;
+
+	FUNNEL_UNLOCK(id);
+
+	functl = funnel_readl(drvdata, id, FUNNEL_FUNCTL);
+	functl &= ~port_mask;
+	funnel_writel(drvdata, id, functl, FUNNEL_FUNCTL);
+
+	FUNNEL_LOCK(id);
+}
+
+void funnel_disable(uint8_t id, uint32_t port_mask)
+{
+	mutex_lock(&drvdata->mutex);
+	__funnel_disable(id, port_mask);
+	drvdata->enabled = false;
+	dev_info(drvdata->dev, "FUNNEL port mask 0x%lx disabled\n",
+					(unsigned long) port_mask);
+	mutex_unlock(&drvdata->mutex);
+
+	clk_disable_unprepare(drvdata->clk);
+}
+
+static ssize_t funnel_show_priority(struct device *dev,
+				    struct device_attribute *attr, char *buf)
+{
+	unsigned long val = drvdata->priority;
+	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+
+static ssize_t funnel_store_priority(struct device *dev,
+				     struct device_attribute *attr,
+				     const char *buf, size_t size)
+{
+	unsigned long val;
+
+	if (sscanf(buf, "%lx", &val) != 1)
+		return -EINVAL;
+
+	drvdata->priority = val;
+	return size;
+}
+static DEVICE_ATTR(priority, S_IRUGO | S_IWUSR, funnel_show_priority,
+		   funnel_store_priority);
+
+static int __devinit funnel_sysfs_init(void)
+{
+	int ret;
+
+	drvdata->kobj = kobject_create_and_add("funnel", qdss_get_modulekobj());
+	if (!drvdata->kobj) {
+		dev_err(drvdata->dev, "failed to create FUNNEL sysfs kobject\n");
+		ret = -ENOMEM;
+		goto err_create;
+	}
+
+	ret = sysfs_create_file(drvdata->kobj, &dev_attr_priority.attr);
+	if (ret) {
+		dev_err(drvdata->dev, "failed to create FUNNEL sysfs priority"
+		" attribute\n");
+		goto err_file;
+	}
+
+	return 0;
+err_file:
+	kobject_put(drvdata->kobj);
+err_create:
+	return ret;
+}
+
+static void __devexit funnel_sysfs_exit(void)
+{
+	sysfs_remove_file(drvdata->kobj, &dev_attr_priority.attr);
+	kobject_put(drvdata->kobj);
+}
+
+static int __devinit funnel_probe(struct platform_device *pdev)
+{
+	int ret;
+	struct resource *res;
+
+	drvdata = kzalloc(sizeof(*drvdata), GFP_KERNEL);
+	if (!drvdata) {
+		ret = -ENOMEM;
+		goto err_kzalloc_drvdata;
+	}
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res) {
+		ret = -EINVAL;
+		goto err_res;
+	}
+
+	drvdata->base = ioremap_nocache(res->start, resource_size(res));
+	if (!drvdata->base) {
+		ret = -EINVAL;
+		goto err_ioremap;
+	}
+
+	drvdata->dev = &pdev->dev;
+
+	mutex_init(&drvdata->mutex);
+
+	drvdata->clk = clk_get(drvdata->dev, "core_clk");
+	if (IS_ERR(drvdata->clk)) {
+		ret = PTR_ERR(drvdata->clk);
+		goto err_clk_get;
+	}
+
+	ret = clk_set_rate(drvdata->clk, CORESIGHT_CLK_RATE_TRACE);
+	if (ret)
+		goto err_clk_rate;
+
+	funnel_sysfs_init();
+
+	dev_info(drvdata->dev, "FUNNEL initialized\n");
+	return 0;
+
+err_clk_rate:
+	clk_put(drvdata->clk);
+err_clk_get:
+	mutex_destroy(&drvdata->mutex);
+	iounmap(drvdata->base);
+err_ioremap:
+err_res:
+	kfree(drvdata);
+err_kzalloc_drvdata:
+	dev_err(drvdata->dev, "FUNNEL init failed\n");
+	return ret;
+}
+
+static int __devexit funnel_remove(struct platform_device *pdev)
+{
+	if (drvdata->enabled)
+		funnel_disable(0x0, 0xFF);
+	funnel_sysfs_exit();
+	clk_put(drvdata->clk);
+	mutex_destroy(&drvdata->mutex);
+	iounmap(drvdata->base);
+	kfree(drvdata);
+
+	return 0;
+}
+
+static struct of_device_id funnel_match[] = {
+	{.compatible = "qcom,msm-funnel"},
+	{}
+};
+
+static struct platform_driver funnel_driver = {
+	.probe          = funnel_probe,
+	.remove         = __devexit_p(funnel_remove),
+	.driver         = {
+		.name   = "msm_funnel",
+		.owner	= THIS_MODULE,
+		.of_match_table = funnel_match,
+	},
+};
+
+static int __init funnel_init(void)
+{
+	return platform_driver_register(&funnel_driver);
+}
+module_init(funnel_init);
+
+static void __exit funnel_exit(void)
+{
+	platform_driver_unregister(&funnel_driver);
+}
+module_exit(funnel_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("CoreSight Funnel driver");
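
As a side note on the register layout: __funnel_enable() programs FUNNEL_FUNCTL with a read-modify-write that clears the hold-time field, applies the fixed hold time and then ORs in the caller's port mask, while funnel_disable() only clears those port bits again. The standalone sketch below replays that bit manipulation on a made-up register value so the field layout is easy to see; it is an illustration, not driver code.

	/* funnel_functl_demo.c - illustrative only */
	#include <stdint.h>
	#include <stdio.h>

	#define FUNNEL_HOLDTIME_MASK	0xF00u
	#define FUNNEL_HOLDTIME_SHFT	8
	#define FUNNEL_HOLDTIME		(0x7u << FUNNEL_HOLDTIME_SHFT)

	static uint32_t funnel_functl_enable(uint32_t functl, uint32_t port_mask)
	{
		functl &= ~FUNNEL_HOLDTIME_MASK;	/* drop the previous hold time */
		functl |= FUNNEL_HOLDTIME;		/* program a hold time of 7 */
		functl |= port_mask;			/* low bits enable input ports */
		return functl;
	}

	int main(void)
	{
		uint32_t before = 0x300;	/* hypothetical current FUNCTL value */
		uint32_t after = funnel_functl_enable(before, 0x3);

		printf("FUNCTL: %#x -> %#x\n", before, after);
		return 0;
	}
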
diff --git a/drivers/coresight/coresight-priv.h b/drivers/coresight/coresight-priv.h
new file mode 100644
index 0000000..dab854c
--- /dev/null
+++ b/drivers/coresight/coresight-priv.h
@@ -0,0 +1,48 @@
+/* Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CORESIGHT_PRIV_H
+#define _CORESIGHT_PRIV_H
+
+#include <linux/bitops.h>
+
+/* Coresight management registers (0xF00-0xFCC)
+ * 0xFA0 - 0xFA4: Management	registers in PFTv1.0
+ *		  Trace		registers in PFTv1.1
+ */
+#define CORESIGHT_ITCTRL	(0xF00)
+#define CORESIGHT_CLAIMSET	(0xFA0)
+#define CORESIGHT_CLAIMCLR	(0xFA4)
+#define CORESIGHT_LAR		(0xFB0)
+#define CORESIGHT_LSR		(0xFB4)
+#define CORESIGHT_AUTHSTATUS	(0xFB8)
+#define CORESIGHT_DEVID		(0xFC8)
+#define CORESIGHT_DEVTYPE	(0xFCC)
+
+#define CORESIGHT_UNLOCK	(0xC5ACCE55)
+
+#define TIMEOUT_US		(100)
+
+#define BM(lsb, msb)		((BIT(msb) - BIT(lsb)) + BIT(msb))
+#define BMVAL(val, lsb, msb)	((val & BM(lsb, msb)) >> lsb)
+#define BVAL(val, n)		((val & BIT(n)) >> n)
+
+int etb_enable(void);
+void etb_disable(void);
+void etb_dump(void);
+void tpiu_disable(void);
+int funnel_enable(uint8_t id, uint32_t port_mask);
+void funnel_disable(uint8_t id, uint32_t port_mask);
+
+struct kobject *qdss_get_modulekobj(void);
+
+#endif
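
The BM/BMVAL/BVAL helpers above are what etm_init_arch_data() uses to pull fields out of the ETM ID registers. The small host-side demonstration below, with BIT() redefined locally and a made-up ETMCCR value, shows how those field extractions behave; the register value is hypothetical.

	/* bmval_demo.c - illustrative only */
	#include <assert.h>
	#include <stdint.h>
	#include <stdio.h>

	#define BIT(n)			(1u << (n))
	#define BM(lsb, msb)		((BIT(msb) - BIT(lsb)) + BIT(msb))
	#define BMVAL(val, lsb, msb)	(((val) & BM(lsb, msb)) >> (lsb))
	#define BVAL(val, n)		(((val) & BIT(n)) >> (n))

	int main(void)
	{
		uint32_t etmccr = 0x8C802024;	/* hypothetical ID register value */

		/* address comparators are reported in pairs, hence the doubling */
		printf("addr comparators:  %u\n", BMVAL(etmccr, 0, 3) * 2);
		printf("counters:          %u\n", BMVAL(etmccr, 13, 15));
		printf("ctxid comparators: %u\n", BMVAL(etmccr, 24, 25));
		printf("bit 31 set:        %u\n", BVAL(etmccr, 31));

		assert(BM(0, 3) == 0xF);	/* sanity check of the field mask */
		return 0;
	}
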
diff --git a/arch/arm/mach-msm/qdss-stm.c b/drivers/coresight/coresight-stm.c
similarity index 66%
rename from arch/arm/mach-msm/qdss-stm.c
rename to drivers/coresight/coresight-stm.c
index 9ce6318..6387947 100644
--- a/arch/arm/mach-msm/qdss-stm.c
+++ b/drivers/coresight/coresight-stm.c
@@ -22,15 +22,17 @@
 #include <linux/miscdevice.h>
 #include <linux/uaccess.h>
 #include <linux/slab.h>
+#include <linux/clk.h>
+#include <linux/coresight.h>
+#include <linux/coresight-stm.h>
 #include <asm/unaligned.h>
-#include <mach/stm.h>
 
-#include "qdss-priv.h"
+#include "coresight-priv.h"
 
-#define stm_writel(stm, val, off)	\
-			__raw_writel((val), stm.base + off)
-#define stm_readl(stm, val, off)	\
-			__raw_readl(stm.base + off)
+#define stm_writel(drvdata, val, off)	\
+			__raw_writel((val), drvdata->base + off)
+#define stm_readl(drvdata, val, off)	\
+			__raw_readl(drvdata->base + off)
 
 #define NR_STM_CHANNEL		(32)
 #define BYTES_PER_CHANNEL	(256)
@@ -51,17 +53,17 @@
 #define OST_VERSION		(0x1)
 
 #define stm_channel_addr(ch)						\
-				(stm.chs.base + (ch * BYTES_PER_CHANNEL))
+				(drvdata->chs.base + (ch * BYTES_PER_CHANNEL))
 #define stm_channel_off(type, opts)	(type & ~opts)
 
 #define STM_LOCK()							\
 do {									\
 	mb();								\
-	stm_writel(stm, 0x0, CS_LAR);					\
+	stm_writel(drvdata, 0x0, CORESIGHT_LAR);			\
 } while (0)
 #define STM_UNLOCK()							\
 do {									\
-	stm_writel(stm, CS_UNLOCK_MAGIC, CS_LAR);			\
+	stm_writel(drvdata, CORESIGHT_UNLOCK, CORESIGHT_LAR);		\
 	mb();								\
 } while (0)
 
@@ -91,29 +93,27 @@
 	unsigned long		*bitmap;
 };
 
-struct stm_ctx {
+struct stm_drvdata {
 	void __iomem		*base;
 	bool			enabled;
 	struct qdss_source	*src;
 	struct device		*dev;
 	struct kobject		*kobj;
+	struct clk		*clk;
 	uint32_t		entity;
 	struct channel_space	chs;
 };
 
-static struct stm_ctx stm = {
-	.entity		= OST_ENTITY_ALL,
-};
-
+static struct stm_drvdata *drvdata;
 
 static void __stm_enable(void)
 {
 	STM_UNLOCK();
 
-	stm_writel(stm, 0x80, STMSYNCR);
-	stm_writel(stm, 0xFFFFFFFF, STMSPTER);
-	stm_writel(stm, 0xFFFFFFFF, STMSPER);
-	stm_writel(stm, 0x30003, STMTCSR);
+	stm_writel(drvdata, 0x80, STMSYNCR);
+	stm_writel(drvdata, 0xFFFFFFFF, STMSPTER);
+	stm_writel(drvdata, 0xFFFFFFFF, STMSPER);
+	stm_writel(drvdata, 0x30003, STMTCSR);
 
 	STM_LOCK();
 }
@@ -122,23 +122,30 @@
 {
 	int ret;
 
-	if (stm.enabled) {
-		dev_err(stm.dev, "STM tracing already enabled\n");
+	if (drvdata->enabled) {
+		dev_err(drvdata->dev, "STM tracing already enabled\n");
 		ret = -EINVAL;
 		goto err;
 	}
 
-	ret = qdss_enable(stm.src);
+	ret = clk_prepare_enable(drvdata->clk);
 	if (ret)
-		goto err;
+		goto err_clk;
+
+	ret = qdss_enable(drvdata->src);
+	if (ret)
+		goto err_qdss;
 
 	__stm_enable();
 
-	stm.enabled = true;
+	drvdata->enabled = true;
 
-	dev_info(stm.dev, "STM tracing enabled\n");
+	dev_info(drvdata->dev, "STM tracing enabled\n");
 	return 0;
 
+err_qdss:
+	clk_disable_unprepare(drvdata->clk);
+err_clk:
 err:
 	return ret;
 }
@@ -147,9 +154,9 @@
 {
 	STM_UNLOCK();
 
-	stm_writel(stm, 0x30000, STMTCSR);
-	stm_writel(stm, 0x0, STMSPER);
-	stm_writel(stm, 0x0, STMSPTER);
+	stm_writel(drvdata, 0x30000, STMTCSR);
+	stm_writel(drvdata, 0x0, STMSPER);
+	stm_writel(drvdata, 0x0, STMSPTER);
 
 	STM_LOCK();
 }
@@ -158,19 +165,21 @@
 {
 	int ret;
 
-	if (!stm.enabled) {
-		dev_err(stm.dev, "STM tracing already disabled\n");
+	if (!drvdata->enabled) {
+		dev_err(drvdata->dev, "STM tracing already disabled\n");
 		ret = -EINVAL;
 		goto err;
 	}
 
 	__stm_disable();
 
-	qdss_disable(stm.src);
+	drvdata->enabled = false;
 
-	stm.enabled = false;
+	qdss_disable(drvdata->src);
 
-	dev_info(stm.dev, "STM tracing disabled\n");
+	clk_disable_unprepare(drvdata->clk);
+
+	dev_info(drvdata->dev, "STM tracing disabled\n");
 	return 0;
 
 err:
@@ -182,15 +191,17 @@
 	uint32_t ch;
 
 	do {
-		ch = find_next_zero_bit(stm.chs.bitmap,	NR_STM_CHANNEL, off);
-	} while ((ch < NR_STM_CHANNEL) && test_and_set_bit(ch, stm.chs.bitmap));
+		ch = find_next_zero_bit(drvdata->chs.bitmap,
+					NR_STM_CHANNEL, off);
+	} while ((ch < NR_STM_CHANNEL) &&
+		 test_and_set_bit(ch, drvdata->chs.bitmap));
 
 	return ch;
 }
 
 static void stm_channel_free(uint32_t ch)
 {
-	clear_bit(ch, stm.chs.bitmap);
+	clear_bit(ch, drvdata->chs.bitmap);
 }
 
 static int stm_send(void *addr, const void *data, uint32_t size)
@@ -331,10 +342,10 @@
  * number of bytes transfered over STM
  */
 int stm_trace(uint32_t options, uint8_t entity_id, uint8_t proto_id,
-	      const void *data, uint32_t size)
+			const void *data, uint32_t size)
 {
 	/* we don't support sizes more than 24bits (0 to 23) */
-	if (!(stm.enabled && (stm.entity & entity_id) &&
+	if (!(drvdata->enabled && (drvdata->entity & entity_id) &&
 	      (size < 0x1000000)))
 		return 0;
 
@@ -347,10 +358,10 @@
 {
 	char *buf;
 
-	if (!stm.enabled)
+	if (!drvdata->enabled)
 		return -EINVAL;
 
-	if (!(stm.entity & OST_ENTITY_DEV_NODE))
+	if (!(drvdata->entity & OST_ENTITY_DEV_NODE))
 		return size;
 
 	if (size > STM_TRACE_BUF_SIZE)
@@ -362,7 +373,7 @@
 
 	if (copy_from_user(buf, data, size)) {
 		kfree(buf);
-		dev_dbg(stm.dev, "%s: copy_from_user failed\n", __func__);
+		dev_dbg(drvdata->dev, "%s: copy_from_user failed\n", __func__);
 		return -EFAULT;
 	}
 
@@ -385,13 +396,16 @@
 	.fops		= &stm_fops,
 };
 
-#define STM_ATTR(__name)						\
-static struct kobj_attribute __name##_attr =				\
-	__ATTR(__name, S_IRUGO | S_IWUSR, __name##_show, __name##_store)
+static ssize_t stm_show_enabled(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	unsigned long val = drvdata->enabled;
+	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
 
-static ssize_t enabled_store(struct kobject *kobj,
-			struct kobj_attribute *attr,
-			const char *buf, size_t n)
+static ssize_t stm_store_enabled(struct device *dev,
+				 struct device_attribute *attr,
+				const char *buf, size_t size)
 {
 	int ret = 0;
 	unsigned long val;
@@ -406,70 +420,65 @@
 
 	if (ret)
 		return ret;
-	return n;
+	return size;
 }
-static ssize_t enabled_show(struct kobject *kobj,
-			struct kobj_attribute *attr,
-			char *buf)
+static DEVICE_ATTR(enabled, S_IRUGO | S_IWUSR, stm_show_enabled,
+		   stm_store_enabled);
+
+static ssize_t stm_show_entity(struct device *dev,
+			       struct device_attribute *attr, char *buf)
 {
-	unsigned long val = stm.enabled;
+	unsigned long val = drvdata->entity;
 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
 }
-STM_ATTR(enabled);
 
-static ssize_t entity_store(struct kobject *kobj,
-			struct kobj_attribute *attr,
-			const char *buf, size_t n)
+static ssize_t stm_store_entity(struct device *dev,
+				struct device_attribute *attr,
+				const char *buf, size_t size)
 {
 	unsigned long val;
 
 	if (sscanf(buf, "%lx", &val) != 1)
 		return -EINVAL;
 
-	stm.entity = val;
-	return n;
+	drvdata->entity = val;
+	return size;
 }
-static ssize_t entity_show(struct kobject *kobj,
-			struct kobj_attribute *attr,
-			char *buf)
-{
-	unsigned long val = stm.entity;
-	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
-}
-STM_ATTR(entity);
+static DEVICE_ATTR(entity, S_IRUGO | S_IWUSR, stm_show_entity,
+		   stm_store_entity);
 
 static int __devinit stm_sysfs_init(void)
 {
 	int ret;
 
-	stm.kobj = kobject_create_and_add("stm", qdss_get_modulekobj());
-	if (!stm.kobj) {
-		dev_err(stm.dev, "failed to create STM sysfs kobject\n");
+	drvdata->kobj = kobject_create_and_add("stm", qdss_get_modulekobj());
+	if (!drvdata->kobj) {
+		dev_err(drvdata->dev, "failed to create STM sysfs kobject\n");
 		ret = -ENOMEM;
 		goto err_create;
 	}
 
-	ret = sysfs_create_file(stm.kobj, &enabled_attr.attr);
+	ret = sysfs_create_file(drvdata->kobj, &dev_attr_enabled.attr);
 	if (ret) {
-		dev_err(stm.dev, "failed to create STM sysfs enabled attr\n");
+		dev_err(drvdata->dev, "failed to create STM sysfs enabled attr\n");
 		goto err_file;
 	}
 
-	if (sysfs_create_file(stm.kobj, &entity_attr.attr))
-		dev_err(stm.dev, "failed to create STM sysfs entity attr\n");
+	if (sysfs_create_file(drvdata->kobj, &dev_attr_entity.attr))
+		dev_err(drvdata->dev, "failed to create STM sysfs entity attr\n");
 
 	return 0;
 err_file:
-	kobject_put(stm.kobj);
+	kobject_put(drvdata->kobj);
 err_create:
 	return ret;
 }
 
 static void __devexit stm_sysfs_exit(void)
 {
-	sysfs_remove_file(stm.kobj, &entity_attr.attr);
-	sysfs_remove_file(stm.kobj, &enabled_attr.attr);
-	kobject_put(stm.kobj);
+	sysfs_remove_file(drvdata->kobj, &dev_attr_entity.attr);
+	sysfs_remove_file(drvdata->kobj, &dev_attr_enabled.attr);
+	kobject_put(drvdata->kobj);
 }
 
 static int __devinit stm_probe(struct platform_device *pdev)
@@ -478,14 +487,20 @@
 	struct resource *res;
 	size_t res_size, bitmap_size;
 
+	drvdata = kzalloc(sizeof(*drvdata), GFP_KERNEL);
+	if (!drvdata) {
+		ret = -ENOMEM;
+		goto err_kzalloc_drvdata;
+	}
+
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	if (!res) {
 		ret = -EINVAL;
 		goto err_res0;
 	}
 
-	stm.base = ioremap_nocache(res->start, resource_size(res));
-	if (!stm.base) {
+	drvdata->base = ioremap_nocache(res->start, resource_size(res));
+	if (!drvdata->base) {
 		ret = -EINVAL;
 		goto err_ioremap0;
 	}
@@ -506,30 +521,42 @@
 		bitmap_size = NR_STM_CHANNEL * sizeof(long);
 	}
 
-	stm.chs.bitmap = kzalloc(bitmap_size, GFP_KERNEL);
-	if (!stm.chs.bitmap) {
+	drvdata->chs.bitmap = kzalloc(bitmap_size, GFP_KERNEL);
+	if (!drvdata->chs.bitmap) {
 		ret = -ENOMEM;
 		goto err_bitmap;
 	}
 
-	stm.chs.base = ioremap_nocache(res->start, res_size);
-	if (!stm.chs.base) {
+	drvdata->chs.base = ioremap_nocache(res->start, res_size);
+	if (!drvdata->chs.base) {
 		ret = -EINVAL;
 		goto err_ioremap1;
 	}
 
-	stm.dev = &pdev->dev;
+	drvdata->dev = &pdev->dev;
 
 	ret = misc_register(&stm_misc);
 	if (ret)
 		goto err_misc;
 
-	stm.src = qdss_get("msm_stm");
-	if (IS_ERR(stm.src)) {
-		ret = PTR_ERR(stm.src);
+	drvdata->src = qdss_get("msm_stm");
+	if (IS_ERR(drvdata->src)) {
+		ret = PTR_ERR(drvdata->src);
 		goto err_qdssget;
 	}
 
+	drvdata->clk = clk_get(drvdata->dev, "core_clk");
+	if (IS_ERR(drvdata->clk)) {
+		ret = PTR_ERR(drvdata->clk);
+		goto err_clk_get;
+	}
+
+	ret = clk_set_rate(drvdata->clk, CORESIGHT_CLK_RATE_TRACE);
+	if (ret)
+		goto err_clk_rate;
+
+	drvdata->entity = OST_ENTITY_ALL;
+
 	ret = stm_sysfs_init();
 	if (ret)
 		goto err_sysfs;
@@ -537,45 +564,60 @@
 	if (stm_boot_enable)
 		stm_enable();
 
-	dev_info(stm.dev, "STM initialized\n");
+	dev_info(drvdata->dev, "STM initialized\n");
 	return 0;
 
 err_sysfs:
-	qdss_put(stm.src);
+err_clk_rate:
+	clk_put(drvdata->clk);
+err_clk_get:
+	qdss_put(drvdata->src);
 err_qdssget:
 	misc_deregister(&stm_misc);
 err_misc:
-	iounmap(stm.chs.base);
+	iounmap(drvdata->chs.base);
 err_ioremap1:
-	kfree(stm.chs.bitmap);
+	kfree(drvdata->chs.bitmap);
 err_bitmap:
 err_res1:
-	iounmap(stm.base);
+	iounmap(drvdata->base);
 err_ioremap0:
 err_res0:
-	dev_err(stm.dev, "STM init failed\n");
+	kfree(drvdata);
+err_kzalloc_drvdata:
+
+	dev_err(drvdata->dev, "STM init failed\n");
 	return ret;
 }
 
 static int __devexit stm_remove(struct platform_device *pdev)
 {
-	if (stm.enabled)
+	if (drvdata->enabled)
 		stm_disable();
 	stm_sysfs_exit();
-	qdss_put(stm.src);
+	clk_put(drvdata->clk);
+	qdss_put(drvdata->src);
 	misc_deregister(&stm_misc);
-	iounmap(stm.chs.base);
-	kfree(stm.chs.bitmap);
-	iounmap(stm.base);
+	iounmap(drvdata->chs.base);
+	kfree(drvdata->chs.bitmap);
+	iounmap(drvdata->base);
+	kfree(drvdata);
 
 	return 0;
 }
 
+static struct of_device_id stm_match[] = {
+	{.compatible = "qcom,msm-stm"},
+	{}
+};
+
 static struct platform_driver stm_driver = {
 	.probe          = stm_probe,
 	.remove         = __devexit_p(stm_remove),
 	.driver         = {
 		.name   = "msm_stm",
+		.owner	= THIS_MODULE,
+		.of_match_table = stm_match,
 	},
 };
 
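
One detail worth calling out in the STM changes above: stm_channel_alloc() claims a stimulus channel by scanning the bitmap with find_next_zero_bit() and retrying until test_and_set_bit() wins the race. The sketch below shows the same claim-and-retry pattern with C11 atomics so it can be compiled outside the kernel; the helper names and the fixed channel count are illustrative assumptions, not part of the patch.

	/* stm_channel_demo.c - illustrative only */
	#include <stdatomic.h>
	#include <stdio.h>

	#define NR_STM_CHANNEL	32

	static atomic_uint chs_bitmap;	/* zero means all channels free */

	static int stm_channel_claim(void)
	{
		unsigned int map, ch;

		for (;;) {
			map = atomic_load(&chs_bitmap);
			for (ch = 0; ch < NR_STM_CHANNEL; ch++)	/* first zero bit */
				if (!(map & (1u << ch)))
					break;
			if (ch == NR_STM_CHANNEL)
				return -1;	/* all channels busy */
			/* claim it; retry if another thread got there first */
			if (atomic_compare_exchange_weak(&chs_bitmap, &map,
							 map | (1u << ch)))
				return (int)ch;
		}
	}

	static void stm_channel_release(int ch)
	{
		atomic_fetch_and(&chs_bitmap, ~(1u << ch));
	}

	int main(void)
	{
		int ch = stm_channel_claim();

		printf("claimed channel %d\n", ch);
		if (ch >= 0)
			stm_channel_release(ch);
		return 0;
	}
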
diff --git a/arch/arm/mach-msm/qdss-tpiu.c b/drivers/coresight/coresight-tpiu.c
similarity index 63%
rename from arch/arm/mach-msm/qdss-tpiu.c
rename to drivers/coresight/coresight-tpiu.c
index fa15635..4b52c4d 100644
--- a/arch/arm/mach-msm/qdss-tpiu.c
+++ b/drivers/coresight/coresight-tpiu.c
@@ -17,11 +17,14 @@
 #include <linux/platform_device.h>
 #include <linux/io.h>
 #include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/clk.h>
+#include <linux/coresight.h>
 
-#include "qdss-priv.h"
+#include "coresight-priv.h"
 
-#define tpiu_writel(tpiu, val, off)	__raw_writel((val), tpiu.base + off)
-#define tpiu_readl(tpiu, off)		__raw_readl(tpiu.base + off)
+#define tpiu_writel(drvdata, val, off)	__raw_writel((val), drvdata->base + off)
+#define tpiu_readl(drvdata, off)	__raw_readl(drvdata->base + off)
 
 #define TPIU_SUPP_PORTSZ				(0x000)
 #define TPIU_CURR_PORTSZ				(0x004)
@@ -47,28 +50,29 @@
 #define TPIU_LOCK()							\
 do {									\
 	mb();								\
-	tpiu_writel(tpiu, 0x0, CS_LAR);					\
+	tpiu_writel(drvdata, 0x0, CORESIGHT_LAR);			\
 } while (0)
 #define TPIU_UNLOCK()							\
 do {									\
-	tpiu_writel(tpiu, CS_UNLOCK_MAGIC, CS_LAR);			\
+	tpiu_writel(drvdata, CORESIGHT_UNLOCK, CORESIGHT_LAR);		\
 	mb();								\
 } while (0)
 
-struct tpiu_ctx {
+struct tpiu_drvdata {
 	void __iomem	*base;
 	bool		enabled;
 	struct device	*dev;
+	struct clk	*clk;
 };
 
-static struct tpiu_ctx tpiu;
+static struct tpiu_drvdata *drvdata;
 
 static void __tpiu_disable(void)
 {
 	TPIU_UNLOCK();
 
-	tpiu_writel(tpiu, 0x3000, TPIU_FFCR);
-	tpiu_writel(tpiu, 0x3040, TPIU_FFCR);
+	tpiu_writel(drvdata, 0x3000, TPIU_FFCR);
+	tpiu_writel(drvdata, 0x3040, TPIU_FFCR);
 
 	TPIU_LOCK();
 }
@@ -76,8 +80,8 @@
 void tpiu_disable(void)
 {
 	__tpiu_disable();
-	tpiu.enabled = false;
-	dev_info(tpiu.dev, "TPIU disabled\n");
+	drvdata->enabled = false;
+	dev_info(drvdata->dev, "TPIU disabled\n");
 }
 
 static int __devinit tpiu_probe(struct platform_device *pdev)
@@ -85,43 +89,74 @@
 	int ret;
 	struct resource *res;
 
+	drvdata = kzalloc(sizeof(*drvdata), GFP_KERNEL);
+	if (!drvdata) {
+		ret = -ENOMEM;
+		goto err_kzalloc_drvdata;
+	}
+
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	if (!res) {
 		ret = -EINVAL;
 		goto err_res;
 	}
 
-	tpiu.base = ioremap_nocache(res->start, resource_size(res));
-	if (!tpiu.base) {
+	drvdata->base = ioremap_nocache(res->start, resource_size(res));
+	if (!drvdata->base) {
 		ret = -EINVAL;
 		goto err_ioremap;
 	}
 
-	tpiu.dev = &pdev->dev;
+	drvdata->dev = &pdev->dev;
 
-	dev_info(tpiu.dev, "TPIU initialized\n");
+	drvdata->clk = clk_get(drvdata->dev, "core_clk");
+	if (IS_ERR(drvdata->clk)) {
+		ret = PTR_ERR(drvdata->clk);
+		goto err_clk_get;
+	}
+
+	ret = clk_set_rate(drvdata->clk, CORESIGHT_CLK_RATE_TRACE);
+	if (ret)
+		goto err_clk_rate;
+
+	dev_info(drvdata->dev, "TPIU initialized\n");
 	return 0;
 
+err_clk_rate:
+	clk_put(drvdata->clk);
+err_clk_get:
+	iounmap(drvdata->base);
 err_ioremap:
 err_res:
-	dev_err(tpiu.dev, "TPIU init failed\n");
+	kfree(drvdata);
+err_kzalloc_drvdata:
+	dev_err(&pdev->dev, "TPIU init failed\n");
 	return ret;
 }
 
 static int __devexit tpiu_remove(struct platform_device *pdev)
 {
-	if (tpiu.enabled)
+	if (drvdata->enabled)
 		tpiu_disable();
-	iounmap(tpiu.base);
+	clk_put(drvdata->clk);
+	iounmap(drvdata->base);
+	kfree(drvdata);
 
 	return 0;
 }
 
+static struct of_device_id tpiu_match[] = {
+	{.compatible = "qcom,msm-tpiu"},
+	{}
+};
+
 static struct platform_driver tpiu_driver = {
 	.probe          = tpiu_probe,
 	.remove         = __devexit_p(tpiu_remove),
 	.driver         = {
 		.name   = "msm_tpiu",
+		.owner	= THIS_MODULE,
+		.of_match_table = tpiu_match,
 	},
 };
 
diff --git a/drivers/coresight/coresight.c b/drivers/coresight/coresight.c
new file mode 100644
index 0000000..055ef55
--- /dev/null
+++ b/drivers/coresight/coresight.c
@@ -0,0 +1,606 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/err.h>
+#include <linux/export.h>
+#include <linux/slab.h>
+#include <linux/mutex.h>
+#include <linux/clk.h>
+#include <linux/coresight.h>
+
+#include "coresight-priv.h"
+
+#define MAX_STR_LEN	(65535)
+
+
+static LIST_HEAD(coresight_orph_conns);
+static DEFINE_MUTEX(coresight_conns_mutex);
+static LIST_HEAD(coresight_devs);
+static DEFINE_MUTEX(coresight_devs_mutex);
+
+
+int coresight_enable(struct coresight_device *csdev, int port)
+{
+	int i;
+	int ret = 0;
+	struct coresight_connection *conn;
+
+	mutex_lock(&csdev->mutex);
+	if (csdev->refcnt[port] == 0) {
+		for (i = 0; i < csdev->nr_conns; i++) {
+			conn = &csdev->conns[i];
+			ret = coresight_enable(conn->child_dev,
+					       conn->child_port);
+			if (ret)
+				goto err_enable_child;
+		}
+		if (csdev->ops->enable)
+			ret = csdev->ops->enable(csdev, port);
+		if (ret)
+			goto err_enable;
+	}
+	csdev->refcnt[port]++;
+	mutex_unlock(&csdev->mutex);
+	return 0;
+err_enable_child:
+	while (i) {
+		conn = &csdev->conns[--i];
+		coresight_disable(conn->child_dev, conn->child_port);
+	}
+err_enable:
+	mutex_unlock(&csdev->mutex);
+	return ret;
+}
+EXPORT_SYMBOL(coresight_enable);
+
+void coresight_disable(struct coresight_device *csdev, int port)
+{
+	int i;
+	struct coresight_connection *conn;
+
+	mutex_lock(&csdev->mutex);
+	if (csdev->refcnt[port] == 1) {
+		if (csdev->ops->disable)
+			csdev->ops->disable(csdev, port);
+		for (i = 0; i < csdev->nr_conns; i++) {
+			conn = &csdev->conns[i];
+			coresight_disable(conn->child_dev, conn->child_port);
+		}
+	}
+	csdev->refcnt[port]--;
+	mutex_unlock(&csdev->mutex);
+}
+EXPORT_SYMBOL(coresight_disable);
+
+static ssize_t coresight_show_type(struct device *dev,
+				   struct device_attribute *attr, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%s\n", dev->type->name);
+}
+
+static struct device_attribute coresight_dev_attrs[] = {
+	__ATTR(type, S_IRUGO, coresight_show_type, NULL),
+	{ },
+};
+
+struct bus_type coresight_bus_type = {
+	.name		= "coresight",
+	.dev_attrs	= coresight_dev_attrs,
+};
+
+static ssize_t coresight_show_enable(struct device *dev,
+				     struct device_attribute *attr, char *buf)
+{
+	struct coresight_device *csdev = to_coresight_device(dev);
+
+	return scnprintf(buf, PAGE_SIZE, "%u\n", (unsigned)csdev->enable);
+}
+
+static ssize_t coresight_store_enable(struct device *dev,
+				      struct device_attribute *attr,
+				      const char *buf, size_t size)
+{
+	int ret = 0;
+	unsigned long val;
+	struct coresight_device *csdev = to_coresight_device(dev);
+
+	if (sscanf(buf, "%lx", &val) != 1)
+		return -EINVAL;
+
+	if (val)
+		ret = coresight_enable(csdev, 0);
+	else
+		coresight_disable(csdev, 0);
+
+	if (ret)
+		return ret;
+	return size;
+}
+static DEVICE_ATTR(enable, S_IRUGO | S_IWUSR, coresight_show_enable,
+		   coresight_store_enable);
+
+static struct attribute *coresight_attrs[] = {
+	&dev_attr_enable.attr,
+	NULL,
+};
+
+static struct attribute_group coresight_attr_grp = {
+	.attrs = coresight_attrs,
+};
+
+static const struct attribute_group *coresight_attr_grps[] = {
+	&coresight_attr_grp,
+	NULL,
+};
+
+static struct device_type coresight_dev_type[CORESIGHT_DEV_TYPE_MAX] = {
+	{
+		.name = "source",
+		.groups = coresight_attr_grps,
+	},
+	{
+		.name = "link",
+	},
+	{
+		.name = "sink",
+		.groups = coresight_attr_grps,
+	},
+};
+
+static void coresight_device_release(struct device *dev)
+{
+	struct coresight_device *csdev = to_coresight_device(dev);
+	mutex_destroy(&csdev->mutex);
+	kfree(csdev);
+}
+
+static void coresight_fixup_orphan_conns(struct coresight_device *csdev)
+{
+	struct coresight_connection *conn, *temp;
+
+	mutex_lock(&coresight_conns_mutex);
+	list_for_each_entry_safe(conn, temp, &coresight_orph_conns, link) {
+		if (conn->child_id == csdev->id) {
+			conn->child_dev = csdev;
+			list_del(&conn->link);
+		}
+	}
+	mutex_unlock(&coresight_conns_mutex);
+}
+
+static void coresight_fixup_device_conns(struct coresight_device *csdev)
+{
+	int i;
+	struct coresight_device *cd;
+	bool found;
+
+	for (i = 0; i < csdev->nr_conns; i++) {
+		found = false;
+		mutex_lock(&coresight_devs_mutex);
+		list_for_each_entry(cd, &coresight_devs, link) {
+			if (csdev->conns[i].child_id == cd->id) {
+				csdev->conns[i].child_dev = cd;
+				found = true;
+				break;
+			}
+		}
+		mutex_unlock(&coresight_devs_mutex);
+		if (!found) {
+			mutex_lock(&coresight_conns_mutex);
+			list_add_tail(&csdev->conns[i].link,
+				      &coresight_orph_conns);
+			mutex_unlock(&coresight_conns_mutex);
+		}
+	}
+}
+
+struct coresight_device *coresight_register(struct coresight_desc *desc)
+{
+	int i;
+	int ret;
+	int *refcnt;
+	struct coresight_device *csdev;
+	struct coresight_connection *conns;
+
+	csdev = kzalloc(sizeof(*csdev), GFP_KERNEL);
+	if (!csdev) {
+		ret = -ENOMEM;
+		goto err_kzalloc_csdev;
+	}
+
+	mutex_init(&csdev->mutex);
+	csdev->id = desc->pdata->id;
+
+	refcnt = kzalloc(sizeof(*refcnt) * desc->pdata->nr_ports, GFP_KERNEL);
+	if (!refcnt) {
+		ret = -ENOMEM;
+		goto err_kzalloc_refcnt;
+	}
+	csdev->refcnt = refcnt;
+
+	csdev->nr_conns = desc->pdata->nr_children;
+	conns = kzalloc(sizeof(*conns) * csdev->nr_conns, GFP_KERNEL);
+	if (!conns) {
+		ret = -ENOMEM;
+		goto err_kzalloc_conns;
+	}
+	for (i = 0; i < csdev->nr_conns; i++) {
+		conns[i].child_id = desc->pdata->child_ids[i];
+		conns[i].child_port = desc->pdata->child_ports[i];
+	}
+	csdev->conns = conns;
+
+	csdev->ops = desc->ops;
+	csdev->owner = desc->owner;
+
+	csdev->dev.type = &coresight_dev_type[desc->type];
+	csdev->dev.groups = desc->groups;
+	csdev->dev.parent = desc->dev;
+	csdev->dev.bus = &coresight_bus_type;
+	csdev->dev.release = coresight_device_release;
+	dev_set_name(&csdev->dev, "%s", desc->pdata->name);
+
+	coresight_fixup_device_conns(csdev);
+	ret = device_register(&csdev->dev);
+	if (ret)
+		goto err_dev_reg;
+	coresight_fixup_orphan_conns(csdev);
+
+	mutex_lock(&coresight_devs_mutex);
+	list_add_tail(&csdev->link, &coresight_devs);
+	mutex_unlock(&coresight_devs_mutex);
+
+	return csdev;
+err_dev_reg:
+	put_device(&csdev->dev);
+	kfree(conns);
+err_kzalloc_conns:
+	kfree(refcnt);
+err_kzalloc_refcnt:
+	mutex_destroy(&csdev->mutex);
+	kfree(csdev);
+err_kzalloc_csdev:
+	return ERR_PTR(ret);
+}
+EXPORT_SYMBOL(coresight_register);
+
+void coresight_unregister(struct coresight_device *csdev)
+{
+	if (IS_ERR_OR_NULL(csdev))
+		return;
+
+	if (get_device(&csdev->dev)) {
+		mutex_lock(&csdev->mutex);
+		device_unregister(&csdev->dev);
+		mutex_unlock(&csdev->mutex);
+		put_device(&csdev->dev);
+	}
+}
+EXPORT_SYMBOL(coresight_unregister);
+
+static int __init coresight_init(void)
+{
+	return bus_register(&coresight_bus_type);
+}
+subsys_initcall(coresight_init);
+
+static void __exit coresight_exit(void)
+{
+	bus_unregister(&coresight_bus_type);
+}
+module_exit(coresight_exit);
+
+MODULE_LICENSE("GPL v2");
+/*
+ * Exclusion rules for structure fields.
+ *
+ * S: qdss.sources_mutex protected.
+ * I: qdss.sink_mutex protected.
+ * C: qdss.clk_mutex protected.
+ */
+struct qdss_ctx {
+	struct kobject			*modulekobj;
+	uint8_t				afamily;
+	struct list_head		sources;	/* S: sources list */
+	struct mutex			sources_mutex;
+	uint8_t				sink_count;	/* I: sink count */
+	struct mutex			sink_mutex;
+	uint8_t				max_clk;
+	struct clk			*clk;
+};
+
+static struct qdss_ctx qdss;
+
+/**
+ * qdss_get - get the qdss source handle
+ * @name: name of the qdss source
+ *
+ * Searches the sources list to get the qdss source handle for this source.
+ *
+ * CONTEXT:
+ * Typically called from init or probe functions
+ *
+ * RETURNS:
+ * pointer to struct qdss_source on success, ERR_PTR(-ENOENT) on failure
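+ *
+ * Example (illustrative sketch only; "msm_stm" is the name used by the STM
+ * driver above, any name present in the sources list works the same way):
+ *
+ *	drvdata->src = qdss_get("msm_stm");
+ *	if (IS_ERR(drvdata->src))
+ *		return PTR_ERR(drvdata->src);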
+ */
+struct qdss_source *qdss_get(const char *name)
+{
+	struct qdss_source *src, *source = NULL;
+
+	mutex_lock(&qdss.sources_mutex);
+	list_for_each_entry(src, &qdss.sources, link) {
+		if (src->name) {
+			if (strncmp(src->name, name, MAX_STR_LEN))
+				continue;
+			source = src;
+			break;
+		}
+	}
+	mutex_unlock(&qdss.sources_mutex);
+
+	return source ? source : ERR_PTR(-ENOENT);
+}
+EXPORT_SYMBOL(qdss_get);
+
+/**
+ * qdss_put - release the qdss source handle
+ * @src: handle for the qdss source being released
+ *
+ * CONTEXT:
+ * Typically called from driver remove or exit functions
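+ *
+ * Example (illustrative; mirrors the STM driver's remove path above):
+ *
+ *	qdss_put(drvdata->src);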
+ */
+void qdss_put(struct qdss_source *src)
+{
+}
+EXPORT_SYMBOL(qdss_put);
+
+/**
+ * qdss_enable - enable qdss for the source
+ * @src: handle for the source making the call
+ *
+ * Enables qdss block (relevant funnel ports and sink) if not already
+ * enabled, otherwise increments the reference count
+ *
+ * CONTEXT:
+ * Might sleep. Uses a mutex lock. Should be called from a non-atomic context.
+ *
+ * RETURNS:
+ * 0 on success, non-zero on failure
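+ *
+ * Example (illustrative sketch; assumes @src was obtained via qdss_get()):
+ *
+ *	ret = qdss_enable(src);
+ *	if (ret)
+ *		return ret;
+ *	... generate trace data ...
+ *	qdss_disable(src);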
+ */
+int qdss_enable(struct qdss_source *src)
+{
+	if (!src)
+		return -EINVAL;
+
+	if (qdss.afamily) {
+		mutex_lock(&qdss.sink_mutex);
+		if (qdss.sink_count == 0) {
+			tpiu_disable();
+			/* enable ETB first to avoid losing any trace data */
+			etb_enable();
+		}
+		qdss.sink_count++;
+		mutex_unlock(&qdss.sink_mutex);
+	}
+
+	funnel_enable(0x0, src->fport_mask);
+	return 0;
+}
+EXPORT_SYMBOL(qdss_enable);
+
+/**
+ * qdss_disable - disable qdss for the source
+ * @src: handle for the source making the call
+ *
+ * Disables qdss block (relevant funnel ports and sink) if the reference count
+ * is one, otherwise decrements the reference count
+ *
+ * CONTEXT:
+ * Might sleep. Uses a mutex lock. Should be called from a non-atomic context.
+ */
+void qdss_disable(struct qdss_source *src)
+{
+	if (!src)
+		return;
+
+	if (qdss.afamily) {
+		mutex_lock(&qdss.sink_mutex);
+		if (WARN(qdss.sink_count == 0, "qdss is unbalanced\n"))
+			goto out;
+		if (qdss.sink_count == 1) {
+			etb_dump();
+			etb_disable();
+		}
+		qdss.sink_count--;
+		mutex_unlock(&qdss.sink_mutex);
+	}
+
+	funnel_disable(0x0, src->fport_mask);
+	return;
+out:
+	mutex_unlock(&qdss.sink_mutex);
+}
+EXPORT_SYMBOL(qdss_disable);
+
+/**
+ * qdss_disable_sink - force disable the current qdss sink(s)
+ *
+ * Force disable the current qdss sink(s) to stop the sink from accepting any
+ * trace generated subsequent to this call. This function should only be used
+ * as a way to stop the sink from getting polluted with trace data that is
+ * uninteresting after an event of interest has occurred.
+ *
+ * CONTEXT:
+ * Can be called from atomic or non-atomic context.
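+ *
+ * Example (illustrative sketch; my_fatal_error_handler is a hypothetical
+ * error hook that freezes the sink so the trace leading up to the error
+ * is preserved):
+ *
+ *	static void my_fatal_error_handler(void)
+ *	{
+ *		qdss_disable_sink();
+ *	}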
+ */
+void qdss_disable_sink(void)
+{
+	if (qdss.afamily) {
+		etb_dump();
+		etb_disable();
+	}
+}
+EXPORT_SYMBOL(qdss_disable_sink);
+
+struct kobject *qdss_get_modulekobj(void)
+{
+	return qdss.modulekobj;
+}
+
+#define QDSS_ATTR(name)						\
+static struct kobj_attribute name##_attr =				\
+		__ATTR(name, S_IRUGO | S_IWUSR, name##_show, name##_store)
+
+static ssize_t max_clk_store(struct kobject *kobj,
+			struct kobj_attribute *attr,
+			const char *buf, size_t n)
+{
+	unsigned long val;
+
+	if (sscanf(buf, "%lx", &val) != 1)
+		return -EINVAL;
+
+	qdss.max_clk = val;
+	return n;
+}
+static ssize_t max_clk_show(struct kobject *kobj,
+			struct kobj_attribute *attr,
+			char *buf)
+{
+	unsigned long val = qdss.max_clk;
+	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+QDSS_ATTR(max_clk);
+
+static void __devinit qdss_add_sources(struct qdss_source *srcs, size_t num)
+{
+	mutex_lock(&qdss.sources_mutex);
+	while (num--) {
+		list_add_tail(&srcs->link, &qdss.sources);
+		srcs++;
+	}
+	mutex_unlock(&qdss.sources_mutex);
+}
+
+static int __init qdss_sysfs_init(void)
+{
+	int ret;
+
+	qdss.modulekobj = kset_find_obj(module_kset, KBUILD_MODNAME);
+	if (!qdss.modulekobj) {
+		pr_err("failed to find QDSS sysfs module kobject\n");
+		ret = -ENOENT;
+		goto err;
+	}
+
+	ret = sysfs_create_file(qdss.modulekobj, &max_clk_attr.attr);
+	if (ret) {
+		pr_err("failed to create QDSS sysfs max_clk attribute\n");
+		goto err;
+	}
+
+	return 0;
+err:
+	return ret;
+}
+
+static void __devexit qdss_sysfs_exit(void)
+{
+	sysfs_remove_file(qdss.modulekobj, &max_clk_attr.attr);
+}
+
+static int __devinit qdss_probe(struct platform_device *pdev)
+{
+	int ret = 0;
+	struct msm_qdss_platform_data *pdata;
+
+	mutex_init(&qdss.sources_mutex);
+	mutex_init(&qdss.sink_mutex);
+
+	INIT_LIST_HEAD(&qdss.sources);
+
+	pdata = pdev->dev.platform_data;
+	if (!pdata) {
+		ret = -EINVAL;
+		goto err_pdata;
+	}
+
+	qdss.afamily = pdata->afamily;
+	qdss_add_sources(pdata->src_table, pdata->size);
+
+	pr_info("QDSS arch initialized\n");
+	return 0;
+err_pdata:
+	mutex_destroy(&qdss.sink_mutex);
+	mutex_destroy(&qdss.sources_mutex);
+	pr_err("QDSS init failed\n");
+	return ret;
+}
+
+static int __devexit qdss_remove(struct platform_device *pdev)
+{
+	qdss_sysfs_exit();
+	mutex_destroy(&qdss.sink_mutex);
+	mutex_destroy(&qdss.sources_mutex);
+
+	return 0;
+}
+
+static struct of_device_id qdss_match[] = {
+	{.compatible = "qcom,msm-qdss"},
+	{}
+};
+
+static struct platform_driver qdss_driver = {
+	.probe          = qdss_probe,
+	.remove         = __devexit_p(qdss_remove),
+	.driver         = {
+		.name   = "msm_qdss",
+		.owner	= THIS_MODULE,
+		.of_match_table = qdss_match,
+	},
+};
+
+static int __init qdss_init(void)
+{
+	return platform_driver_register(&qdss_driver);
+}
+arch_initcall(qdss_init);
+
+static int __init qdss_module_init(void)
+{
+	int ret;
+
+	ret = qdss_sysfs_init();
+	if (ret)
+		goto err_sysfs;
+
+	pr_info("QDSS module initialized\n");
+	return 0;
+err_sysfs:
+	return ret;
+}
+module_init(qdss_module_init);
+
+static void __exit qdss_exit(void)
+{
+	platform_driver_unregister(&qdss_driver);
+}
+module_exit(qdss_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Qualcomm Debug SubSystem Driver");
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index 0dcf1a4..03fdc5a 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -555,20 +555,24 @@
 	  This option enables support for on-chip GPIO found on Qualcomm PM8xxx
 	  PMICs through RPC.
 
-config GPIO_QPNP
-	depends on ARCH_MSMCOPPER
+config GPIO_QPNP_PIN
+	depends on ARCH_MSM8974
+	depends on SPMI
 	depends on OF_SPMI
 	depends on MSM_QPNP_INT
-	tristate "Qualcomm QPNP GPIO support"
+	tristate "Qualcomm QPNP gpio support"
 	help
 	  Say 'y' here to include support for the Qualcomm QPNP gpio
-	  support. QPNP is a SPMI based PMIC implementation.
+	  driver. This driver supports Device Tree and allows a
+	  device_node to be registered as a gpio-controller. It
+	  does not handle gpio interrupts directly. That work is handled
+	  by CONFIG_MSM_QPNP_INT.
 
-config GPIO_QPNP_DEBUG
-	depends on GPIO_QPNP
+config GPIO_QPNP_PIN_DEBUG
+	depends on GPIO_QPNP_PIN
 	depends on DEBUG_FS
 	bool "Qualcomm QPNP GPIO debug support"
 	help
 	  Say 'y' here to include debug support for the Qualcomm
-	  QPNP gpio support
+	  QPNP gpio driver.
 endif
diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile
index d15b628..405e498 100644
--- a/drivers/gpio/Makefile
+++ b/drivers/gpio/Makefile
@@ -48,7 +48,7 @@
 obj-$(CONFIG_GPIO_PM8XXX_MPP) 	+= pm8xxx-mpp.o
 obj-$(CONFIG_GPIO_PM8XXX_RPC)	+= gpio-pm8xxx-rpc.o
 obj-$(CONFIG_GPIO_PXA)		+= gpio-pxa.o
-obj-$(CONFIG_GPIO_QPNP)		+= qpnp-gpio.o
+obj-$(CONFIG_GPIO_QPNP_PIN)	+= qpnp-pin.o
 obj-$(CONFIG_GPIO_RDC321X)	+= gpio-rdc321x.o
 obj-$(CONFIG_PLAT_SAMSUNG)	+= gpio-samsung.o
 obj-$(CONFIG_ARCH_SA1100)	+= gpio-sa1100.o
diff --git a/drivers/gpio/gpio-msm-common.c b/drivers/gpio/gpio-msm-common.c
index 9a9a783..5539950 100644
--- a/drivers/gpio/gpio-msm-common.c
+++ b/drivers/gpio/gpio-msm-common.c
@@ -100,7 +100,7 @@
 	DECLARE_BITMAP(enabled_irqs, NR_MSM_GPIOS);
 	DECLARE_BITMAP(wake_irqs, NR_MSM_GPIOS);
 	DECLARE_BITMAP(dual_edge_irqs, NR_MSM_GPIOS);
-	struct irq_domain domain;
+	struct irq_domain *domain;
 };
 
 static DEFINE_SPINLOCK(tlmm_lock);
@@ -152,15 +152,14 @@
 static int msm_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
 {
 	struct msm_gpio_dev *g_dev = to_msm_gpio_dev(chip);
-	struct irq_domain *domain = &g_dev->domain;
-	return domain->irq_base + (offset - chip->base);
+	struct irq_domain *domain = g_dev->domain;
+	return irq_linear_revmap(domain, offset - chip->base);
 }
 
 static inline int msm_irq_to_gpio(struct gpio_chip *chip, unsigned irq)
 {
-	struct msm_gpio_dev *g_dev = to_msm_gpio_dev(chip);
-	struct irq_domain *domain = &g_dev->domain;
-	return irq - domain->irq_base;
+	struct irq_data *irq_data = irq_get_irq_data(irq);
+	return irq_data->hwirq;
 }
 #else
 static int msm_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
@@ -391,6 +390,7 @@
  */
 static struct lock_class_key msm_gpio_lock_class;
 
+/* TODO: This should be a real platform_driver */
 static int __devinit msm_gpio_probe(void)
 {
 	int i, irq, ret;
@@ -573,12 +573,12 @@
 EXPORT_SYMBOL(msm_gpio_install_direct_irq);
 
 #ifdef CONFIG_OF
-static int msm_gpio_domain_dt_translate(struct irq_domain *d,
-					struct device_node *controller,
-					const u32 *intspec,
-					unsigned int intsize,
-					unsigned long *out_hwirq,
-					unsigned int *out_type)
+static int msm_gpio_irq_domain_xlate(struct irq_domain *d,
+				     struct device_node *controller,
+				     const u32 *intspec,
+				     unsigned int intsize,
+				     unsigned long *out_hwirq,
+				     unsigned int *out_type)
 {
 	if (d->of_node != controller)
 		return -EINVAL;
@@ -593,32 +593,32 @@
 	return 0;
 }
 
+/*
+ * TODO: this really should be doing all the things that msm_gpio_probe() does,
+ * but since msm_gpio_probe() is called unconditionally for DT and non-DT
+ * configs, we can't duplicate it here. This should be fixed.
+ */
+int msm_gpio_irq_domain_map(struct irq_domain *d, unsigned int irq,
+			  irq_hw_number_t hwirq)
+{
+	return 0;
+}
+
 static struct irq_domain_ops msm_gpio_irq_domain_ops = {
-	.dt_translate = msm_gpio_domain_dt_translate,
+	.xlate = msm_gpio_irq_domain_xlate,
+	.map = msm_gpio_irq_domain_map,
 };
 
 int __init msm_gpio_of_init(struct device_node *node,
 			    struct device_node *parent)
 {
-	struct irq_domain *domain = &msm_gpio.domain;
-	int start;
-
-	start = irq_domain_find_free_range(0, NR_MSM_GPIOS);
-	domain->irq_base = irq_alloc_descs(start, 0, NR_MSM_GPIOS,
-							numa_node_id());
-	if (IS_ERR_VALUE(domain->irq_base)) {
-		WARN(1, "Cannot allocate irq_descs @ IRQ%d\n", start);
-		return domain->irq_base;
+	msm_gpio.domain = irq_domain_add_linear(node, NR_MSM_GPIOS,
+			&msm_gpio_irq_domain_ops, &msm_gpio);
+	if (!msm_gpio.domain) {
+		WARN(1, "Cannot allocate irq_domain\n");
+		return -ENOMEM;
 	}
 
-	domain->nr_irq = NR_MSM_GPIOS;
-	domain->of_node = of_node_get(node);
-	domain->priv = &msm_gpio;
-	domain->ops = &msm_gpio_irq_domain_ops;
-	irq_domain_add(domain);
-	msm_gpio.gpio_chip.of_node = of_node_get(node);
-	pr_debug("%s: irq_base = %u\n", __func__, domain->irq_base);
-
 	return 0;
 }
 #endif
diff --git a/drivers/gpio/qpnp-gpio.c b/drivers/gpio/qpnp-gpio.c
deleted file mode 100644
index 97859e5..0000000
--- a/drivers/gpio/qpnp-gpio.c
+++ /dev/null
@@ -1,1091 +0,0 @@
-/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- */
-
-#define pr_fmt(fmt) "%s: " fmt, __func__
-
-#include <linux/interrupt.h>
-#include <linux/types.h>
-#include <linux/spmi.h>
-#include <linux/platform_device.h>
-#include <linux/debugfs.h>
-#include <linux/gpio.h>
-#include <linux/slab.h>
-#include <linux/of.h>
-#include <linux/of_gpio.h>
-#include <linux/of_irq.h>
-#include <linux/export.h>
-#include <linux/module.h>
-#include <linux/qpnp/gpio.h>
-#include <linux/export.h>
-
-#include <mach/qpnp.h>
-
-#define Q_REG_ADDR(q_spec, reg_index)	\
-		((q_spec)->offset + reg_index)
-
-#define Q_REG_STATUS1			0x8
-#define Q_NUM_CTL_REGS			7
-
-/* type registers base address offsets */
-#define Q_REG_TYPE			0x10
-#define Q_REG_SUBTYPE			0x11
-
-/* gpio peripheral type and subtype values */
-#define Q_GPIO_TYPE			0x10
-#define Q_GPIO_SUBTYPE_GPIO_4CH		0x1
-#define Q_GPIO_SUBTYPE_GPIOC_4CH	0x5
-#define Q_GPIO_SUBTYPE_GPIO_8CH		0x9
-#define Q_GPIO_SUBTYPE_GPIOC_8CH	0xD
-
-/* control register base address offsets */
-#define Q_REG_MODE_CTL			0x40
-#define Q_REG_DIG_PULL_CTL		0x42
-#define Q_REG_DIG_IN_CTL		0x43
-#define Q_REG_DIG_VIN_CTL		0x44
-#define Q_REG_DIG_OUT_CTL		0x45
-#define Q_REG_EN_CTL			0x46
-
-/* control register regs array indices */
-#define Q_REG_I_MODE_CTL		0
-#define Q_REG_I_DIG_PULL_CTL		2
-#define Q_REG_I_DIG_IN_CTL		3
-#define Q_REG_I_DIG_VIN_CTL		4
-#define Q_REG_I_DIG_OUT_CTL		5
-#define Q_REG_I_EN_CTL			6
-
-/* control reg: mode */
-#define Q_REG_OUT_INVERT_SHIFT		0
-#define Q_REG_OUT_INVERT_MASK		0x1
-#define Q_REG_SRC_SEL_SHIFT		1
-#define Q_REG_SRC_SEL_MASK		0xE
-#define Q_REG_MODE_SEL_SHIFT		4
-#define Q_REG_MODE_SEL_MASK		0x70
-
-/* control reg: dig_vin */
-#define Q_REG_VIN_SHIFT			0
-#define Q_REG_VIN_MASK			0x7
-
-/* control reg: dig_pull */
-#define Q_REG_PULL_SHIFT		0
-#define Q_REG_PULL_MASK			0x7
-
-/* control reg: dig_out */
-#define Q_REG_OUT_STRENGTH_SHIFT	0
-#define Q_REG_OUT_STRENGTH_MASK		0x3
-#define Q_REG_OUT_TYPE_SHIFT		4
-#define Q_REG_OUT_TYPE_MASK		0x30
-
-/* control reg: en */
-#define Q_REG_MASTER_EN_SHIFT		7
-#define Q_REG_MASTER_EN_MASK		0x80
-
-enum qpnp_gpio_param_type {
-	Q_GPIO_CFG_DIRECTION,
-	Q_GPIO_CFG_OUTPUT_TYPE,
-	Q_GPIO_CFG_INVERT,
-	Q_GPIO_CFG_PULL,
-	Q_GPIO_CFG_VIN_SEL,
-	Q_GPIO_CFG_OUT_STRENGTH,
-	Q_GPIO_CFG_SRC_SELECT,
-	Q_GPIO_CFG_MASTER_EN,
-	Q_GPIO_CFG_INVALID,
-};
-
-#define Q_NUM_PARAMS			Q_GPIO_CFG_INVALID
-
-/* param error checking */
-#define QPNP_GPIO_DIR_INVALID		3
-#define QPNP_GPIO_INVERT_INVALID	2
-#define QPNP_GPIO_OUT_BUF_INVALID	3
-#define QPNP_GPIO_VIN_INVALID		8
-#define QPNP_GPIO_PULL_INVALID		6
-#define QPNP_GPIO_OUT_STRENGTH_INVALID	4
-#define QPNP_GPIO_SRC_INVALID		8
-#define QPNP_GPIO_MASTER_INVALID	2
-
-struct qpnp_gpio_spec {
-	uint8_t slave;			/* 0-15 */
-	uint16_t offset;		/* 0-255 */
-	uint32_t gpio_chip_idx;		/* offset from gpio_chip base */
-	uint32_t pmic_gpio;		/* PMIC gpio number */
-	int irq;			/* logical IRQ number */
-	u8 regs[Q_NUM_CTL_REGS];	/* Control regs */
-	u8 type;			/* peripheral type */
-	u8 subtype;			/* peripheral subtype */
-	struct device_node *node;
-	enum qpnp_gpio_param_type params[Q_NUM_PARAMS];
-	struct qpnp_gpio_chip *q_chip;
-};
-
-struct qpnp_gpio_chip {
-	struct gpio_chip	gpio_chip;
-	struct spmi_device	*spmi;
-	struct qpnp_gpio_spec	**pmic_gpios;
-	struct qpnp_gpio_spec	**chip_gpios;
-	uint32_t		pmic_gpio_lowest;
-	uint32_t		pmic_gpio_highest;
-	struct device_node	*int_ctrl;
-	struct list_head	chip_list;
-	struct dentry		*dfs_dir;
-};
-
-static LIST_HEAD(qpnp_gpio_chips);
-static DEFINE_MUTEX(qpnp_gpio_chips_lock);
-
-static inline void qpnp_pmic_gpio_set_spec(struct qpnp_gpio_chip *q_chip,
-					      uint32_t pmic_gpio,
-					      struct qpnp_gpio_spec *spec)
-{
-	q_chip->pmic_gpios[pmic_gpio - q_chip->pmic_gpio_lowest] = spec;
-}
-
-static inline struct qpnp_gpio_spec *qpnp_pmic_gpio_get_spec(
-						struct qpnp_gpio_chip *q_chip,
-						uint32_t pmic_gpio)
-{
-	if (pmic_gpio < q_chip->pmic_gpio_lowest ||
-	    pmic_gpio > q_chip->pmic_gpio_highest)
-		return NULL;
-
-	return q_chip->pmic_gpios[pmic_gpio - q_chip->pmic_gpio_lowest];
-}
-
-static inline struct qpnp_gpio_spec *qpnp_chip_gpio_get_spec(
-						struct qpnp_gpio_chip *q_chip,
-						uint32_t chip_gpio)
-{
-	if (chip_gpio > q_chip->gpio_chip.ngpio)
-		return NULL;
-
-	return q_chip->chip_gpios[chip_gpio];
-}
-
-static inline void qpnp_chip_gpio_set_spec(struct qpnp_gpio_chip *q_chip,
-					      uint32_t chip_gpio,
-					      struct qpnp_gpio_spec *spec)
-{
-	q_chip->chip_gpios[chip_gpio] = spec;
-}
-
-static int qpnp_gpio_check_config(struct qpnp_gpio_spec *q_spec,
-				  struct qpnp_gpio_cfg *param)
-{
-	int gpio = q_spec->pmic_gpio;
-
-	if (param->direction >= QPNP_GPIO_DIR_INVALID)
-		pr_err("invalid direction for gpio %d\n", gpio);
-	else if (param->invert >= QPNP_GPIO_INVERT_INVALID)
-		pr_err("invalid invert polarity for gpio %d\n", gpio);
-	else if (param->src_select >= QPNP_GPIO_SRC_INVALID)
-		pr_err("invalid source select for gpio %d\n", gpio);
-	else if (param->out_strength >= QPNP_GPIO_OUT_STRENGTH_INVALID ||
-		 param->out_strength == 0)
-		pr_err("invalid out strength for gpio %d\n", gpio);
-	else if (param->output_type >= QPNP_GPIO_OUT_BUF_INVALID)
-		pr_err("invalid out type for gpio %d\n", gpio);
-	else if ((param->output_type == QPNP_GPIO_OUT_BUF_OPEN_DRAIN_NMOS ||
-		 param->output_type == QPNP_GPIO_OUT_BUF_OPEN_DRAIN_PMOS) &&
-		 (q_spec->subtype == Q_GPIO_SUBTYPE_GPIOC_4CH ||
-		 (q_spec->subtype == Q_GPIO_SUBTYPE_GPIOC_8CH)))
-		pr_err("invalid out type for gpio %d\n"
-		       "gpioc does not support open-drain\n", gpio);
-	else if (param->vin_sel >= QPNP_GPIO_VIN_INVALID)
-		pr_err("invalid vin select value for gpio %d\n", gpio);
-	else if (param->pull >= QPNP_GPIO_PULL_INVALID)
-		pr_err("invalid pull value for gpio %d\n", gpio);
-	else if (param->master_en >= QPNP_GPIO_MASTER_INVALID)
-		pr_err("invalid master_en value for gpio %d\n", gpio);
-	else
-		return 0;
-
-	return -EINVAL;
-}
-
-static inline u8 q_reg_get(u8 *reg, int shift, int mask)
-{
-	return (*reg & mask) >> shift;
-}
-
-static inline void q_reg_set(u8 *reg, int shift, int mask, int value)
-{
-	*reg |= (value << shift) & mask;
-}
-
-static inline void q_reg_clr_set(u8 *reg, int shift, int mask, int value)
-{
-	*reg &= ~mask;
-	*reg |= (value << shift) & mask;
-}
-
-static int qpnp_gpio_cache_regs(struct qpnp_gpio_chip *q_chip,
-				struct qpnp_gpio_spec *q_spec)
-{
-	int rc;
-	struct device *dev = &q_chip->spmi->dev;
-
-	rc = spmi_ext_register_readl(q_chip->spmi->ctrl, q_spec->slave,
-				     Q_REG_ADDR(q_spec, Q_REG_MODE_CTL),
-				     &q_spec->regs[Q_REG_I_MODE_CTL],
-				     Q_NUM_CTL_REGS);
-	if (rc)
-		dev_err(dev, "%s: unable to read control regs\n", __func__);
-
-	return rc;
-}
-
-static int _qpnp_gpio_config(struct qpnp_gpio_chip *q_chip,
-			     struct qpnp_gpio_spec *q_spec,
-			     struct qpnp_gpio_cfg *param)
-{
-	struct device *dev = &q_chip->spmi->dev;
-	int rc;
-
-	rc = qpnp_gpio_check_config(q_spec, param);
-	if (rc)
-		goto gpio_cfg;
-
-	/* set direction */
-	q_reg_clr_set(&q_spec->regs[Q_REG_I_MODE_CTL],
-			  Q_REG_MODE_SEL_SHIFT, Q_REG_MODE_SEL_MASK,
-			  param->direction);
-
-	/* output specific configuration */
-	q_reg_clr_set(&q_spec->regs[Q_REG_I_MODE_CTL],
-			  Q_REG_OUT_INVERT_SHIFT, Q_REG_OUT_INVERT_MASK,
-			  param->invert);
-	q_reg_clr_set(&q_spec->regs[Q_REG_I_MODE_CTL],
-			  Q_REG_SRC_SEL_SHIFT, Q_REG_SRC_SEL_MASK,
-			  param->src_select);
-	q_reg_clr_set(&q_spec->regs[Q_REG_I_DIG_OUT_CTL],
-			  Q_REG_OUT_STRENGTH_SHIFT, Q_REG_OUT_STRENGTH_MASK,
-			  param->out_strength);
-	q_reg_clr_set(&q_spec->regs[Q_REG_I_DIG_OUT_CTL],
-			  Q_REG_OUT_TYPE_SHIFT, Q_REG_OUT_TYPE_MASK,
-			  param->output_type);
-
-	/* config applicable for both input / output */
-	q_reg_clr_set(&q_spec->regs[Q_REG_I_DIG_VIN_CTL],
-			  Q_REG_VIN_SHIFT, Q_REG_VIN_MASK,
-			  param->vin_sel);
-	q_reg_clr_set(&q_spec->regs[Q_REG_I_DIG_PULL_CTL],
-			  Q_REG_PULL_SHIFT, Q_REG_PULL_MASK,
-			  param->pull);
-	q_reg_clr_set(&q_spec->regs[Q_REG_I_EN_CTL],
-			  Q_REG_MASTER_EN_SHIFT, Q_REG_MASTER_EN_MASK,
-			  param->master_en);
-
-	rc = spmi_ext_register_writel(q_chip->spmi->ctrl, q_spec->slave,
-			      Q_REG_ADDR(q_spec, Q_REG_MODE_CTL),
-			      &q_spec->regs[Q_REG_I_MODE_CTL], Q_NUM_CTL_REGS);
-	if (rc) {
-		dev_err(&q_chip->spmi->dev, "%s: unable to write master"
-						" enable\n", __func__);
-		goto gpio_cfg;
-	}
-
-	return 0;
-
-gpio_cfg:
-	dev_err(dev, "%s: unable to set default config for"
-		     " pmic gpio %d\n", __func__, q_spec->pmic_gpio);
-
-	return rc;
-}
-
-int qpnp_gpio_config(int gpio, struct qpnp_gpio_cfg *param)
-{
-	int rc, chip_offset;
-	struct qpnp_gpio_chip *q_chip;
-	struct qpnp_gpio_spec *q_spec = NULL;
-	struct gpio_chip *gpio_chip;
-
-	if (param == NULL)
-		return -EINVAL;
-
-	mutex_lock(&qpnp_gpio_chips_lock);
-	list_for_each_entry(q_chip, &qpnp_gpio_chips, chip_list) {
-		gpio_chip = &q_chip->gpio_chip;
-		if (gpio >= gpio_chip->base
-				&& gpio < gpio_chip->base + gpio_chip->ngpio) {
-			chip_offset = gpio - gpio_chip->base;
-			q_spec = qpnp_chip_gpio_get_spec(q_chip, chip_offset);
-			if (WARN_ON(!q_spec)) {
-				mutex_unlock(&qpnp_gpio_chips_lock);
-				return -ENODEV;
-			}
-			break;
-		}
-	}
-	mutex_unlock(&qpnp_gpio_chips_lock);
-
-	rc = _qpnp_gpio_config(q_chip, q_spec, param);
-
-	return rc;
-}
-EXPORT_SYMBOL(qpnp_gpio_config);
-
-int qpnp_gpio_map_gpio(uint16_t slave_id, uint32_t pmic_gpio)
-{
-	struct qpnp_gpio_chip *q_chip;
-	struct qpnp_gpio_spec *q_spec = NULL;
-
-	mutex_lock(&qpnp_gpio_chips_lock);
-	list_for_each_entry(q_chip, &qpnp_gpio_chips, chip_list) {
-		if (q_chip->spmi->sid != slave_id)
-			continue;
-		if (q_chip->pmic_gpio_lowest <= pmic_gpio &&
-		    q_chip->pmic_gpio_highest >= pmic_gpio) {
-			q_spec = qpnp_pmic_gpio_get_spec(q_chip, pmic_gpio);
-			mutex_unlock(&qpnp_gpio_chips_lock);
-			if (WARN_ON(!q_spec))
-				return -ENODEV;
-			return q_chip->gpio_chip.base + q_spec->gpio_chip_idx;
-		}
-	}
-	mutex_unlock(&qpnp_gpio_chips_lock);
-	return -EINVAL;
-}
-EXPORT_SYMBOL(qpnp_gpio_map_gpio);
-
-static int qpnp_gpio_to_irq(struct gpio_chip *gpio_chip, unsigned offset)
-{
-	struct qpnp_gpio_chip *q_chip = dev_get_drvdata(gpio_chip->dev);
-	struct qpnp_gpio_spec *q_spec;
-
-	q_spec = qpnp_chip_gpio_get_spec(q_chip, offset);
-	if (!q_spec)
-		return -EINVAL;
-
-	return q_spec->irq;
-}
-
-static int qpnp_gpio_get(struct gpio_chip *gpio_chip, unsigned offset)
-{
-	int rc, ret_val;
-	struct qpnp_gpio_chip *q_chip = dev_get_drvdata(gpio_chip->dev);
-	struct qpnp_gpio_spec *q_spec = NULL;
-	u8 buf[1];
-
-	if (WARN_ON(!q_chip))
-		return -ENODEV;
-
-	q_spec = qpnp_chip_gpio_get_spec(q_chip, offset);
-	if (WARN_ON(!q_spec))
-		return -ENODEV;
-
-	/* gpio val is from RT status iff input is enabled */
-	if ((q_spec->regs[Q_REG_I_MODE_CTL] & Q_REG_MODE_SEL_MASK)
-						== QPNP_GPIO_DIR_IN) {
-		/* INT_RT_STS */
-		rc = spmi_ext_register_readl(q_chip->spmi->ctrl, q_spec->slave,
-				Q_REG_ADDR(q_spec, Q_REG_STATUS1),
-				&buf[0], 1);
-		return buf[0];
-
-	} else {
-		ret_val = (q_spec->regs[Q_REG_I_MODE_CTL] &
-			       Q_REG_OUT_INVERT_MASK) >> Q_REG_OUT_INVERT_SHIFT;
-		return ret_val;
-	}
-
-	return 0;
-}
-
-static int __qpnp_gpio_set(struct qpnp_gpio_chip *q_chip,
-			   struct qpnp_gpio_spec *q_spec, int value)
-{
-	int rc;
-
-	if (!q_chip || !q_spec)
-		return -EINVAL;
-
-	if (value)
-		q_reg_clr_set(&q_spec->regs[Q_REG_I_MODE_CTL],
-			  Q_REG_OUT_INVERT_SHIFT, Q_REG_OUT_INVERT_MASK, 1);
-	else
-		q_reg_clr_set(&q_spec->regs[Q_REG_I_MODE_CTL],
-			  Q_REG_OUT_INVERT_SHIFT, Q_REG_OUT_INVERT_MASK, 0);
-
-	rc = spmi_ext_register_writel(q_chip->spmi->ctrl, q_spec->slave,
-			      Q_REG_ADDR(q_spec, Q_REG_I_MODE_CTL),
-			      &q_spec->regs[Q_REG_I_MODE_CTL], 1);
-	if (rc)
-		dev_err(&q_chip->spmi->dev, "%s: spmi write failed\n",
-								__func__);
-	return rc;
-}
-
-
-static void qpnp_gpio_set(struct gpio_chip *gpio_chip,
-		unsigned offset, int value)
-{
-	struct qpnp_gpio_chip *q_chip = dev_get_drvdata(gpio_chip->dev);
-	struct qpnp_gpio_spec *q_spec;
-
-	if (WARN_ON(!q_chip))
-		return;
-
-	q_spec = qpnp_chip_gpio_get_spec(q_chip, offset);
-	if (WARN_ON(!q_spec))
-		return;
-
-	__qpnp_gpio_set(q_chip, q_spec, value);
-}
-
-static int qpnp_gpio_set_direction(struct qpnp_gpio_chip *q_chip,
-				   struct qpnp_gpio_spec *q_spec, int direction)
-{
-	int rc;
-
-	if (!q_chip || !q_spec)
-		return -EINVAL;
-
-	if (direction >= QPNP_GPIO_DIR_INVALID) {
-		pr_err("invalid direction specification %d\n", direction);
-		return -EINVAL;
-	}
-
-	q_reg_clr_set(&q_spec->regs[Q_REG_I_MODE_CTL],
-			Q_REG_MODE_SEL_SHIFT,
-			Q_REG_MODE_SEL_MASK,
-			direction);
-
-	rc = spmi_ext_register_writel(q_chip->spmi->ctrl, q_spec->slave,
-			      Q_REG_ADDR(q_spec, Q_REG_I_MODE_CTL),
-			      &q_spec->regs[Q_REG_I_MODE_CTL], 1);
-	return rc;
-}
-
-static int qpnp_gpio_direction_input(struct gpio_chip *gpio_chip,
-		unsigned offset)
-{
-	struct qpnp_gpio_chip *q_chip = dev_get_drvdata(gpio_chip->dev);
-	struct qpnp_gpio_spec *q_spec;
-
-	if (WARN_ON(!q_chip))
-		return -ENODEV;
-
-	q_spec = qpnp_chip_gpio_get_spec(q_chip, offset);
-	if (WARN_ON(!q_spec))
-		return -ENODEV;
-
-	return qpnp_gpio_set_direction(q_chip, q_spec, QPNP_GPIO_DIR_IN);
-}
-
-static int qpnp_gpio_direction_output(struct gpio_chip *gpio_chip,
-		unsigned offset,
-		int val)
-{
-	int rc;
-	struct qpnp_gpio_chip *q_chip = dev_get_drvdata(gpio_chip->dev);
-	struct qpnp_gpio_spec *q_spec;
-
-	if (WARN_ON(!q_chip))
-		return -ENODEV;
-
-	q_spec = qpnp_chip_gpio_get_spec(q_chip, offset);
-	if (WARN_ON(!q_spec))
-		return -ENODEV;
-
-	rc = __qpnp_gpio_set(q_chip, q_spec, val);
-	if (rc)
-		return rc;
-
-	rc = qpnp_gpio_set_direction(q_chip, q_spec, QPNP_GPIO_DIR_OUT);
-
-	return rc;
-}
-
-static int qpnp_gpio_of_gpio_xlate(struct gpio_chip *gpio_chip,
-				   const struct of_phandle_args *gpio_spec,
-				   u32 *flags)
-{
-	struct qpnp_gpio_chip *q_chip = dev_get_drvdata(gpio_chip->dev);
-	struct qpnp_gpio_spec *q_spec;
-
-	if (WARN_ON(gpio_chip->of_gpio_n_cells < 2)) {
-		pr_err("of_gpio_n_cells < 2\n");
-		return -EINVAL;
-	}
-
-	q_spec = qpnp_pmic_gpio_get_spec(q_chip, gpio_spec->args[0]);
-	if (!q_spec) {
-		pr_err("no such PMIC gpio %u in device topology\n",
-							gpio_spec->args[0]);
-		return -EINVAL;
-	}
-
-	if (flags)
-		*flags = gpio_spec->args[1];
-
-	return q_spec->gpio_chip_idx;
-}
-
-static int qpnp_gpio_apply_config(struct qpnp_gpio_chip *q_chip,
-				  struct qpnp_gpio_spec *q_spec)
-{
-	struct qpnp_gpio_cfg param;
-	struct device_node *node = q_spec->node;
-	int rc;
-
-	param.direction    = q_reg_get(&q_spec->regs[Q_REG_I_MODE_CTL],
-				       Q_REG_MODE_SEL_SHIFT,
-				       Q_REG_MODE_SEL_MASK);
-	param.output_type  = q_reg_get(&q_spec->regs[Q_REG_I_DIG_OUT_CTL],
-				       Q_REG_OUT_TYPE_SHIFT,
-				       Q_REG_OUT_TYPE_MASK);
-	param.invert	   = q_reg_get(&q_spec->regs[Q_REG_I_MODE_CTL],
-				       Q_REG_OUT_INVERT_MASK,
-				       Q_REG_OUT_INVERT_MASK);
-	param.pull	   = q_reg_get(&q_spec->regs[Q_REG_I_MODE_CTL],
-				       Q_REG_PULL_SHIFT, Q_REG_PULL_MASK);
-	param.vin_sel	   = q_reg_get(&q_spec->regs[Q_REG_I_DIG_VIN_CTL],
-				       Q_REG_VIN_SHIFT, Q_REG_VIN_MASK);
-	param.out_strength = q_reg_get(&q_spec->regs[Q_REG_I_DIG_OUT_CTL],
-				       Q_REG_OUT_STRENGTH_SHIFT,
-				       Q_REG_OUT_STRENGTH_MASK);
-	param.src_select   = q_reg_get(&q_spec->regs[Q_REG_I_MODE_CTL],
-				       Q_REG_SRC_SEL_SHIFT, Q_REG_SRC_SEL_MASK);
-	param.master_en    = q_reg_get(&q_spec->regs[Q_REG_I_EN_CTL],
-				       Q_REG_MASTER_EN_SHIFT,
-				       Q_REG_MASTER_EN_MASK);
-
-	of_property_read_u32(node, "qcom,direction",
-		&param.direction);
-	of_property_read_u32(node, "qcom,output-type",
-		&param.output_type);
-	of_property_read_u32(node, "qcom,invert",
-		&param.invert);
-	of_property_read_u32(node, "qcom,pull",
-		&param.pull);
-	of_property_read_u32(node, "qcom,vin-sel",
-		&param.vin_sel);
-	of_property_read_u32(node, "qcom,out-strength",
-		&param.out_strength);
-	of_property_read_u32(node, "qcom,src-select",
-		&param.src_select);
-	rc = of_property_read_u32(node, "qcom,master-en",
-		&param.master_en);
-
-	rc = _qpnp_gpio_config(q_chip, q_spec, &param);
-
-	return rc;
-}
-
-static int qpnp_gpio_free_chip(struct qpnp_gpio_chip *q_chip)
-{
-	struct spmi_device *spmi = q_chip->spmi;
-	int rc, i;
-
-	if (q_chip->chip_gpios)
-		for (i = 0; i < spmi->num_dev_node; i++)
-			kfree(q_chip->chip_gpios[i]);
-
-	mutex_lock(&qpnp_gpio_chips_lock);
-	list_del(&q_chip->chip_list);
-	mutex_unlock(&qpnp_gpio_chips_lock);
-	rc = gpiochip_remove(&q_chip->gpio_chip);
-	if (rc)
-		dev_err(&q_chip->spmi->dev, "%s: unable to remove gpio\n",
-				__func__);
-	kfree(q_chip->chip_gpios);
-	kfree(q_chip->pmic_gpios);
-	kfree(q_chip);
-	return rc;
-}
-
-#ifdef CONFIG_GPIO_QPNP_DEBUG
-struct qpnp_gpio_reg {
-	uint32_t addr;
-	uint32_t idx;
-	uint32_t shift;
-	uint32_t mask;
-};
-
-static struct dentry *driver_dfs_dir;
-
-static int qpnp_gpio_reg_attr(enum qpnp_gpio_param_type type,
-			     struct qpnp_gpio_reg *cfg)
-{
-	switch (type) {
-	case Q_GPIO_CFG_DIRECTION:
-		cfg->addr = Q_REG_MODE_CTL;
-		cfg->idx = Q_REG_I_MODE_CTL;
-		cfg->shift = Q_REG_MODE_SEL_SHIFT;
-		cfg->mask = Q_REG_MODE_SEL_MASK;
-		break;
-	case Q_GPIO_CFG_OUTPUT_TYPE:
-		cfg->addr = Q_REG_DIG_OUT_CTL;
-		cfg->idx = Q_REG_I_DIG_OUT_CTL;
-		cfg->shift = Q_REG_OUT_TYPE_SHIFT;
-		cfg->mask = Q_REG_OUT_TYPE_MASK;
-		break;
-	case Q_GPIO_CFG_INVERT:
-		cfg->addr = Q_REG_MODE_CTL;
-		cfg->idx = Q_REG_I_MODE_CTL;
-		cfg->shift = Q_REG_OUT_INVERT_SHIFT;
-		cfg->mask = Q_REG_OUT_INVERT_MASK;
-		break;
-	case Q_GPIO_CFG_PULL:
-		cfg->addr = Q_REG_DIG_PULL_CTL;
-		cfg->idx = Q_REG_I_DIG_PULL_CTL;
-		cfg->shift = Q_REG_PULL_SHIFT;
-		cfg->mask = Q_REG_PULL_MASK;
-		break;
-	case Q_GPIO_CFG_VIN_SEL:
-		cfg->addr = Q_REG_DIG_VIN_CTL;
-		cfg->idx = Q_REG_I_DIG_VIN_CTL;
-		cfg->shift = Q_REG_VIN_SHIFT;
-		cfg->mask = Q_REG_VIN_MASK;
-		break;
-	case Q_GPIO_CFG_OUT_STRENGTH:
-		cfg->addr = Q_REG_DIG_OUT_CTL;
-		cfg->idx = Q_REG_I_DIG_OUT_CTL;
-		cfg->shift = Q_REG_OUT_STRENGTH_SHIFT;
-		cfg->mask = Q_REG_OUT_STRENGTH_MASK;
-		break;
-	case Q_GPIO_CFG_SRC_SELECT:
-		cfg->addr = Q_REG_MODE_CTL;
-		cfg->idx = Q_REG_I_MODE_CTL;
-		cfg->shift = Q_REG_SRC_SEL_SHIFT;
-		cfg->mask = Q_REG_SRC_SEL_MASK;
-		break;
-	case Q_GPIO_CFG_MASTER_EN:
-		cfg->addr = Q_REG_EN_CTL;
-		cfg->idx = Q_REG_I_EN_CTL;
-		cfg->shift = Q_REG_MASTER_EN_SHIFT;
-		cfg->mask = Q_REG_MASTER_EN_MASK;
-		break;
-	default:
-		return -EINVAL;
-	}
-
-	return 0;
-}
-
-static int qpnp_gpio_debugfs_get(void *data, u64 *val)
-{
-	enum qpnp_gpio_param_type *idx = data;
-	struct qpnp_gpio_spec *q_spec;
-	struct qpnp_gpio_reg cfg = {};
-	int rc;
-
-	rc = qpnp_gpio_reg_attr(*idx, &cfg);
-	if (rc)
-		return rc;
-	q_spec = container_of(idx, struct qpnp_gpio_spec, params[*idx]);
-	*val = q_reg_get(&q_spec->regs[cfg.idx], cfg.shift, cfg.mask);
-	return 0;
-}
-
-static int qpnp_gpio_check_reg_val(enum qpnp_gpio_param_type idx,
-				   struct qpnp_gpio_spec *q_spec,
-				   uint32_t val)
-{
-	switch (idx) {
-	case Q_GPIO_CFG_DIRECTION:
-		if (val >= QPNP_GPIO_DIR_INVALID)
-			return -EINVAL;
-		break;
-	case Q_GPIO_CFG_OUTPUT_TYPE:
-		if ((val >= QPNP_GPIO_OUT_BUF_INVALID) ||
-		   ((val == QPNP_GPIO_OUT_BUF_OPEN_DRAIN_NMOS ||
-		   val == QPNP_GPIO_OUT_BUF_OPEN_DRAIN_PMOS) &&
-		   (q_spec->subtype == Q_GPIO_SUBTYPE_GPIOC_4CH ||
-		   (q_spec->subtype == Q_GPIO_SUBTYPE_GPIOC_8CH))))
-			return -EINVAL;
-		break;
-	case Q_GPIO_CFG_INVERT:
-		if (val >= QPNP_GPIO_INVERT_INVALID)
-			return -EINVAL;
-		break;
-	case Q_GPIO_CFG_PULL:
-		if (val >= QPNP_GPIO_PULL_INVALID)
-			return -EINVAL;
-		break;
-	case Q_GPIO_CFG_VIN_SEL:
-		if (val >= QPNP_GPIO_VIN_INVALID)
-			return -EINVAL;
-		break;
-	case Q_GPIO_CFG_OUT_STRENGTH:
-		if (val >= QPNP_GPIO_OUT_STRENGTH_INVALID ||
-		    val == 0)
-			return -EINVAL;
-		break;
-	case Q_GPIO_CFG_SRC_SELECT:
-		if (val >= QPNP_GPIO_SRC_INVALID)
-			return -EINVAL;
-		break;
-	case Q_GPIO_CFG_MASTER_EN:
-		if (val >= QPNP_GPIO_MASTER_INVALID)
-			return -EINVAL;
-		break;
-	default:
-		pr_err("invalid param type %u specified\n", idx);
-		return -EINVAL;
-	}
-	return 0;
-}
-
-static int qpnp_gpio_debugfs_set(void *data, u64 val)
-{
-	enum qpnp_gpio_param_type *idx = data;
-	struct qpnp_gpio_spec *q_spec;
-	struct qpnp_gpio_chip *q_chip;
-	struct qpnp_gpio_reg cfg = {};
-	int rc;
-
-	q_spec = container_of(idx, struct qpnp_gpio_spec, params[*idx]);
-	q_chip = q_spec->q_chip;
-
-	rc = qpnp_gpio_check_reg_val(*idx, q_spec, val);
-	if (rc)
-		return rc;
-
-	rc = qpnp_gpio_reg_attr(*idx, &cfg);
-	if (rc)
-		return rc;
-	q_reg_clr_set(&q_spec->regs[cfg.idx], cfg.shift, cfg.mask, val);
-	rc = spmi_ext_register_writel(q_chip->spmi->ctrl, q_spec->slave,
-				      Q_REG_ADDR(q_spec, cfg.addr),
-				      &q_spec->regs[cfg.idx], 1);
-
-	return rc;
-}
-DEFINE_SIMPLE_ATTRIBUTE(qpnp_gpio_fops, qpnp_gpio_debugfs_get,
-			qpnp_gpio_debugfs_set, "%llu\n");
-
-#define DEBUGFS_BUF_SIZE 11 /* supports 2^32 in decimal */
-
-struct qpnp_gpio_debugfs_args {
-	enum qpnp_gpio_param_type type;
-	const char *filename;
-};
-
-static struct qpnp_gpio_debugfs_args dfs_args[] = {
-	{ Q_GPIO_CFG_DIRECTION, "direction" },
-	{ Q_GPIO_CFG_OUTPUT_TYPE, "output_type" },
-	{ Q_GPIO_CFG_INVERT, "invert" },
-	{ Q_GPIO_CFG_PULL, "pull" },
-	{ Q_GPIO_CFG_VIN_SEL, "vin_sel" },
-	{ Q_GPIO_CFG_OUT_STRENGTH, "out_strength" },
-	{ Q_GPIO_CFG_SRC_SELECT, "src_select" },
-	{ Q_GPIO_CFG_MASTER_EN, "master_en" }
-};
-
-static int qpnp_gpio_debugfs_create(struct qpnp_gpio_chip *q_chip)
-{
-	struct spmi_device *spmi = q_chip->spmi;
-	struct device *dev = &spmi->dev;
-	struct qpnp_gpio_spec *q_spec;
-	enum qpnp_gpio_param_type *params;
-	enum qpnp_gpio_param_type type;
-	char pmic_gpio[DEBUGFS_BUF_SIZE];
-	const char *filename;
-	struct dentry *dfs, *dfs_io_dir;
-	int i, j;
-
-	BUG_ON(Q_NUM_PARAMS != ARRAY_SIZE(dfs_args));
-
-	q_chip->dfs_dir = debugfs_create_dir(dev->of_node->name,
-							driver_dfs_dir);
-	if (q_chip->dfs_dir == NULL) {
-		dev_err(dev, "%s: cannot register chip debugfs directory %s\n",
-						__func__, dev->of_node->name);
-		return -ENODEV;
-	}
-
-	for (i = 0; i < spmi->num_dev_node; i++) {
-		q_spec = qpnp_chip_gpio_get_spec(q_chip, i);
-		params = q_spec->params;
-		snprintf(pmic_gpio, DEBUGFS_BUF_SIZE, "%u", q_spec->pmic_gpio);
-		dfs_io_dir = debugfs_create_dir(pmic_gpio,
-							q_chip->dfs_dir);
-		if (dfs_io_dir == NULL)
-			goto dfs_err;
-
-		for (j = 0; j < Q_NUM_PARAMS; j++) {
-			type = dfs_args[j].type;
-			filename = dfs_args[j].filename;
-
-			params[type] = type;
-			dfs = debugfs_create_file(
-					filename,
-					S_IRUGO | S_IWUSR,
-					dfs_io_dir,
-					&q_spec->params[type],
-					&qpnp_gpio_fops);
-			if (dfs == NULL)
-				goto dfs_err;
-		}
-	}
-	return 0;
-dfs_err:
-	dev_err(dev, "%s: cannot register debugfs for pmic gpio %u on"
-				     " chip %s\n", __func__,
-				     q_spec->pmic_gpio, dev->of_node->name);
-	debugfs_remove_recursive(q_chip->dfs_dir);
-	return -ENFILE;
-}
-#else
-static int qpnp_gpio_debugfs_create(struct qpnp_gpio_chip *q_chip)
-{
-	return 0;
-}
-#endif
-
-static int qpnp_gpio_probe(struct spmi_device *spmi)
-{
-	struct qpnp_gpio_chip *q_chip;
-	struct resource *res;
-	struct qpnp_gpio_spec *q_spec;
-	int i, rc;
-	int lowest_gpio = UINT_MAX, highest_gpio = 0;
-	u32 intspec[3], gpio;
-	char buf[2];
-
-	q_chip = kzalloc(sizeof(*q_chip), GFP_KERNEL);
-	if (!q_chip) {
-		dev_err(&spmi->dev, "%s: Can't allocate gpio_chip\n",
-								__func__);
-		return -ENOMEM;
-	}
-	q_chip->spmi = spmi;
-	dev_set_drvdata(&spmi->dev, q_chip);
-
-	mutex_lock(&qpnp_gpio_chips_lock);
-	list_add(&q_chip->chip_list, &qpnp_gpio_chips);
-	mutex_unlock(&qpnp_gpio_chips_lock);
-
-	/* first scan through nodes to find the range required for allocation */
-	for (i = 0; i < spmi->num_dev_node; i++) {
-		rc = of_property_read_u32(spmi->dev_node[i].of_node,
-							"qcom,gpio-num", &gpio);
-		if (rc) {
-			dev_err(&spmi->dev, "%s: unable to get"
-				" qcom,gpio-num property\n", __func__);
-			goto err_probe;
-		}
-
-		if (gpio < lowest_gpio)
-			lowest_gpio = gpio;
-		if (gpio > highest_gpio)
-			highest_gpio = gpio;
-	}
-
-	if (highest_gpio < lowest_gpio) {
-		dev_err(&spmi->dev, "%s: no device nodes specified in"
-					" topology\n", __func__);
-		rc = -EINVAL;
-		goto err_probe;
-	} else if (lowest_gpio == 0) {
-		dev_err(&spmi->dev, "%s: 0 is not a valid PMIC GPIO\n",
-								__func__);
-		rc = -EINVAL;
-		goto err_probe;
-	}
-
-	q_chip->pmic_gpio_lowest = lowest_gpio;
-	q_chip->pmic_gpio_highest = highest_gpio;
-
-	/* allocate gpio lookup tables */
-	q_chip->pmic_gpios = kzalloc(sizeof(struct qpnp_gpio_spec *) *
-						highest_gpio - lowest_gpio + 1,
-						GFP_KERNEL);
-	q_chip->chip_gpios = kzalloc(sizeof(struct qpnp_gpio_spec *) *
-						spmi->num_dev_node, GFP_KERNEL);
-	if (!q_chip->pmic_gpios || !q_chip->chip_gpios) {
-		dev_err(&spmi->dev, "%s: unable to allocate memory\n",
-								__func__);
-		rc = -ENOMEM;
-		goto err_probe;
-	}
-
-	/* get interrupt controller device_node */
-	q_chip->int_ctrl = of_irq_find_parent(spmi->dev.of_node);
-	if (!q_chip->int_ctrl) {
-		dev_err(&spmi->dev, "%s: Can't find interrupt parent\n",
-								__func__);
-		rc = -EINVAL;
-		goto err_probe;
-	}
-
-	/* now scan through again and populate the lookup table */
-	for (i = 0; i < spmi->num_dev_node; i++) {
-		res = qpnp_get_resource(spmi, i, IORESOURCE_MEM, 0);
-		if (!res) {
-			dev_err(&spmi->dev, "%s: node %s is missing has no"
-				" base address definition\n",
-				__func__, spmi->dev_node[i].of_node->full_name);
-		}
-
-		rc = of_property_read_u32(spmi->dev_node[i].of_node,
-							"qcom,gpio-num", &gpio);
-		if (rc) {
-			dev_err(&spmi->dev, "%s: unable to get"
-				" qcom,gpio-num property\n", __func__);
-			goto err_probe;
-		}
-
-		q_spec = kzalloc(sizeof(struct qpnp_gpio_spec),
-							GFP_KERNEL);
-		if (!q_spec) {
-			dev_err(&spmi->dev, "%s: unable to allocate"
-						" memory\n",
-					__func__);
-			rc = -ENOMEM;
-			goto err_probe;
-		}
-
-		q_spec->slave = spmi->sid;
-		q_spec->offset = res->start;
-		q_spec->gpio_chip_idx = i;
-		q_spec->pmic_gpio = gpio;
-		q_spec->node = spmi->dev_node[i].of_node;
-		q_spec->q_chip = q_chip;
-
-		rc = spmi_ext_register_readl(spmi->ctrl, q_spec->slave,
-				Q_REG_ADDR(q_spec, Q_REG_TYPE), &buf[0], 2);
-		if (rc) {
-			dev_err(&spmi->dev, "%s: unable to read type regs\n",
-						__func__);
-			goto err_probe;
-		}
-		q_spec->type	= buf[0];
-		q_spec->subtype = buf[1];
-
-		/* call into irq_domain to get irq mapping */
-		intspec[0] = q_chip->spmi->sid;
-		intspec[1] = (q_spec->offset >> 8) & 0xFF;
-		intspec[2] = 0;
-		q_spec->irq = irq_create_of_mapping(q_chip->int_ctrl,
-							intspec, 3);
-		if (!q_spec->irq) {
-			dev_err(&spmi->dev, "%s: invalid irq for gpio"
-					" %u\n", __func__, gpio);
-			rc = -EINVAL;
-			goto err_probe;
-		}
-		/* initialize lookup table params */
-		qpnp_pmic_gpio_set_spec(q_chip, gpio, q_spec);
-		qpnp_chip_gpio_set_spec(q_chip, i, q_spec);
-	}
-
-	q_chip->gpio_chip.base = -1;
-	q_chip->gpio_chip.ngpio = spmi->num_dev_node;
-	q_chip->gpio_chip.label = "qpnp-gpio";
-	q_chip->gpio_chip.direction_input = qpnp_gpio_direction_input;
-	q_chip->gpio_chip.direction_output = qpnp_gpio_direction_output;
-	q_chip->gpio_chip.to_irq = qpnp_gpio_to_irq;
-	q_chip->gpio_chip.get = qpnp_gpio_get;
-	q_chip->gpio_chip.set = qpnp_gpio_set;
-	q_chip->gpio_chip.dev = &spmi->dev;
-	q_chip->gpio_chip.of_xlate = qpnp_gpio_of_gpio_xlate;
-	q_chip->gpio_chip.of_gpio_n_cells = 2;
-	q_chip->gpio_chip.can_sleep = 0;
-
-	rc = gpiochip_add(&q_chip->gpio_chip);
-	if (rc) {
-		dev_err(&spmi->dev, "%s: Can't add gpio chip, rc = %d\n",
-								__func__, rc);
-		goto err_probe;
-	}
-
-	/* now configure gpio config defaults if they exist */
-	for (i = 0; i < spmi->num_dev_node; i++) {
-		q_spec = qpnp_chip_gpio_get_spec(q_chip, i);
-		if (WARN_ON(!q_spec)) {
-			rc = -ENODEV;
-			goto err_probe;
-		}
-
-		rc = qpnp_gpio_cache_regs(q_chip, q_spec);
-		if (rc)
-			goto err_probe;
-
-		rc = qpnp_gpio_apply_config(q_chip, q_spec);
-		if (rc)
-			goto err_probe;
-	}
-
-	dev_dbg(&spmi->dev, "%s: gpio_chip registered between %d-%u\n",
-			__func__, q_chip->gpio_chip.base,
-			(q_chip->gpio_chip.base + q_chip->gpio_chip.ngpio) - 1);
-
-	rc = qpnp_gpio_debugfs_create(q_chip);
-	if (rc) {
-		dev_err(&spmi->dev, "%s: debugfs creation failed\n", __func__);
-		goto err_probe;
-	}
-
-	return 0;
-
-err_probe:
-	qpnp_gpio_free_chip(q_chip);
-	return rc;
-}
-
-static int qpnp_gpio_remove(struct spmi_device *spmi)
-{
-	struct qpnp_gpio_chip *q_chip = dev_get_drvdata(&spmi->dev);
-
-	debugfs_remove_recursive(q_chip->dfs_dir);
-
-	return qpnp_gpio_free_chip(q_chip);
-}
-
-static struct of_device_id spmi_match_table[] = {
-	{	.compatible = "qcom,qpnp-gpio",
-	},
-	{}
-};
-
-static const struct spmi_device_id qpnp_gpio_id[] = {
-	{ "qcom,qpnp-gpio", 0 },
-	{ }
-};
-MODULE_DEVICE_TABLE(spmi, qpnp_gpio_id);
-
-static struct spmi_driver qpnp_gpio_driver = {
-	.driver		= {
-		.name	= "qcom,qpnp-gpio",
-		.of_match_table = spmi_match_table,
-	},
-	.probe		= qpnp_gpio_probe,
-	.remove		= qpnp_gpio_remove,
-	.id_table	= qpnp_gpio_id,
-};
-
-static int __init qpnp_gpio_init(void)
-{
-#ifdef CONFIG_GPIO_QPNP_DEBUG
-	driver_dfs_dir = debugfs_create_dir("qpnp_gpio", NULL);
-	if (driver_dfs_dir == NULL)
-		pr_err("Cannot register top level debugfs directory\n");
-#endif
-
-	return spmi_driver_register(&qpnp_gpio_driver);
-}
-
-static void __exit qpnp_gpio_exit(void)
-{
-#ifdef CONFIG_GPIO_QPNP_DEBUG
-	debugfs_remove_recursive(driver_dfs_dir);
-#endif
-	spmi_driver_unregister(&qpnp_gpio_driver);
-}
-
-MODULE_DESCRIPTION("QPNP PMIC gpio driver");
-MODULE_LICENSE("GPL v2");
-
-module_init(qpnp_gpio_init);
-module_exit(qpnp_gpio_exit);
diff --git a/drivers/gpio/qpnp-pin.c b/drivers/gpio/qpnp-pin.c
new file mode 100644
index 0000000..bbcba81
--- /dev/null
+++ b/drivers/gpio/qpnp-pin.c
@@ -0,0 +1,1335 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/interrupt.h>
+#include <linux/types.h>
+#include <linux/spmi.h>
+#include <linux/platform_device.h>
+#include <linux/debugfs.h>
+#include <linux/gpio.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
+#include <linux/of_irq.h>
+#include <linux/export.h>
+#include <linux/module.h>
+#include <linux/export.h>
+#include <linux/qpnp/pin.h>
+
+#define Q_REG_ADDR(q_spec, reg_index)	\
+		((q_spec)->offset + reg_index)
+
+#define Q_REG_STATUS1			0x8
+#define Q_NUM_CTL_REGS			0xD
+
+/* type registers base address offsets */
+#define Q_REG_TYPE			0x4
+#define Q_REG_SUBTYPE			0x5
+
+/* gpio peripheral type and subtype values */
+#define Q_GPIO_TYPE			0x10
+#define Q_GPIO_SUBTYPE_GPIO_4CH		0x0
+#define Q_GPIO_SUBTYPE_GPIOC_4CH	0x2
+#define Q_GPIO_SUBTYPE_GPIO_8CH		0x4
+#define Q_GPIO_SUBTYPE_GPIOC_8CH	0x6
+
+/* mpp peripheral type and subtype values */
+#define Q_MPP_TYPE			0x11
+#define Q_MPP_SUBTYPE_4CH_NO_ANA_OUT	0x1
+#define Q_MPP_SUBTYPE_4CH_NO_SINK	0x2
+#define Q_MPP_SUBTYPE_4CH_FULL_FUNC	0x3
+#define Q_MPP_SUBTYPE_8CH_FULL_FUNC	0x7
+
+/* control register base address offsets */
+#define Q_REG_MODE_CTL			0x40
+#define Q_REG_DIG_VIN_CTL		0x41
+#define Q_REG_DIG_PULL_CTL		0x42
+#define Q_REG_DIG_IN_CTL		0x43
+#define Q_REG_DIG_OUT_CTL		0x45
+#define Q_REG_EN_CTL			0x46
+#define Q_REG_AOUT_CTL			0x48
+#define Q_REG_AIN_CTL			0x4A
+#define Q_REG_SINK_CTL			0x4C
+
+/* control register regs array indices */
+#define Q_REG_I_MODE_CTL		0
+#define Q_REG_I_DIG_VIN_CTL		1
+#define Q_REG_I_DIG_PULL_CTL		2
+#define Q_REG_I_DIG_IN_CTL		3
+#define Q_REG_I_DIG_OUT_CTL		5
+#define Q_REG_I_EN_CTL			6
+#define Q_REG_I_AOUT_CTL		8
+#define Q_REG_I_AIN_CTL			10
+#define Q_REG_I_SINK_CTL		12
+
+/* control reg: mode */
+#define Q_REG_OUT_INVERT_SHIFT		0
+#define Q_REG_OUT_INVERT_MASK		0x1
+#define Q_REG_SRC_SEL_SHIFT		1
+#define Q_REG_SRC_SEL_MASK		0xE
+#define Q_REG_MODE_SEL_SHIFT		4
+#define Q_REG_MODE_SEL_MASK		0x70
+
+/* control reg: dig_vin */
+#define Q_REG_VIN_SHIFT			0
+#define Q_REG_VIN_MASK			0x7
+
+/* control reg: dig_pull */
+#define Q_REG_PULL_SHIFT		0
+#define Q_REG_PULL_MASK			0x7
+
+/* control reg: dig_out */
+#define Q_REG_OUT_STRENGTH_SHIFT	0
+#define Q_REG_OUT_STRENGTH_MASK		0x3
+#define Q_REG_OUT_TYPE_SHIFT		4
+#define Q_REG_OUT_TYPE_MASK		0x30
+
+/* control reg: en */
+#define Q_REG_MASTER_EN_SHIFT		7
+#define Q_REG_MASTER_EN_MASK		0x80
+
+/* control reg: ana_out */
+#define Q_REG_AOUT_REF_SHIFT		0
+#define Q_REG_AOUT_REF_MASK		0x7
+
+/* control reg: ana_in */
+#define Q_REG_AIN_ROUTE_SHIFT		0
+#define Q_REG_AIN_ROUTE_MASK		0x7
+
+/* control reg: sink */
+#define Q_REG_CS_OUT_SHIFT		0
+#define Q_REG_CS_OUT_MASK		0x7
+
+enum qpnp_pin_param_type {
+	Q_PIN_CFG_MODE,
+	Q_PIN_CFG_OUTPUT_TYPE,
+	Q_PIN_CFG_INVERT,
+	Q_PIN_CFG_PULL,
+	Q_PIN_CFG_VIN_SEL,
+	Q_PIN_CFG_OUT_STRENGTH,
+	Q_PIN_CFG_SELECT,
+	Q_PIN_CFG_MASTER_EN,
+	Q_PIN_CFG_AOUT_REF,
+	Q_PIN_CFG_AIN_ROUTE,
+	Q_PIN_CFG_CS_OUT,
+	Q_PIN_CFG_INVALID,
+};
+
+#define Q_NUM_PARAMS			Q_PIN_CFG_INVALID
+
+/* param error checking */
+#define QPNP_PIN_MODE_INVALID		3
+#define QPNP_PIN_INVERT_INVALID		2
+#define QPNP_PIN_OUT_BUF_INVALID	3
+#define QPNP_PIN_VIN_4CH_INVALID	5
+#define QPNP_PIN_VIN_8CH_INVALID	8
+#define QPNP_PIN_GPIO_PULL_INVALID	6
+#define QPNP_PIN_MPP_PULL_INVALID	4
+#define QPNP_PIN_OUT_STRENGTH_INVALID	4
+#define QPNP_PIN_SRC_INVALID		8
+#define QPNP_PIN_MASTER_INVALID		2
+#define QPNP_PIN_AOUT_REF_INVALID	8
+#define QPNP_PIN_AIN_ROUTE_INVALID	8
+#define QPNP_PIN_CS_OUT_INVALID		8
+
+struct qpnp_pin_spec {
+	uint8_t slave;			/* 0-15 */
+	uint16_t offset;		/* 0-255 */
+	uint32_t gpio_chip_idx;		/* offset from gpio_chip base */
+	uint32_t pmic_pin;		/* PMIC pin number */
+	int irq;			/* logical IRQ number */
+	u8 regs[Q_NUM_CTL_REGS];	/* Control regs */
+	u8 num_ctl_regs;		/* usable number on this pin */
+	u8 type;			/* peripheral type */
+	u8 subtype;			/* peripheral subtype */
+	struct device_node *node;
+	enum qpnp_pin_param_type params[Q_NUM_PARAMS];
+	struct qpnp_pin_chip *q_chip;
+};
+
+struct qpnp_pin_chip {
+	struct gpio_chip	gpio_chip;
+	struct spmi_device	*spmi;
+	struct qpnp_pin_spec	**pmic_pins;
+	struct qpnp_pin_spec	**chip_gpios;
+	uint32_t		pmic_pin_lowest;
+	uint32_t		pmic_pin_highest;
+	struct device_node	*int_ctrl;
+	struct list_head	chip_list;
+	struct dentry		*dfs_dir;
+};
+
+static LIST_HEAD(qpnp_pin_chips);
+static DEFINE_MUTEX(qpnp_pin_chips_lock);
+
+static inline void qpnp_pmic_pin_set_spec(struct qpnp_pin_chip *q_chip,
+					      uint32_t pmic_pin,
+					      struct qpnp_pin_spec *spec)
+{
+	q_chip->pmic_pins[pmic_pin - q_chip->pmic_pin_lowest] = spec;
+}
+
+static inline struct qpnp_pin_spec *qpnp_pmic_pin_get_spec(
+						struct qpnp_pin_chip *q_chip,
+						uint32_t pmic_pin)
+{
+	if (pmic_pin < q_chip->pmic_pin_lowest ||
+	    pmic_pin > q_chip->pmic_pin_highest)
+		return NULL;
+
+	return q_chip->pmic_pins[pmic_pin - q_chip->pmic_pin_lowest];
+}
+
+static inline struct qpnp_pin_spec *qpnp_chip_gpio_get_spec(
+						struct qpnp_pin_chip *q_chip,
+						uint32_t chip_gpio)
+{
+	if (chip_gpio >= q_chip->gpio_chip.ngpio)
+		return NULL;
+
+	return q_chip->chip_gpios[chip_gpio];
+}
+
+static inline void qpnp_chip_gpio_set_spec(struct qpnp_pin_chip *q_chip,
+					      uint32_t chip_gpio,
+					      struct qpnp_pin_spec *spec)
+{
+	q_chip->chip_gpios[chip_gpio] = spec;
+}
+
+/*
+ * Determines whether a specified param's configuration is correct.
+ * This check is two tier. First a check is done whether the hardware
+ * supports this param and value requested. The second check validates
+ * that the configuration is correct, given the fact that the hardware
+ * supports it.
+ *
+ * Returns
+ *	-ENXIO if the hardware does not support this param.
+ *	-EINVAL if the hardware does support this param, but the
+ *	requested value is outside the supported range.
+ */
+static int qpnp_pin_check_config(enum qpnp_pin_param_type idx,
+				 struct qpnp_pin_spec *q_spec, uint32_t val)
+{
+	switch (idx) {
+	case Q_PIN_CFG_MODE:
+		if (val >= QPNP_PIN_MODE_INVALID)
+			return -EINVAL;
+		break;
+	case Q_PIN_CFG_OUTPUT_TYPE:
+		if (q_spec->type != Q_GPIO_TYPE)
+			return -ENXIO;
+		if ((val == QPNP_PIN_OUT_BUF_OPEN_DRAIN_NMOS ||
+		    val == QPNP_PIN_OUT_BUF_OPEN_DRAIN_PMOS) &&
+		    (q_spec->subtype == Q_GPIO_SUBTYPE_GPIOC_4CH ||
+		    (q_spec->subtype == Q_GPIO_SUBTYPE_GPIOC_8CH)))
+			return -EINVAL;
+		else if (val >= QPNP_PIN_OUT_BUF_INVALID)
+			return -EINVAL;
+		break;
+	case Q_PIN_CFG_INVERT:
+		if (val >= QPNP_PIN_INVERT_INVALID)
+			return -EINVAL;
+		break;
+	case Q_PIN_CFG_PULL:
+		if (q_spec->type == Q_GPIO_TYPE &&
+		    val >= QPNP_PIN_GPIO_PULL_INVALID)
+			return -EINVAL;
+		if (q_spec->type == Q_MPP_TYPE &&
+		    val >= QPNP_PIN_MPP_PULL_INVALID)
+			return -EINVAL;
+		break;
+	case Q_PIN_CFG_VIN_SEL:
+		if (val >= QPNP_PIN_VIN_8CH_INVALID)
+			return -EINVAL;
+		else if (val >= QPNP_PIN_VIN_4CH_INVALID) {
+			if (q_spec->type == Q_GPIO_TYPE &&
+			   (q_spec->subtype == Q_GPIO_SUBTYPE_GPIO_4CH ||
+			    q_spec->subtype == Q_GPIO_SUBTYPE_GPIOC_4CH))
+				return -EINVAL;
+			if (q_spec->type == Q_MPP_TYPE &&
+			   (q_spec->subtype == Q_MPP_SUBTYPE_4CH_NO_ANA_OUT ||
+			    q_spec->subtype == Q_MPP_SUBTYPE_4CH_NO_SINK ||
+			    q_spec->subtype == Q_MPP_SUBTYPE_4CH_FULL_FUNC))
+				return -EINVAL;
+		}
+		break;
+	case Q_PIN_CFG_OUT_STRENGTH:
+		if (q_spec->type != Q_GPIO_TYPE)
+			return -ENXIO;
+		if (val >= QPNP_PIN_OUT_STRENGTH_INVALID ||
+		    val == 0)
+			return -EINVAL;
+		break;
+	case Q_PIN_CFG_SELECT:
+		if (q_spec->type == Q_MPP_TYPE &&
+		    (val == QPNP_PIN_SEL_FUNC_1 ||
+		     val == QPNP_PIN_SEL_FUNC_2))
+			return -EINVAL;
+		if (val >= QPNP_PIN_SRC_INVALID)
+			return -EINVAL;
+		break;
+	case Q_PIN_CFG_MASTER_EN:
+		if (val >= QPNP_PIN_MASTER_INVALID)
+			return -EINVAL;
+		break;
+	case Q_PIN_CFG_AOUT_REF:
+		if (q_spec->type != Q_MPP_TYPE)
+			return -ENXIO;
+		if (q_spec->subtype == Q_MPP_SUBTYPE_4CH_NO_ANA_OUT)
+			return -ENXIO;
+		if (val >= QPNP_PIN_AOUT_REF_INVALID)
+			return -EINVAL;
+		break;
+	case Q_PIN_CFG_AIN_ROUTE:
+		if (q_spec->type != Q_MPP_TYPE)
+			return -ENXIO;
+		if (val >= QPNP_PIN_AIN_ROUTE_INVALID)
+			return -EINVAL;
+		break;
+	case Q_PIN_CFG_CS_OUT:
+		if (q_spec->type != Q_MPP_TYPE)
+			return -ENXIO;
+		if (q_spec->subtype == Q_MPP_SUBTYPE_4CH_NO_SINK)
+			return -ENXIO;
+		if (val >= QPNP_PIN_CS_OUT_INVALID)
+			return -EINVAL;
+		break;
+
+	default:
+		pr_err("invalid param type %u specified\n", idx);
+		return -EINVAL;
+	}
+	return 0;
+}
+
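+/*
+ * Q_CHK_INVALID() is true only when the hardware supports the parameter
+ * but the requested value is out of range (-EINVAL); parameters the pin
+ * type does not support at all (-ENXIO) are simply skipped by callers.
+ */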
+#define Q_CHK_INVALID(idx, q_spec, val) \
+	(qpnp_pin_check_config(idx, q_spec, val) == -EINVAL)
+
+static int qpnp_pin_check_constraints(struct qpnp_pin_spec *q_spec,
+				      struct qpnp_pin_cfg *param)
+{
+	int pin = q_spec->pmic_pin;
+	const char *name;
+
+	name = (q_spec->type == Q_GPIO_TYPE) ? "gpio" : "mpp";
+
+	if (Q_CHK_INVALID(Q_PIN_CFG_MODE, q_spec, param->mode))
+		pr_err("invalid direction for %s %d\n", name, pin);
+	else if (Q_CHK_INVALID(Q_PIN_CFG_INVERT, q_spec, param->invert))
+		pr_err("invalid invert polarity for %s %d\n", name, pin);
+	else if (Q_CHK_INVALID(Q_PIN_CFG_SELECT, q_spec, param->select))
+		pr_err("invalid source select for %s %d\n", name, pin);
+	else if (Q_CHK_INVALID(Q_PIN_CFG_OUT_STRENGTH,
+						q_spec, param->out_strength))
+		pr_err("invalid out strength for %s %d\n", name, pin);
+	else if (Q_CHK_INVALID(Q_PIN_CFG_OUTPUT_TYPE,
+						 q_spec, param->output_type))
+		pr_err("invalid out type for %s %d\n", name, pin);
+	else if (Q_CHK_INVALID(Q_PIN_CFG_VIN_SEL, q_spec, param->vin_sel))
+		pr_err("invalid vin select value for %s %d\n", name, pin);
+	else if (Q_CHK_INVALID(Q_PIN_CFG_PULL, q_spec, param->pull))
+		pr_err("invalid pull value for %s %d\n", name, pin);
+	else if (Q_CHK_INVALID(Q_PIN_CFG_MASTER_EN, q_spec, param->master_en))
+		pr_err("invalid master_en value for %s %d\n", name, pin);
+	else if (Q_CHK_INVALID(Q_PIN_CFG_AOUT_REF, q_spec, param->aout_ref))
+		pr_err("invalid aout_ref value for %s %d\n", name, pin);
+	else if (Q_CHK_INVALID(Q_PIN_CFG_AIN_ROUTE, q_spec, param->ain_route))
+		pr_err("invalid ain_route value for %s %d\n", name, pin);
+	else if (Q_CHK_INVALID(Q_PIN_CFG_CS_OUT, q_spec, param->cs_out))
+		pr_err("invalid cs_out value for %s %d\n", name, pin);
+	else
+		return 0;
+
+	return -EINVAL;
+}
+
+static inline u8 q_reg_get(u8 *reg, int shift, int mask)
+{
+	return (*reg & mask) >> shift;
+}
+
+static inline void q_reg_set(u8 *reg, int shift, int mask, int value)
+{
+	*reg |= (value << shift) & mask;
+}
+
+static inline void q_reg_clr_set(u8 *reg, int shift, int mask, int value)
+{
+	*reg &= ~mask;
+	*reg |= (value << shift) & mask;
+}
+
+/*
+ * Calculate the minimum number of registers that must be read / written
+ * in order to satisfy the full feature set of the given pin.
+ */
+static int qpnp_pin_ctl_regs_init(struct qpnp_pin_spec *q_spec)
+{
+	if (q_spec->type == Q_GPIO_TYPE)
+		q_spec->num_ctl_regs = 7;
+	else if (q_spec->type == Q_MPP_TYPE)
+		switch (q_spec->subtype) {
+		case Q_MPP_SUBTYPE_4CH_NO_SINK:
+			q_spec->num_ctl_regs = 12;
+			break;
+		case Q_MPP_SUBTYPE_4CH_NO_ANA_OUT:
+		case Q_MPP_SUBTYPE_4CH_FULL_FUNC:
+		case Q_MPP_SUBTYPE_8CH_FULL_FUNC:
+			q_spec->num_ctl_regs = 13;
+			break;
+		default:
+			pr_err("Invalid MPP subtype 0x%x\n", q_spec->subtype);
+			return -EINVAL;
+		}
+	else {
+		pr_err("Invalid type 0x%x\n", q_spec->type);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static int qpnp_pin_read_regs(struct qpnp_pin_chip *q_chip,
+			       struct qpnp_pin_spec *q_spec, u16 addr, u8 *buf)
+{
+	int bytes_left = q_spec->num_ctl_regs;
+	int rc;
+	u8 *reg_p = buf;
+
+	while (bytes_left > 0) {
+		rc = spmi_ext_register_readl(q_chip->spmi->ctrl, q_spec->slave,
+					addr, reg_p,
+					bytes_left < 8 ? bytes_left : 8);
+		if (rc)
+			return rc;
+		bytes_left -= 8;
+		reg_p += 8;
+		addr += 8;
+	}
+	return 0;
+}
+
+static int qpnp_pin_write_regs(struct qpnp_pin_chip *q_chip,
+				struct qpnp_pin_spec *q_spec, u16 addr, u8 *buf)
+{
+	int bytes_left = q_spec->num_ctl_regs;
+	int rc;
+	u8 *reg_p = buf;
+
+	while (bytes_left > 0) {
+		rc = spmi_ext_register_writel(q_chip->spmi->ctrl, q_spec->slave,
+					addr, reg_p,
+					bytes_left < 8 ? bytes_left : 8);
+		if (rc)
+			return rc;
+		bytes_left -= 8;
+		reg_p += 8;
+		addr += 8;
+	}
+	return 0;
+}
+
+static int qpnp_pin_cache_regs(struct qpnp_pin_chip *q_chip,
+			       struct qpnp_pin_spec *q_spec)
+{
+	int rc;
+	struct device *dev = &q_chip->spmi->dev;
+
+	rc = qpnp_pin_read_regs(q_chip, q_spec,
+				 Q_REG_ADDR(q_spec, Q_REG_MODE_CTL),
+				 &q_spec->regs[Q_REG_I_MODE_CTL]);
+	if (rc)
+		dev_err(dev, "%s: unable to read control regs\n", __func__);
+
+	return rc;
+}
+
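+/*
+ * Q_HAVE_HW_SP() ("have hardware support") is true when
+ * qpnp_pin_check_config() accepts the parameter for this pin, i.e. the
+ * pin type supports it and the requested value is in range.
+ */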
+#define Q_HAVE_HW_SP(idx, q_spec, val) \
+	(qpnp_pin_check_config(idx, q_spec, val) == 0)
+
+static int _qpnp_pin_config(struct qpnp_pin_chip *q_chip,
+			    struct qpnp_pin_spec *q_spec,
+			    struct qpnp_pin_cfg *param)
+{
+	struct device *dev = &q_chip->spmi->dev;
+	int rc;
+
+	rc = qpnp_pin_check_constraints(q_spec, param);
+	if (rc)
+		goto gpio_cfg;
+
+	/* set mode */
+	if (Q_HAVE_HW_SP(Q_PIN_CFG_MODE, q_spec, param->mode))
+		q_reg_clr_set(&q_spec->regs[Q_REG_I_MODE_CTL],
+			  Q_REG_MODE_SEL_SHIFT, Q_REG_MODE_SEL_MASK,
+			  param->mode);
+
+	/* output specific configuration */
+	if (Q_HAVE_HW_SP(Q_PIN_CFG_INVERT, q_spec, param->invert))
+		q_reg_clr_set(&q_spec->regs[Q_REG_I_MODE_CTL],
+			  Q_REG_OUT_INVERT_SHIFT, Q_REG_OUT_INVERT_MASK,
+			  param->invert);
+	if (Q_HAVE_HW_SP(Q_PIN_CFG_SELECT, q_spec, param->select))
+		q_reg_clr_set(&q_spec->regs[Q_REG_I_MODE_CTL],
+			  Q_REG_SRC_SEL_SHIFT, Q_REG_SRC_SEL_MASK,
+			  param->select);
+	if (Q_HAVE_HW_SP(Q_PIN_CFG_OUT_STRENGTH, q_spec, param->out_strength))
+		q_reg_clr_set(&q_spec->regs[Q_REG_I_DIG_OUT_CTL],
+			  Q_REG_OUT_STRENGTH_SHIFT, Q_REG_OUT_STRENGTH_MASK,
+			  param->out_strength);
+	if (Q_HAVE_HW_SP(Q_PIN_CFG_OUTPUT_TYPE, q_spec, param->output_type))
+		q_reg_clr_set(&q_spec->regs[Q_REG_I_DIG_OUT_CTL],
+			  Q_REG_OUT_TYPE_SHIFT, Q_REG_OUT_TYPE_MASK,
+			  param->output_type);
+
+	/* config applicable for both input / output */
+	if (Q_HAVE_HW_SP(Q_PIN_CFG_VIN_SEL, q_spec, param->vin_sel))
+		q_reg_clr_set(&q_spec->regs[Q_REG_I_DIG_VIN_CTL],
+			  Q_REG_VIN_SHIFT, Q_REG_VIN_MASK,
+			  param->vin_sel);
+	if (Q_HAVE_HW_SP(Q_PIN_CFG_PULL, q_spec, param->pull))
+		q_reg_clr_set(&q_spec->regs[Q_REG_I_DIG_PULL_CTL],
+			  Q_REG_PULL_SHIFT, Q_REG_PULL_MASK,
+			  param->pull);
+	if (Q_HAVE_HW_SP(Q_PIN_CFG_MASTER_EN, q_spec, param->master_en))
+		q_reg_clr_set(&q_spec->regs[Q_REG_I_EN_CTL],
+			  Q_REG_MASTER_EN_SHIFT, Q_REG_MASTER_EN_MASK,
+			  param->master_en);
+
+	/* mpp specific config */
+	if (Q_HAVE_HW_SP(Q_PIN_CFG_AOUT_REF, q_spec, param->aout_ref))
+		q_reg_clr_set(&q_spec->regs[Q_REG_I_AOUT_CTL],
+			  Q_REG_AOUT_REF_SHIFT, Q_REG_AOUT_REF_MASK,
+			  param->aout_ref);
+	if (Q_HAVE_HW_SP(Q_PIN_CFG_AIN_ROUTE, q_spec, param->ain_route))
+		q_reg_clr_set(&q_spec->regs[Q_REG_I_AIN_CTL],
+			  Q_REG_AIN_ROUTE_SHIFT, Q_REG_AIN_ROUTE_MASK,
+			  param->ain_route);
+	if (Q_HAVE_HW_SP(Q_PIN_CFG_CS_OUT, q_spec, param->cs_out))
+		q_reg_clr_set(&q_spec->regs[Q_REG_I_SINK_CTL],
+			  Q_REG_CS_OUT_SHIFT, Q_REG_CS_OUT_MASK,
+			  param->cs_out);
+
+	rc = qpnp_pin_write_regs(q_chip, q_spec,
+				 Q_REG_ADDR(q_spec, Q_REG_MODE_CTL),
+				 &q_spec->regs[Q_REG_I_MODE_CTL]);
+	if (rc) {
+		dev_err(&q_chip->spmi->dev, "%s: unable to write control regs\n",
+								__func__);
+		goto gpio_cfg;
+	}
+
+	return 0;
+
+gpio_cfg:
+	dev_err(dev, "%s: unable to set default config for pmic gpio %d\n",
+						__func__, q_spec->pmic_pin);
+
+	return rc;
+}
+
+int qpnp_pin_config(int gpio, struct qpnp_pin_cfg *param)
+{
+	int rc, chip_offset;
+	struct qpnp_pin_chip *q_chip;
+	struct qpnp_pin_spec *q_spec = NULL;
+	struct gpio_chip *gpio_chip;
+
+	if (param == NULL)
+		return -EINVAL;
+
+	mutex_lock(&qpnp_pin_chips_lock);
+	list_for_each_entry(q_chip, &qpnp_pin_chips, chip_list) {
+		gpio_chip = &q_chip->gpio_chip;
+		if (gpio >= gpio_chip->base
+				&& gpio < gpio_chip->base + gpio_chip->ngpio) {
+			chip_offset = gpio - gpio_chip->base;
+			q_spec = qpnp_chip_gpio_get_spec(q_chip, chip_offset);
+			if (WARN_ON(!q_spec)) {
+				mutex_unlock(&qpnp_pin_chips_lock);
+				return -ENODEV;
+			}
+			break;
+		}
+	}
+	mutex_unlock(&qpnp_pin_chips_lock);
+
+	if (!q_spec)
+		return -ENODEV;
+
+	rc = _qpnp_pin_config(q_chip, q_spec, param);
+
+	return rc;
+}
+EXPORT_SYMBOL(qpnp_pin_config);
+
+#define Q_MAX_CHIP_NAME 128
+int qpnp_pin_map(const char *name, uint32_t pmic_pin)
+{
+	struct qpnp_pin_chip *q_chip;
+	struct qpnp_pin_spec *q_spec = NULL;
+
+	mutex_lock(&qpnp_pin_chips_lock);
+	list_for_each_entry(q_chip, &qpnp_pin_chips, chip_list) {
+		if (strncmp(q_chip->gpio_chip.label, name,
+							Q_MAX_CHIP_NAME) != 0)
+			continue;
+		if (q_chip->pmic_pin_lowest <= pmic_pin &&
+		    q_chip->pmic_pin_highest >= pmic_pin) {
+			q_spec = qpnp_pmic_pin_get_spec(q_chip, pmic_pin);
+			mutex_unlock(&qpnp_pin_chips_lock);
+			if (WARN_ON(!q_spec))
+				return -ENODEV;
+			return q_chip->gpio_chip.base + q_spec->gpio_chip_idx;
+		}
+	}
+	mutex_unlock(&qpnp_pin_chips_lock);
+	return -EINVAL;
+}
+EXPORT_SYMBOL(qpnp_pin_map);
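+
+/*
+ * Illustrative use of the two exported helpers above. This is a sketch
+ * only: the chip label, pin number and out_strength value are made up,
+ * and the real encodings come from <linux/qpnp/pin.h>.
+ *
+ *	struct qpnp_pin_cfg cfg = {
+ *		.mode		= QPNP_PIN_MODE_DIG_OUT,
+ *		.out_strength	= 1,
+ *	};
+ *	int gpio = qpnp_pin_map("pm8941-gpio", 3);
+ *
+ *	if (gpio >= 0 && !qpnp_pin_config(gpio, &cfg))
+ *		gpio_direction_output(gpio, 1);
+ *
+ * Note that every field of qpnp_pin_cfg must hold an in-range value for
+ * the pin type, or qpnp_pin_check_constraints() will reject the request.
+ */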
+
+static int qpnp_pin_to_irq(struct gpio_chip *gpio_chip, unsigned offset)
+{
+	struct qpnp_pin_chip *q_chip = dev_get_drvdata(gpio_chip->dev);
+	struct qpnp_pin_spec *q_spec;
+
+	q_spec = qpnp_chip_gpio_get_spec(q_chip, offset);
+	if (!q_spec)
+		return -EINVAL;
+
+	return q_spec->irq;
+}
+
+static int qpnp_pin_get(struct gpio_chip *gpio_chip, unsigned offset)
+{
+	int rc, ret_val;
+	struct qpnp_pin_chip *q_chip = dev_get_drvdata(gpio_chip->dev);
+	struct qpnp_pin_spec *q_spec = NULL;
+	u8 buf[1];
+
+	if (WARN_ON(!q_chip))
+		return -ENODEV;
+
+	q_spec = qpnp_chip_gpio_get_spec(q_chip, offset);
+	if (WARN_ON(!q_spec))
+		return -ENODEV;
+
+	/* gpio val is from RT status iff input is enabled */
+	if (q_reg_get(&q_spec->regs[Q_REG_I_MODE_CTL],
+		      Q_REG_MODE_SEL_SHIFT, Q_REG_MODE_SEL_MASK)
+						== QPNP_PIN_MODE_DIG_IN) {
+		/* INT_RT_STS */
+		rc = spmi_ext_register_readl(q_chip->spmi->ctrl, q_spec->slave,
+				Q_REG_ADDR(q_spec, Q_REG_STATUS1),
+				&buf[0], 1);
+		if (rc)
+			return rc;
+		return buf[0];
+	}
+
+	ret_val = q_reg_get(&q_spec->regs[Q_REG_I_MODE_CTL],
+			    Q_REG_OUT_INVERT_SHIFT, Q_REG_OUT_INVERT_MASK);
+	return ret_val;
+}
+
+static int __qpnp_pin_set(struct qpnp_pin_chip *q_chip,
+			   struct qpnp_pin_spec *q_spec, int value)
+{
+	int rc;
+
+	if (!q_chip || !q_spec)
+		return -EINVAL;
+
+	if (value)
+		q_reg_clr_set(&q_spec->regs[Q_REG_I_MODE_CTL],
+			  Q_REG_OUT_INVERT_SHIFT, Q_REG_OUT_INVERT_MASK, 1);
+	else
+		q_reg_clr_set(&q_spec->regs[Q_REG_I_MODE_CTL],
+			  Q_REG_OUT_INVERT_SHIFT, Q_REG_OUT_INVERT_MASK, 0);
+
+	rc = spmi_ext_register_writel(q_chip->spmi->ctrl, q_spec->slave,
+			      Q_REG_ADDR(q_spec, Q_REG_MODE_CTL),
+			      &q_spec->regs[Q_REG_I_MODE_CTL], 1);
+	if (rc)
+		dev_err(&q_chip->spmi->dev, "%s: spmi write failed\n",
+								__func__);
+	return rc;
+}
+
+static void qpnp_pin_set(struct gpio_chip *gpio_chip,
+		unsigned offset, int value)
+{
+	struct qpnp_pin_chip *q_chip = dev_get_drvdata(gpio_chip->dev);
+	struct qpnp_pin_spec *q_spec;
+
+	if (WARN_ON(!q_chip))
+		return;
+
+	q_spec = qpnp_chip_gpio_get_spec(q_chip, offset);
+	if (WARN_ON(!q_spec))
+		return;
+
+	__qpnp_pin_set(q_chip, q_spec, value);
+}
+
+static int qpnp_pin_set_mode(struct qpnp_pin_chip *q_chip,
+				   struct qpnp_pin_spec *q_spec, int mode)
+{
+	int rc;
+
+	if (!q_chip || !q_spec)
+		return -EINVAL;
+
+	if (mode >= QPNP_PIN_MODE_INVALID) {
+		pr_err("invalid mode specification %d\n", mode);
+		return -EINVAL;
+	}
+
+	q_reg_clr_set(&q_spec->regs[Q_REG_I_MODE_CTL],
+			Q_REG_MODE_SEL_SHIFT,
+			Q_REG_MODE_SEL_MASK,
+			mode);
+
+	rc = spmi_ext_register_writel(q_chip->spmi->ctrl, q_spec->slave,
+			      Q_REG_ADDR(q_spec, Q_REG_MODE_CTL),
+			      &q_spec->regs[Q_REG_I_MODE_CTL], 1);
+	return rc;
+}
+
+static int qpnp_pin_direction_input(struct gpio_chip *gpio_chip,
+		unsigned offset)
+{
+	struct qpnp_pin_chip *q_chip = dev_get_drvdata(gpio_chip->dev);
+	struct qpnp_pin_spec *q_spec;
+
+	if (WARN_ON(!q_chip))
+		return -ENODEV;
+
+	q_spec = qpnp_chip_gpio_get_spec(q_chip, offset);
+	if (WARN_ON(!q_spec))
+		return -ENODEV;
+
+	return qpnp_pin_set_mode(q_chip, q_spec, QPNP_PIN_MODE_DIG_IN);
+}
+
+static int qpnp_pin_direction_output(struct gpio_chip *gpio_chip,
+		unsigned offset,
+		int val)
+{
+	int rc;
+	struct qpnp_pin_chip *q_chip = dev_get_drvdata(gpio_chip->dev);
+	struct qpnp_pin_spec *q_spec;
+
+	if (WARN_ON(!q_chip))
+		return -ENODEV;
+
+	q_spec = qpnp_chip_gpio_get_spec(q_chip, offset);
+	if (WARN_ON(!q_spec))
+		return -ENODEV;
+
+	rc = __qpnp_pin_set(q_chip, q_spec, val);
+	if (rc)
+		return rc;
+
+	rc = qpnp_pin_set_mode(q_chip, q_spec, QPNP_PIN_MODE_DIG_OUT);
+
+	return rc;
+}
+
+static int qpnp_pin_of_gpio_xlate(struct gpio_chip *gpio_chip,
+				   const struct of_phandle_args *gpio_spec,
+				   u32 *flags)
+{
+	struct qpnp_pin_chip *q_chip = dev_get_drvdata(gpio_chip->dev);
+	struct qpnp_pin_spec *q_spec;
+
+	if (WARN_ON(gpio_chip->of_gpio_n_cells < 2)) {
+		pr_err("of_gpio_n_cells < 2\n");
+		return -EINVAL;
+	}
+
+	q_spec = qpnp_pmic_pin_get_spec(q_chip, gpio_spec->args[0]);
+	if (!q_spec) {
+		pr_err("no such PMIC gpio %u in device topology\n",
+							gpio_spec->args[0]);
+		return -EINVAL;
+	}
+
+	if (flags)
+		*flags = gpio_spec->args[1];
+
+	return q_spec->gpio_chip_idx;
+}
+
+static int qpnp_pin_apply_config(struct qpnp_pin_chip *q_chip,
+				  struct qpnp_pin_spec *q_spec)
+{
+	struct qpnp_pin_cfg param;
+	struct device_node *node = q_spec->node;
+	int rc;
+
+	param.mode	   = q_reg_get(&q_spec->regs[Q_REG_I_MODE_CTL],
+				       Q_REG_MODE_SEL_SHIFT,
+				       Q_REG_MODE_SEL_MASK);
+	param.output_type  = q_reg_get(&q_spec->regs[Q_REG_I_DIG_OUT_CTL],
+				       Q_REG_OUT_TYPE_SHIFT,
+				       Q_REG_OUT_TYPE_MASK);
+	param.invert	   = q_reg_get(&q_spec->regs[Q_REG_I_MODE_CTL],
+				       Q_REG_OUT_INVERT_SHIFT,
+				       Q_REG_OUT_INVERT_MASK);
+	param.pull	   = q_reg_get(&q_spec->regs[Q_REG_I_DIG_PULL_CTL],
+				       Q_REG_PULL_SHIFT, Q_REG_PULL_MASK);
+	param.vin_sel	   = q_reg_get(&q_spec->regs[Q_REG_I_DIG_VIN_CTL],
+				       Q_REG_VIN_SHIFT, Q_REG_VIN_MASK);
+	param.out_strength = q_reg_get(&q_spec->regs[Q_REG_I_DIG_OUT_CTL],
+				       Q_REG_OUT_STRENGTH_SHIFT,
+				       Q_REG_OUT_STRENGTH_MASK);
+	param.select   = q_reg_get(&q_spec->regs[Q_REG_I_MODE_CTL],
+				       Q_REG_SRC_SEL_SHIFT, Q_REG_SRC_SEL_MASK);
+	param.master_en    = q_reg_get(&q_spec->regs[Q_REG_I_EN_CTL],
+				       Q_REG_MASTER_EN_SHIFT,
+				       Q_REG_MASTER_EN_MASK);
+	param.aout_ref    = q_reg_get(&q_spec->regs[Q_REG_I_AOUT_CTL],
+				       Q_REG_AOUT_REF_SHIFT,
+				       Q_REG_AOUT_REF_MASK);
+	param.ain_route    = q_reg_get(&q_spec->regs[Q_REG_I_AIN_CTL],
+				       Q_REG_AIN_ROUTE_SHIFT,
+				       Q_REG_AIN_ROUTE_MASK);
+	param.cs_out    = q_reg_get(&q_spec->regs[Q_REG_I_SINK_CTL],
+				       Q_REG_CS_OUT_SHIFT,
+				       Q_REG_CS_OUT_MASK);
+
+	of_property_read_u32(node, "qcom,mode",
+		&param.mode);
+	of_property_read_u32(node, "qcom,output-type",
+		&param.output_type);
+	of_property_read_u32(node, "qcom,invert",
+		&param.invert);
+	of_property_read_u32(node, "qcom,pull",
+		&param.pull);
+	of_property_read_u32(node, "qcom,vin-sel",
+		&param.vin_sel);
+	of_property_read_u32(node, "qcom,out-strength",
+		&param.out_strength);
+	of_property_read_u32(node, "qcom,src-select",
+		&param.select);
+	of_property_read_u32(node, "qcom,master-en",
+		&param.master_en);
+	of_property_read_u32(node, "qcom,aout-ref",
+		&param.aout_ref);
+	of_property_read_u32(node, "qcom,ain-route",
+		&param.ain_route);
+	of_property_read_u32(node, "qcom,cs-out",
+		&param.cs_out);
+	rc = _qpnp_pin_config(q_chip, q_spec, &param);
+
+	return rc;
+}
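+
+/*
+ * qpnp_pin_apply_config() above lets a devicetree node override any of
+ * the register defaults read back from the hardware.  A hypothetical
+ * per-pin node (property values illustrative only; see the binding
+ * documentation for the real encodings) might look like:
+ *
+ *	qcom,gpio@c200 {
+ *		qcom,pin-num = <3>;
+ *		qcom,mode = <1>;
+ *		qcom,output-type = <0>;
+ *		qcom,pull = <5>;
+ *		qcom,vin-sel = <2>;
+ *		qcom,out-strength = <1>;
+ *		qcom,src-select = <0>;
+ *		qcom,master-en = <1>;
+ *	};
+ */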
+
+static int qpnp_pin_free_chip(struct qpnp_pin_chip *q_chip)
+{
+	struct spmi_device *spmi = q_chip->spmi;
+	int rc, i;
+
+	if (q_chip->chip_gpios)
+		for (i = 0; i < spmi->num_dev_node; i++)
+			kfree(q_chip->chip_gpios[i]);
+
+	mutex_lock(&qpnp_pin_chips_lock);
+	list_del(&q_chip->chip_list);
+	mutex_unlock(&qpnp_pin_chips_lock);
+	rc = gpiochip_remove(&q_chip->gpio_chip);
+	if (rc)
+		dev_err(&q_chip->spmi->dev, "%s: unable to remove gpio\n",
+				__func__);
+	kfree(q_chip->chip_gpios);
+	kfree(q_chip->pmic_pins);
+	kfree(q_chip);
+	return rc;
+}
+
+#ifdef CONFIG_GPIO_QPNP_PIN_DEBUG
+struct qpnp_pin_reg {
+	uint32_t addr;
+	uint32_t idx;
+	uint32_t shift;
+	uint32_t mask;
+};
+
+static struct dentry *driver_dfs_dir;
+
+static int qpnp_pin_reg_attr(enum qpnp_pin_param_type type,
+			     struct qpnp_pin_reg *cfg)
+{
+	switch (type) {
+	case Q_PIN_CFG_MODE:
+		cfg->addr = Q_REG_MODE_CTL;
+		cfg->idx = Q_REG_I_MODE_CTL;
+		cfg->shift = Q_REG_MODE_SEL_SHIFT;
+		cfg->mask = Q_REG_MODE_SEL_MASK;
+		break;
+	case Q_PIN_CFG_OUTPUT_TYPE:
+		cfg->addr = Q_REG_DIG_OUT_CTL;
+		cfg->idx = Q_REG_I_DIG_OUT_CTL;
+		cfg->shift = Q_REG_OUT_TYPE_SHIFT;
+		cfg->mask = Q_REG_OUT_TYPE_MASK;
+		break;
+	case Q_PIN_CFG_INVERT:
+		cfg->addr = Q_REG_MODE_CTL;
+		cfg->idx = Q_REG_I_MODE_CTL;
+		cfg->shift = Q_REG_OUT_INVERT_SHIFT;
+		cfg->mask = Q_REG_OUT_INVERT_MASK;
+		break;
+	case Q_PIN_CFG_PULL:
+		cfg->addr = Q_REG_DIG_PULL_CTL;
+		cfg->idx = Q_REG_I_DIG_PULL_CTL;
+		cfg->shift = Q_REG_PULL_SHIFT;
+		cfg->mask = Q_REG_PULL_MASK;
+		break;
+	case Q_PIN_CFG_VIN_SEL:
+		cfg->addr = Q_REG_DIG_VIN_CTL;
+		cfg->idx = Q_REG_I_DIG_VIN_CTL;
+		cfg->shift = Q_REG_VIN_SHIFT;
+		cfg->mask = Q_REG_VIN_MASK;
+		break;
+	case Q_PIN_CFG_OUT_STRENGTH:
+		cfg->addr = Q_REG_DIG_OUT_CTL;
+		cfg->idx = Q_REG_I_DIG_OUT_CTL;
+		cfg->shift = Q_REG_OUT_STRENGTH_SHIFT;
+		cfg->mask = Q_REG_OUT_STRENGTH_MASK;
+		break;
+	case Q_PIN_CFG_SELECT:
+		cfg->addr = Q_REG_MODE_CTL;
+		cfg->idx = Q_REG_I_MODE_CTL;
+		cfg->shift = Q_REG_SRC_SEL_SHIFT;
+		cfg->mask = Q_REG_SRC_SEL_MASK;
+		break;
+	case Q_PIN_CFG_MASTER_EN:
+		cfg->addr = Q_REG_EN_CTL;
+		cfg->idx = Q_REG_I_EN_CTL;
+		cfg->shift = Q_REG_MASTER_EN_SHIFT;
+		cfg->mask = Q_REG_MASTER_EN_MASK;
+		break;
+	case Q_PIN_CFG_AOUT_REF:
+		cfg->addr = Q_REG_AOUT_CTL;
+		cfg->idx = Q_REG_I_AOUT_CTL;
+		cfg->shift = Q_REG_AOUT_REF_SHIFT;
+		cfg->mask = Q_REG_AOUT_REF_MASK;
+		break;
+	case Q_PIN_CFG_AIN_ROUTE:
+		cfg->addr = Q_REG_AIN_CTL;
+		cfg->idx = Q_REG_I_AIN_CTL;
+		cfg->shift = Q_REG_AIN_ROUTE_SHIFT;
+		cfg->mask = Q_REG_AIN_ROUTE_MASK;
+		break;
+	case Q_PIN_CFG_CS_OUT:
+		cfg->addr = Q_REG_SINK_CTL;
+		cfg->idx = Q_REG_I_SINK_CTL;
+		cfg->shift = Q_REG_CS_OUT_SHIFT;
+		cfg->mask = Q_REG_CS_OUT_MASK;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int qpnp_pin_debugfs_get(void *data, u64 *val)
+{
+	enum qpnp_pin_param_type *idx = data;
+	struct qpnp_pin_spec *q_spec;
+	struct qpnp_pin_reg cfg = {};
+	int rc;
+
+	rc = qpnp_pin_reg_attr(*idx, &cfg);
+	if (rc)
+		return rc;
+	q_spec = container_of(idx, struct qpnp_pin_spec, params[*idx]);
+	*val = q_reg_get(&q_spec->regs[cfg.idx], cfg.shift, cfg.mask);
+	return 0;
+}
+
+static int qpnp_pin_debugfs_set(void *data, u64 val)
+{
+	enum qpnp_pin_param_type *idx = data;
+	struct qpnp_pin_spec *q_spec;
+	struct qpnp_pin_chip *q_chip;
+	struct qpnp_pin_reg cfg = {};
+	int rc;
+
+	q_spec = container_of(idx, struct qpnp_pin_spec, params[*idx]);
+	q_chip = q_spec->q_chip;
+
+	rc = qpnp_pin_check_config(*idx, q_spec, val);
+	if (rc)
+		return rc;
+
+	rc = qpnp_pin_reg_attr(*idx, &cfg);
+	if (rc)
+		return rc;
+	q_reg_clr_set(&q_spec->regs[cfg.idx], cfg.shift, cfg.mask, val);
+	rc = spmi_ext_register_writel(q_chip->spmi->ctrl, q_spec->slave,
+				      Q_REG_ADDR(q_spec, cfg.addr),
+				      &q_spec->regs[cfg.idx], 1);
+
+	return rc;
+}
+DEFINE_SIMPLE_ATTRIBUTE(qpnp_pin_fops, qpnp_pin_debugfs_get,
+			qpnp_pin_debugfs_set, "%llu\n");
+
+#define DEBUGFS_BUF_SIZE 11 /* supports 2^32 in decimal */
+
+struct qpnp_pin_debugfs_args {
+	enum qpnp_pin_param_type type;
+	const char *filename;
+};
+
+static struct qpnp_pin_debugfs_args dfs_args[] = {
+	{ Q_PIN_CFG_MODE, "mode" },
+	{ Q_PIN_CFG_OUTPUT_TYPE, "output_type" },
+	{ Q_PIN_CFG_INVERT, "invert" },
+	{ Q_PIN_CFG_PULL, "pull" },
+	{ Q_PIN_CFG_VIN_SEL, "vin_sel" },
+	{ Q_PIN_CFG_OUT_STRENGTH, "out_strength" },
+	{ Q_PIN_CFG_SELECT, "select" },
+	{ Q_PIN_CFG_MASTER_EN, "master_en" },
+	{ Q_PIN_CFG_AOUT_REF, "aout_ref" },
+	{ Q_PIN_CFG_AIN_ROUTE, "ain_route" },
+	{ Q_PIN_CFG_CS_OUT, "cs_out" },
+};
+
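+/*
+ * Each pin is exposed under debugfs as (path illustrative, assuming
+ * debugfs is mounted at /sys/kernel/debug):
+ *
+ *	/sys/kernel/debug/qpnp_pin/<chip label>/<pmic pin>/<param>
+ *
+ * Reading a file returns the cached register field for that parameter;
+ * writing validates the value with qpnp_pin_check_config() and then
+ * writes it back to the hardware over SPMI.
+ */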
+static int qpnp_pin_debugfs_create(struct qpnp_pin_chip *q_chip)
+{
+	struct spmi_device *spmi = q_chip->spmi;
+	struct device *dev = &spmi->dev;
+	struct qpnp_pin_spec *q_spec;
+	enum qpnp_pin_param_type *params;
+	enum qpnp_pin_param_type type;
+	char pmic_pin[DEBUGFS_BUF_SIZE];
+	const char *filename;
+	struct dentry *dfs, *dfs_io_dir;
+	int i, j, rc;
+
+	BUG_ON(Q_NUM_PARAMS != ARRAY_SIZE(dfs_args));
+
+	q_chip->dfs_dir = debugfs_create_dir(q_chip->gpio_chip.label,
+							driver_dfs_dir);
+	if (q_chip->dfs_dir == NULL) {
+		dev_err(dev, "%s: cannot register chip debugfs directory %s\n",
+						__func__, dev->of_node->name);
+		return -ENODEV;
+	}
+
+	for (i = 0; i < spmi->num_dev_node; i++) {
+		q_spec = qpnp_chip_gpio_get_spec(q_chip, i);
+		params = q_spec->params;
+		snprintf(pmic_pin, DEBUGFS_BUF_SIZE, "%u", q_spec->pmic_pin);
+		dfs_io_dir = debugfs_create_dir(pmic_pin, q_chip->dfs_dir);
+		if (dfs_io_dir == NULL)
+			goto dfs_err;
+
+		for (j = 0; j < Q_NUM_PARAMS; j++) {
+			type = dfs_args[j].type;
+			filename = dfs_args[j].filename;
+
+			/*
+			 * Use a value of '0' to see if the pin has even basic
+			 * support for a function. Do not create a file if
+			 * it doesn't.
+			 */
+			rc = qpnp_pin_check_config(type, q_spec, 0);
+			if (rc == -ENXIO)
+				continue;
+
+			params[type] = type;
+			dfs = debugfs_create_file(
+					filename,
+					S_IRUGO | S_IWUSR,
+					dfs_io_dir,
+					&q_spec->params[type],
+					&qpnp_pin_fops);
+			if (dfs == NULL)
+				goto dfs_err;
+		}
+	}
+	return 0;
+dfs_err:
+	dev_err(dev, "%s: cannot register debugfs for pmic gpio %u on chip %s\n",
+			__func__, q_spec->pmic_pin, dev->of_node->name);
+	debugfs_remove_recursive(q_chip->dfs_dir);
+	return -ENFILE;
+}
+#else
+static int qpnp_pin_debugfs_create(struct qpnp_pin_chip *q_chip)
+{
+	return 0;
+}
+#endif
+
+static int qpnp_pin_probe(struct spmi_device *spmi)
+{
+	struct qpnp_pin_chip *q_chip;
+	struct qpnp_pin_spec *q_spec;
+	struct resource *res;
+	struct spmi_resource *d_node;
+	int i, rc;
+	u32 lowest_gpio = UINT_MAX, highest_gpio = 0;
+	u32 intspec[3], gpio;
+	u8 buf[2];
+	const char *dev_name;
+
+	dev_name = spmi_get_primary_dev_name(spmi);
+	if (!dev_name) {
+		dev_err(&spmi->dev, "%s: label binding undefined for node %s\n",
+					__func__, spmi->dev.of_node->full_name);
+		return -EINVAL;
+	}
+
+	q_chip = kzalloc(sizeof(*q_chip), GFP_KERNEL);
+	if (!q_chip) {
+		dev_err(&spmi->dev, "%s: Can't allocate gpio_chip\n",
+								__func__);
+		return -ENOMEM;
+	}
+	q_chip->spmi = spmi;
+	dev_set_drvdata(&spmi->dev, q_chip);
+
+	mutex_lock(&qpnp_pin_chips_lock);
+	list_add(&q_chip->chip_list, &qpnp_pin_chips);
+	mutex_unlock(&qpnp_pin_chips_lock);
+
+	/* first scan through nodes to find the range required for allocation */
+	for (i = 0; i < spmi->num_dev_node; i++) {
+		rc = of_property_read_u32(spmi->dev_node[i].of_node,
+						"qcom,pin-num", &gpio);
+		if (rc) {
+			dev_err(&spmi->dev, "%s: unable to get qcom,pin-num property\n",
+								__func__);
+			goto err_probe;
+		}
+
+		if (gpio < lowest_gpio)
+			lowest_gpio = gpio;
+		if (gpio > highest_gpio)
+			highest_gpio = gpio;
+	}
+
+	if (highest_gpio < lowest_gpio) {
+		dev_err(&spmi->dev, "%s: no device nodes specified in topology\n",
+								__func__);
+		rc = -EINVAL;
+		goto err_probe;
+	} else if (lowest_gpio == 0) {
+		dev_err(&spmi->dev, "%s: 0 is not a valid PMIC GPIO\n",
+								__func__);
+		rc = -EINVAL;
+		goto err_probe;
+	}
+
+	q_chip->pmic_pin_lowest = lowest_gpio;
+	q_chip->pmic_pin_highest = highest_gpio;
+
+	/* allocate gpio lookup tables */
+	q_chip->pmic_pins = kzalloc(sizeof(struct qpnp_pin_spec *) *
+					(highest_gpio - lowest_gpio + 1),
+					GFP_KERNEL);
+	q_chip->chip_gpios = kzalloc(sizeof(struct qpnp_pin_spec *) *
+						spmi->num_dev_node, GFP_KERNEL);
+	if (!q_chip->pmic_pins || !q_chip->chip_gpios) {
+		dev_err(&spmi->dev, "%s: unable to allocate memory\n",
+								__func__);
+		rc = -ENOMEM;
+		goto err_probe;
+	}
+
+	/* get interrupt controller device_node */
+	q_chip->int_ctrl = of_irq_find_parent(spmi->dev.of_node);
+	if (!q_chip->int_ctrl) {
+		dev_err(&spmi->dev, "%s: Can't find interrupt parent\n",
+								__func__);
+		rc = -EINVAL;
+		goto err_probe;
+	}
+
+	/* now scan through again and populate the lookup table */
+	for (i = 0; i < spmi->num_dev_node; i++) {
+		d_node = &spmi->dev_node[i];
+		res = spmi_get_resource(spmi, d_node, IORESOURCE_MEM, 0);
+		if (!res) {
+			dev_err(&spmi->dev, "%s: node %s has no base address definition\n",
+				__func__, d_node->of_node->full_name);
+			rc = -EINVAL;
+			goto err_probe;
+		}
+
+		rc = of_property_read_u32(d_node->of_node,
+							"qcom,pin-num", &gpio);
+		if (rc) {
+			dev_err(&spmi->dev, "%s: unable to get qcom,pin-num property\n",
+								__func__);
+			goto err_probe;
+		}
+
+		q_spec = kzalloc(sizeof(struct qpnp_pin_spec),
+							GFP_KERNEL);
+		if (!q_spec) {
+			dev_err(&spmi->dev, "%s: unable to allocate memory\n",
+								__func__);
+			rc = -ENOMEM;
+			goto err_probe;
+		}
+
+		q_spec->slave = spmi->sid;
+		q_spec->offset = res->start;
+		q_spec->gpio_chip_idx = i;
+		q_spec->pmic_pin = gpio;
+		q_spec->node = d_node->of_node;
+		q_spec->q_chip = q_chip;
+
+		rc = spmi_ext_register_readl(spmi->ctrl, q_spec->slave,
+				Q_REG_ADDR(q_spec, Q_REG_TYPE), &buf[0], 2);
+		if (rc) {
+			dev_err(&spmi->dev, "%s: unable to read type regs\n",
+						__func__);
+			goto err_probe;
+		}
+		q_spec->type	= buf[0];
+		q_spec->subtype = buf[1];
+
+		rc = qpnp_pin_ctl_regs_init(q_spec);
+		if (rc)
+			goto err_probe;
+
+		/* call into irq_domain to get irq mapping */
+		intspec[0] = q_chip->spmi->sid;
+		intspec[1] = (q_spec->offset >> 8) & 0xFF;
+		intspec[2] = 0;
+		q_spec->irq = irq_create_of_mapping(q_chip->int_ctrl,
+							intspec, 3);
+		if (!q_spec->irq) {
+			dev_err(&spmi->dev, "%s: invalid irq for gpio %u\n",
+								__func__, gpio);
+			rc = -EINVAL;
+			goto err_probe;
+		}
+		/* initialize lookup table params */
+		qpnp_pmic_pin_set_spec(q_chip, gpio, q_spec);
+		qpnp_chip_gpio_set_spec(q_chip, i, q_spec);
+	}
+
+	q_chip->gpio_chip.base = -1;
+	q_chip->gpio_chip.ngpio = spmi->num_dev_node;
+	q_chip->gpio_chip.label = dev_name;
+	q_chip->gpio_chip.direction_input = qpnp_pin_direction_input;
+	q_chip->gpio_chip.direction_output = qpnp_pin_direction_output;
+	q_chip->gpio_chip.to_irq = qpnp_pin_to_irq;
+	q_chip->gpio_chip.get = qpnp_pin_get;
+	q_chip->gpio_chip.set = qpnp_pin_set;
+	q_chip->gpio_chip.dev = &spmi->dev;
+	q_chip->gpio_chip.of_xlate = qpnp_pin_of_gpio_xlate;
+	q_chip->gpio_chip.of_gpio_n_cells = 2;
+	q_chip->gpio_chip.can_sleep = 0;
+
+	rc = gpiochip_add(&q_chip->gpio_chip);
+	if (rc) {
+		dev_err(&spmi->dev, "%s: Can't add gpio chip, rc = %d\n",
+								__func__, rc);
+		goto err_probe;
+	}
+
+	/* now configure gpio config defaults if they exist */
+	for (i = 0; i < spmi->num_dev_node; i++) {
+		q_spec = qpnp_chip_gpio_get_spec(q_chip, i);
+		if (WARN_ON(!q_spec)) {
+			rc = -ENODEV;
+			goto err_probe;
+		}
+
+		rc = qpnp_pin_cache_regs(q_chip, q_spec);
+		if (rc)
+			goto err_probe;
+
+		rc = qpnp_pin_apply_config(q_chip, q_spec);
+		if (rc)
+			goto err_probe;
+	}
+
+	dev_dbg(&spmi->dev, "%s: gpio_chip registered between %d-%u\n",
+			__func__, q_chip->gpio_chip.base,
+			(q_chip->gpio_chip.base + q_chip->gpio_chip.ngpio) - 1);
+
+	rc = qpnp_pin_debugfs_create(q_chip);
+	if (rc) {
+		dev_err(&spmi->dev, "%s: debugfs creation failed\n", __func__);
+		goto err_probe;
+	}
+
+	return 0;
+
+err_probe:
+	qpnp_pin_free_chip(q_chip);
+	return rc;
+}
+
+static int qpnp_pin_remove(struct spmi_device *spmi)
+{
+	struct qpnp_pin_chip *q_chip = dev_get_drvdata(&spmi->dev);
+
+	debugfs_remove_recursive(q_chip->dfs_dir);
+
+	return qpnp_pin_free_chip(q_chip);
+}
+
+static struct of_device_id spmi_match_table[] = {
+	{	.compatible = "qcom,qpnp-pin",
+	},
+	{}
+};
+
+static const struct spmi_device_id qpnp_pin_id[] = {
+	{ "qcom,qpnp-pin", 0 },
+	{ }
+};
+MODULE_DEVICE_TABLE(spmi, qpnp_pin_id);
+
+static struct spmi_driver qpnp_pin_driver = {
+	.driver		= {
+		.name	= "qcom,qpnp-pin",
+		.of_match_table = spmi_match_table,
+	},
+	.probe		= qpnp_pin_probe,
+	.remove		= qpnp_pin_remove,
+	.id_table	= qpnp_pin_id,
+};
+
+static int __init qpnp_pin_init(void)
+{
+#ifdef CONFIG_GPIO_QPNP_PIN_DEBUG
+	driver_dfs_dir = debugfs_create_dir("qpnp_pin", NULL);
+	if (driver_dfs_dir == NULL)
+		pr_err("Cannot register top level debugfs directory\n");
+#endif
+
+	return spmi_driver_register(&qpnp_pin_driver);
+}
+
+static void __exit qpnp_pin_exit(void)
+{
+#ifdef CONFIG_GPIO_QPNP_PIN_DEBUG
+	debugfs_remove_recursive(driver_dfs_dir);
+#endif
+	spmi_driver_unregister(&qpnp_pin_driver);
+}
+
+MODULE_DESCRIPTION("QPNP PMIC gpio driver");
+MODULE_LICENSE("GPL v2");
+
+module_init(qpnp_pin_init);
+module_exit(qpnp_pin_exit);
diff --git a/drivers/gpu/ion/ion.c b/drivers/gpu/ion/ion.c
index 5c7ab3a..c31f0f0 100644
--- a/drivers/gpu/ion/ion.c
+++ b/drivers/gpu/ion/ion.c
@@ -22,6 +22,7 @@
 #include <linux/anon_inodes.h>
 #include <linux/ion.h>
 #include <linux/list.h>
+#include <linux/memblock.h>
 #include <linux/miscdevice.h>
 #include <linux/mm.h>
 #include <linux/mm_types.h>
@@ -31,6 +32,7 @@
 #include <linux/seq_file.h>
 #include <linux/uaccess.h>
 #include <linux/debugfs.h>
+#include <linux/dma-buf.h>
 
 #include <mach/iommu_domains.h>
 #include "ion_priv.h"
@@ -51,14 +53,12 @@
 	struct rb_root heaps;
 	long (*custom_ioctl) (struct ion_client *client, unsigned int cmd,
 			      unsigned long arg);
-	struct rb_root user_clients;
-	struct rb_root kernel_clients;
+	struct rb_root clients;
 	struct dentry *debug_root;
 };
 
 /**
  * struct ion_client - a process/hw block local address space
- * @ref:		for reference counting the client
  * @node:		node in the tree of all clients
  * @dev:		backpointer to ion device
  * @handles:		an rb tree of all the handles in this client
@@ -72,7 +72,6 @@
  * as well as the handles themselves, and should be held while modifying either.
  */
 struct ion_client {
-	struct kref ref;
 	struct rb_node node;
 	struct ion_device *dev;
 	struct rb_root handles;
@@ -92,7 +91,6 @@
  * @node:		node in the client's handle rbtree
  * @kmap_cnt:		count of times this client has mapped to kernel
  * @dmap_cnt:		count of times this client has mapped for dma
- * @usermap_cnt:	count of times this client has mapped for userspace
  *
  * Modifications to node, map_cnt or mapping should be protected by the
  * lock in the client.  Other fields are never changed after initialization.
@@ -103,8 +101,6 @@
 	struct ion_buffer *buffer;
 	struct rb_node node;
 	unsigned int kmap_cnt;
-	unsigned int dmap_cnt;
-	unsigned int usermap_cnt;
 	unsigned int iommu_map_cnt;
 };
 
@@ -217,6 +213,7 @@
 				     unsigned long flags)
 {
 	struct ion_buffer *buffer;
+	struct sg_table *table;
 	int ret;
 
 	buffer = kzalloc(sizeof(struct ion_buffer), GFP_KERNEL);
@@ -231,8 +228,18 @@
 		kfree(buffer);
 		return ERR_PTR(ret);
 	}
+
 	buffer->dev = dev;
 	buffer->size = len;
+
+	table = buffer->heap->ops->map_dma(buffer->heap, buffer);
+	if (IS_ERR_OR_NULL(table)) {
+		heap->ops->free(buffer);
+		kfree(buffer);
+		return ERR_PTR(PTR_ERR(table));
+	}
+	buffer->sg_table = table;
+
 	mutex_init(&buffer->lock);
 	ion_buffer_add(dev, buffer);
 	return buffer;
@@ -275,6 +282,11 @@
 	struct ion_buffer *buffer = container_of(kref, struct ion_buffer, ref);
 	struct ion_device *dev = buffer->dev;
 
+	if (WARN_ON(buffer->kmap_cnt > 0))
+		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
+
+	buffer->heap->ops->unmap_dma(buffer->heap, buffer);
+
 	ion_iommu_delayed_unmap(buffer);
 	buffer->heap->ops->free(buffer);
 	mutex_lock(&dev->lock);
@@ -310,17 +322,26 @@
 	return handle;
 }
 
-/* Client lock must be locked when calling */
+static void ion_handle_kmap_put(struct ion_handle *);
+
 static void ion_handle_destroy(struct kref *kref)
 {
 	struct ion_handle *handle = container_of(kref, struct ion_handle, ref);
-	/* XXX Can a handle be destroyed while it's map count is non-zero?:
-	   if (handle->map_cnt) unmap
-	 */
-	WARN_ON(handle->kmap_cnt || handle->dmap_cnt || handle->usermap_cnt);
-	ion_buffer_put(handle->buffer);
+	struct ion_client *client = handle->client;
+	struct ion_buffer *buffer = handle->buffer;
+
+	mutex_lock(&client->lock);
+
+	mutex_lock(&buffer->lock);
+	while (handle->kmap_cnt)
+		ion_handle_kmap_put(handle);
+	mutex_unlock(&buffer->lock);
+
 	if (!RB_EMPTY_NODE(&handle->node))
-		rb_erase(&handle->node, &handle->client->handles);
+		rb_erase(&handle->node, &client->handles);
+	mutex_unlock(&client->lock);
+
+	ion_buffer_put(buffer);
 	kfree(handle);
 }
 
@@ -412,6 +433,11 @@
 	 * request of the caller allocate from it.  Repeat until allocate has
 	 * succeeded or all heaps have been tried
 	 */
+	if (WARN_ON(!len))
+		return ERR_PTR(-EINVAL);
+
+	len = PAGE_ALIGN(len);
+
 	mutex_lock(&dev->lock);
 	for (n = rb_first(&dev->heaps); n != NULL; n = rb_next(n)) {
 		struct ion_heap *heap = rb_entry(n, struct ion_heap, node);
@@ -445,7 +471,10 @@
 	}
 	mutex_unlock(&dev->lock);
 
-	if (IS_ERR_OR_NULL(buffer)) {
+	if (buffer == NULL)
+		return ERR_PTR(-ENODEV);
+
+	if (IS_ERR(buffer)) {
 		pr_debug("ION is unable to allocate 0x%x bytes (alignment: "
 			 "0x%x) from heap(s) %sfor client %s with heap "
 			 "mask 0x%x\n",
@@ -455,22 +484,19 @@
 
 	handle = ion_handle_create(client, buffer);
 
-	if (IS_ERR_OR_NULL(handle))
-		goto end;
-
 	/*
 	 * ion_buffer_create will create a buffer with a ref_cnt of 1,
 	 * and ion_handle_create will take a second reference, drop one here
 	 */
 	ion_buffer_put(buffer);
 
-	mutex_lock(&client->lock);
-	ion_handle_add(client, handle);
-	mutex_unlock(&client->lock);
-	return handle;
+	if (!IS_ERR(handle)) {
+		mutex_lock(&client->lock);
+		ion_handle_add(client, handle);
+		mutex_unlock(&client->lock);
+	}
 
-end:
-	ion_buffer_put(buffer);
+
 	return handle;
 }
 EXPORT_SYMBOL(ion_alloc);
@@ -488,43 +514,11 @@
 		WARN(1, "%s: invalid handle passed to free.\n", __func__);
 		return;
 	}
-	ion_handle_put(handle);
 	mutex_unlock(&client->lock);
+	ion_handle_put(handle);
 }
 EXPORT_SYMBOL(ion_free);
 
-static void ion_client_get(struct ion_client *client);
-static int ion_client_put(struct ion_client *client);
-
-static bool _ion_map(int *buffer_cnt, int *handle_cnt)
-{
-	bool map;
-
-	BUG_ON(*handle_cnt != 0 && *buffer_cnt == 0);
-
-	if (*buffer_cnt)
-		map = false;
-	else
-		map = true;
-	if (*handle_cnt == 0)
-		(*buffer_cnt)++;
-	(*handle_cnt)++;
-	return map;
-}
-
-static bool _ion_unmap(int *buffer_cnt, int *handle_cnt)
-{
-	BUG_ON(*handle_cnt == 0);
-	(*handle_cnt)--;
-	if (*handle_cnt != 0)
-		return false;
-	BUG_ON(*buffer_cnt == 0);
-	(*buffer_cnt)--;
-	if (*buffer_cnt == 0)
-		return true;
-	return false;
-}
-
 int ion_phys(struct ion_client *client, struct ion_handle *handle,
 	     ion_phys_addr_t *addr, size_t *len)
 {
@@ -551,52 +545,55 @@
 }
 EXPORT_SYMBOL(ion_phys);
 
-void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle,
-			unsigned long flags)
+static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
 {
-	struct ion_buffer *buffer;
 	void *vaddr;
 
-	mutex_lock(&client->lock);
-	if (!ion_handle_validate(client, handle)) {
-		pr_err("%s: invalid handle passed to map_kernel.\n",
-		       __func__);
-		mutex_unlock(&client->lock);
-		return ERR_PTR(-EINVAL);
+	if (buffer->kmap_cnt) {
+		buffer->kmap_cnt++;
+		return buffer->vaddr;
 	}
-
-	buffer = handle->buffer;
-	mutex_lock(&buffer->lock);
-
-	if (!handle->buffer->heap->ops->map_kernel) {
-		pr_err("%s: map_kernel is not implemented by this heap.\n",
-		       __func__);
-		mutex_unlock(&buffer->lock);
-		mutex_unlock(&client->lock);
-		return ERR_PTR(-ENODEV);
-	}
-
-	if (ion_validate_buffer_flags(buffer, flags)) {
-			vaddr = ERR_PTR(-EEXIST);
-			goto out;
-	}
-
-	if (_ion_map(&buffer->kmap_cnt, &handle->kmap_cnt)) {
-		vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer,
-							flags);
-		if (IS_ERR_OR_NULL(vaddr))
-			_ion_unmap(&buffer->kmap_cnt, &handle->kmap_cnt);
-		buffer->vaddr = vaddr;
-	} else {
-		vaddr = buffer->vaddr;
-	}
-
-out:
-	mutex_unlock(&buffer->lock);
-	mutex_unlock(&client->lock);
+	vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer);
+	if (IS_ERR_OR_NULL(vaddr))
+		return vaddr;
+	buffer->vaddr = vaddr;
+	buffer->kmap_cnt++;
 	return vaddr;
 }
-EXPORT_SYMBOL(ion_map_kernel);
+
+static void *ion_handle_kmap_get(struct ion_handle *handle)
+{
+	struct ion_buffer *buffer = handle->buffer;
+	void *vaddr;
+
+	if (handle->kmap_cnt) {
+		handle->kmap_cnt++;
+		return buffer->vaddr;
+	}
+	vaddr = ion_buffer_kmap_get(buffer);
+	if (IS_ERR_OR_NULL(vaddr))
+		return vaddr;
+	handle->kmap_cnt++;
+	return vaddr;
+}
+
+static void ion_buffer_kmap_put(struct ion_buffer *buffer)
+{
+	buffer->kmap_cnt--;
+	if (!buffer->kmap_cnt) {
+		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
+		buffer->vaddr = NULL;
+	}
+}
+
+static void ion_handle_kmap_put(struct ion_handle *handle)
+{
+	struct ion_buffer *buffer = handle->buffer;
+
+	handle->kmap_cnt--;
+	if (!handle->kmap_cnt)
+		ion_buffer_kmap_put(buffer);
+}
 
 static struct ion_iommu_map *__ion_iommu_map(struct ion_buffer *buffer,
 		int domain_num, int partition_num, unsigned long align,
@@ -699,14 +696,10 @@
 	}
 
 	iommu_map = ion_iommu_lookup(buffer, domain_num, partition_num);
-	_ion_map(&buffer->iommu_map_cnt, &handle->iommu_map_cnt);
 	if (!iommu_map) {
 		iommu_map = __ion_iommu_map(buffer, domain_num, partition_num,
 					    align, iova_length, flags, iova);
-		if (IS_ERR_OR_NULL(iommu_map)) {
-			_ion_unmap(&buffer->iommu_map_cnt,
-				   &handle->iommu_map_cnt);
-		} else {
+		if (!IS_ERR_OR_NULL(iommu_map)) {
 			iommu_map->flags = iommu_flags;
 
 			if (iommu_map->flags & ION_IOMMU_UNMAP_DELAYED)
@@ -717,22 +710,20 @@
 			pr_err("%s: handle %p is already mapped with iommu flags %lx, trying to map with flags %lx\n",
 				__func__, handle,
 				iommu_map->flags, iommu_flags);
-			_ion_unmap(&buffer->iommu_map_cnt,
-				   &handle->iommu_map_cnt);
 			ret = -EINVAL;
 		} else if (iommu_map->mapped_size != iova_length) {
 			pr_err("%s: handle %p is already mapped with length"
 					" %x, trying to map with length %lx\n",
 				__func__, handle, iommu_map->mapped_size,
 				iova_length);
-			_ion_unmap(&buffer->iommu_map_cnt,
-				   &handle->iommu_map_cnt);
 			ret = -EINVAL;
 		} else {
 			kref_get(&iommu_map->ref);
 			*iova = iommu_map->iova_addr;
 		}
 	}
+	if (!ret)
+		buffer->iommu_map_cnt++;
 	*buffer_size = buffer->size;
 out:
 	mutex_unlock(&buffer->lock);
@@ -771,9 +762,9 @@
 		goto out;
 	}
 
-	_ion_unmap(&buffer->iommu_map_cnt, &handle->iommu_map_cnt);
 	kref_put(&iommu_map->ref, ion_iommu_release);
 
+	buffer->iommu_map_cnt--;
 out:
 	mutex_unlock(&buffer->lock);
 
@@ -782,51 +773,41 @@
 }
 EXPORT_SYMBOL(ion_unmap_iommu);
 
-struct scatterlist *ion_map_dma(struct ion_client *client,
-				struct ion_handle *handle,
-				unsigned long flags)
+void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle,
+			unsigned long flags)
 {
 	struct ion_buffer *buffer;
-	struct scatterlist *sglist;
+	void *vaddr;
 
 	mutex_lock(&client->lock);
 	if (!ion_handle_validate(client, handle)) {
-		pr_err("%s: invalid handle passed to map_dma.\n",
+		pr_err("%s: invalid handle passed to map_kernel.\n",
 		       __func__);
 		mutex_unlock(&client->lock);
 		return ERR_PTR(-EINVAL);
 	}
-	buffer = handle->buffer;
-	mutex_lock(&buffer->lock);
 
-	if (!handle->buffer->heap->ops->map_dma) {
+	buffer = handle->buffer;
+
+	if (!handle->buffer->heap->ops->map_kernel) {
 		pr_err("%s: map_kernel is not implemented by this heap.\n",
 		       __func__);
-		mutex_unlock(&buffer->lock);
 		mutex_unlock(&client->lock);
 		return ERR_PTR(-ENODEV);
 	}
 
 	if (ion_validate_buffer_flags(buffer, flags)) {
-		sglist = ERR_PTR(-EEXIST);
-		goto out;
+		mutex_unlock(&client->lock);
+		return ERR_PTR(-EEXIST);
 	}
 
-	if (_ion_map(&buffer->dmap_cnt, &handle->dmap_cnt)) {
-		sglist = buffer->heap->ops->map_dma(buffer->heap, buffer);
-		if (IS_ERR_OR_NULL(sglist))
-			_ion_unmap(&buffer->dmap_cnt, &handle->dmap_cnt);
-		buffer->sglist = sglist;
-	} else {
-		sglist = buffer->sglist;
-	}
-
-out:
+	mutex_lock(&buffer->lock);
+	vaddr = ion_handle_kmap_get(handle);
 	mutex_unlock(&buffer->lock);
 	mutex_unlock(&client->lock);
-	return sglist;
+	return vaddr;
 }
-EXPORT_SYMBOL(ion_map_dma);
+EXPORT_SYMBOL(ion_map_kernel);
 
 void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle)
 {
@@ -835,75 +816,12 @@
 	mutex_lock(&client->lock);
 	buffer = handle->buffer;
 	mutex_lock(&buffer->lock);
-	if (_ion_unmap(&buffer->kmap_cnt, &handle->kmap_cnt)) {
-		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
-		buffer->vaddr = NULL;
-	}
+	ion_handle_kmap_put(handle);
 	mutex_unlock(&buffer->lock);
 	mutex_unlock(&client->lock);
 }
 EXPORT_SYMBOL(ion_unmap_kernel);
 
-void ion_unmap_dma(struct ion_client *client, struct ion_handle *handle)
-{
-	struct ion_buffer *buffer;
-
-	mutex_lock(&client->lock);
-	buffer = handle->buffer;
-	mutex_lock(&buffer->lock);
-	if (_ion_unmap(&buffer->dmap_cnt, &handle->dmap_cnt)) {
-		buffer->heap->ops->unmap_dma(buffer->heap, buffer);
-		buffer->sglist = NULL;
-	}
-	mutex_unlock(&buffer->lock);
-	mutex_unlock(&client->lock);
-}
-EXPORT_SYMBOL(ion_unmap_dma);
-
-struct ion_buffer *ion_share(struct ion_client *client,
-				 struct ion_handle *handle)
-{
-	bool valid_handle;
-
-	mutex_lock(&client->lock);
-	valid_handle = ion_handle_validate(client, handle);
-	mutex_unlock(&client->lock);
-	if (!valid_handle) {
-		WARN("%s: invalid handle passed to share.\n", __func__);
-		return ERR_PTR(-EINVAL);
-	}
-
-	/* do not take an extra reference here, the burden is on the caller
-	 * to make sure the buffer doesn't go away while it's passing it
-	 * to another client -- ion_free should not be called on this handle
-	 * until the buffer has been imported into the other client
-	 */
-	return handle->buffer;
-}
-EXPORT_SYMBOL(ion_share);
-
-struct ion_handle *ion_import(struct ion_client *client,
-			      struct ion_buffer *buffer)
-{
-	struct ion_handle *handle = NULL;
-
-	mutex_lock(&client->lock);
-	/* if a handle exists for this buffer just take a reference to it */
-	handle = ion_handle_lookup(client, buffer);
-	if (!IS_ERR_OR_NULL(handle)) {
-		ion_handle_get(handle);
-		goto end;
-	}
-	handle = ion_handle_create(client, buffer);
-	if (IS_ERR_OR_NULL(handle))
-		goto end;
-	ion_handle_add(client, handle);
-end:
-	mutex_unlock(&client->lock);
-	return handle;
-}
-EXPORT_SYMBOL(ion_import);
-
 static int check_vaddr_bounds(unsigned long start, unsigned long end)
 {
 	struct mm_struct *mm = current->active_mm;
@@ -968,30 +886,7 @@
 	return ret;
 
 }
-
-static const struct file_operations ion_share_fops;
-
-struct ion_handle *ion_import_fd(struct ion_client *client, int fd)
-{
-	struct file *file = fget(fd);
-	struct ion_handle *handle;
-
-	if (!file) {
-		pr_err("%s: imported fd not found in file table.\n", __func__);
-		return ERR_PTR(-EINVAL);
-	}
-	if (file->f_op != &ion_share_fops) {
-		pr_err("%s: imported file %s is not a shared ion"
-			" file.", __func__, file->f_dentry->d_name.name);
-		handle = ERR_PTR(-EINVAL);
-		goto end;
-	}
-	handle = ion_import(client, file->private_data);
-end:
-	fput(file);
-	return handle;
-}
-EXPORT_SYMBOL(ion_import_fd);
+EXPORT_SYMBOL(ion_do_cache_op);
 
 static int ion_debug_client_show(struct seq_file *s, void *unused)
 {
@@ -1033,9 +928,6 @@
 		}
 		seq_printf(s, "\n");
 	}
-
-	seq_printf(s, "%16.16s %d\n", "client refcount:",
-			atomic_read(&client->ref.refcount));
 	mutex_unlock(&client->lock);
 
 	return 0;
@@ -1053,29 +945,6 @@
 	.release = single_release,
 };
 
-static struct ion_client *ion_client_lookup(struct ion_device *dev,
-					    struct task_struct *task)
-{
-	struct rb_node *n = dev->user_clients.rb_node;
-	struct ion_client *client;
-
-	mutex_lock(&dev->lock);
-	while (n) {
-		client = rb_entry(n, struct ion_client, node);
-		if (task == client->task) {
-			ion_client_get(client);
-			mutex_unlock(&dev->lock);
-			return client;
-		} else if (task < client->task) {
-			n = n->rb_left;
-		} else if (task > client->task) {
-			n = n->rb_right;
-		}
-	}
-	mutex_unlock(&dev->lock);
-	return NULL;
-}
-
 struct ion_client *ion_client_create(struct ion_device *dev,
 				     unsigned int heap_mask,
 				     const char *name)
@@ -1107,19 +976,10 @@
 	}
 	task_unlock(current->group_leader);
 
-	/* if this isn't a kernel thread, see if a client already
-	   exists */
-	if (task) {
-		client = ion_client_lookup(dev, task);
-		if (!IS_ERR_OR_NULL(client)) {
-			put_task_struct(current->group_leader);
-			return client;
-		}
-	}
-
 	client = kzalloc(sizeof(struct ion_client), GFP_KERNEL);
 	if (!client) {
-		put_task_struct(current->group_leader);
+		if (task)
+			put_task_struct(current->group_leader);
 		return ERR_PTR(-ENOMEM);
 	}
 
@@ -1139,36 +999,20 @@
 	client->heap_mask = heap_mask;
 	client->task = task;
 	client->pid = pid;
-	kref_init(&client->ref);
 
 	mutex_lock(&dev->lock);
-	if (task) {
-		p = &dev->user_clients.rb_node;
-		while (*p) {
-			parent = *p;
-			entry = rb_entry(parent, struct ion_client, node);
+	p = &dev->clients.rb_node;
+	while (*p) {
+		parent = *p;
+		entry = rb_entry(parent, struct ion_client, node);
 
-			if (task < entry->task)
-				p = &(*p)->rb_left;
-			else if (task > entry->task)
-				p = &(*p)->rb_right;
-		}
-		rb_link_node(&client->node, parent, p);
-		rb_insert_color(&client->node, &dev->user_clients);
-	} else {
-		p = &dev->kernel_clients.rb_node;
-		while (*p) {
-			parent = *p;
-			entry = rb_entry(parent, struct ion_client, node);
-
-			if (client < entry)
-				p = &(*p)->rb_left;
-			else if (client > entry)
-				p = &(*p)->rb_right;
-		}
-		rb_link_node(&client->node, parent, p);
-		rb_insert_color(&client->node, &dev->kernel_clients);
+		if (client < entry)
+			p = &(*p)->rb_left;
+		else if (client > entry)
+			p = &(*p)->rb_right;
 	}
+	rb_link_node(&client->node, parent, p);
+	rb_insert_color(&client->node, &dev->clients);
 
 
 	client->debug_root = debugfs_create_file(name, 0664,
@@ -1179,9 +1023,8 @@
 	return client;
 }
 
-static void _ion_client_destroy(struct kref *kref)
+void ion_client_destroy(struct ion_client *client)
 {
-	struct ion_client *client = container_of(kref, struct ion_client, ref);
 	struct ion_device *dev = client->dev;
 	struct rb_node *n;
 
@@ -1192,34 +1035,15 @@
 		ion_handle_destroy(&handle->ref);
 	}
 	mutex_lock(&dev->lock);
-	if (client->task) {
-		rb_erase(&client->node, &dev->user_clients);
+	if (client->task)
 		put_task_struct(client->task);
-	} else {
-		rb_erase(&client->node, &dev->kernel_clients);
-	}
+	rb_erase(&client->node, &dev->clients);
 	debugfs_remove_recursive(client->debug_root);
 	mutex_unlock(&dev->lock);
 
 	kfree(client->name);
 	kfree(client);
 }
-
-static void ion_client_get(struct ion_client *client)
-{
-	kref_get(&client->ref);
-}
-
-static int ion_client_put(struct ion_client *client)
-{
-	return kref_put(&client->ref, _ion_client_destroy);
-}
-
-void ion_client_destroy(struct ion_client *client)
-{
-	if (client)
-		ion_client_put(client);
-}
 EXPORT_SYMBOL(ion_client_destroy);
 
 int ion_handle_get_flags(struct ion_client *client, struct ion_handle *handle,
@@ -1266,77 +1090,64 @@
 }
 EXPORT_SYMBOL(ion_handle_get_size);
 
-static int ion_share_release(struct inode *inode, struct file* file)
+struct sg_table *ion_sg_table(struct ion_client *client,
+			      struct ion_handle *handle)
 {
-	struct ion_buffer *buffer = file->private_data;
+	struct ion_buffer *buffer;
+	struct sg_table *table;
 
-	pr_debug("%s: %d\n", __func__, __LINE__);
-	/* drop the reference to the buffer -- this prevents the
-	   buffer from going away because the client holding it exited
-	   while it was being passed */
-	ion_buffer_put(buffer);
-	return 0;
+	mutex_lock(&client->lock);
+	if (!ion_handle_validate(client, handle)) {
+		pr_err("%s: invalid handle passed to map_dma.\n",
+		       __func__);
+		mutex_unlock(&client->lock);
+		return ERR_PTR(-EINVAL);
+	}
+	buffer = handle->buffer;
+	table = buffer->sg_table;
+	mutex_unlock(&client->lock);
+	return table;
+}
+EXPORT_SYMBOL(ion_sg_table);
+
+static struct sg_table *ion_map_dma_buf(struct dma_buf_attachment *attachment,
+					enum dma_data_direction direction)
+{
+	struct dma_buf *dmabuf = attachment->dmabuf;
+	struct ion_buffer *buffer = dmabuf->priv;
+
+	return buffer->sg_table;
+}
+
+static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment,
+			      struct sg_table *table,
+			      enum dma_data_direction direction)
+{
 }
 
 static void ion_vma_open(struct vm_area_struct *vma)
 {
-
-	struct ion_buffer *buffer = vma->vm_file->private_data;
-	struct ion_handle *handle = vma->vm_private_data;
-	struct ion_client *client;
+	struct ion_buffer *buffer = vma->vm_private_data;
 
 	pr_debug("%s: %d\n", __func__, __LINE__);
-	/* check that the client still exists and take a reference so
-	   it can't go away until this vma is closed */
-	client = ion_client_lookup(buffer->dev, current->group_leader);
-	if (IS_ERR_OR_NULL(client)) {
-		vma->vm_private_data = NULL;
-		return;
-	}
-	ion_handle_get(handle);
+
 	mutex_lock(&buffer->lock);
 	buffer->umap_cnt++;
 	mutex_unlock(&buffer->lock);
-	pr_debug("%s: %d client_cnt %d handle_cnt %d alloc_cnt %d\n",
-		 __func__, __LINE__,
-		 atomic_read(&client->ref.refcount),
-		 atomic_read(&handle->ref.refcount),
-		 atomic_read(&buffer->ref.refcount));
 }
 
 static void ion_vma_close(struct vm_area_struct *vma)
 {
-	struct ion_handle *handle = vma->vm_private_data;
-	struct ion_buffer *buffer = vma->vm_file->private_data;
-	struct ion_client *client;
+	struct ion_buffer *buffer = vma->vm_private_data;
 
 	pr_debug("%s: %d\n", __func__, __LINE__);
-	/* this indicates the client is gone, nothing to do here */
-	if (!handle)
-		return;
-	client = handle->client;
+
 	mutex_lock(&buffer->lock);
 	buffer->umap_cnt--;
 	mutex_unlock(&buffer->lock);
 
 	if (buffer->heap->ops->unmap_user)
 		buffer->heap->ops->unmap_user(buffer->heap, buffer);
-
-
-	pr_debug("%s: %d client_cnt %d handle_cnt %d alloc_cnt %d\n",
-		 __func__, __LINE__,
-		 atomic_read(&client->ref.refcount),
-		 atomic_read(&handle->ref.refcount),
-		 atomic_read(&buffer->ref.refcount));
-	mutex_lock(&client->lock);
-	ion_handle_put(handle);
-	mutex_unlock(&client->lock);
-	ion_client_put(client);
-	pr_debug("%s: %d client_cnt %d handle_cnt %d alloc_cnt %d\n",
-		 __func__, __LINE__,
-		 atomic_read(&client->ref.refcount),
-		 atomic_read(&handle->ref.refcount),
-		 atomic_read(&buffer->ref.refcount));
 }
 
 static struct vm_operations_struct ion_vm_ops = {
@@ -1344,129 +1155,202 @@
 	.close = ion_vma_close,
 };
 
-static int ion_share_mmap(struct file *file, struct vm_area_struct *vma)
+static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
 {
-	struct ion_buffer *buffer = file->private_data;
-	unsigned long size = vma->vm_end - vma->vm_start;
-	struct ion_client *client;
-	struct ion_handle *handle;
+	struct ion_buffer *buffer = dmabuf->priv;
 	int ret;
-	unsigned long flags = file->f_flags & O_DSYNC ?
-				ION_SET_CACHE(UNCACHED) :
-				ION_SET_CACHE(CACHED);
 
-
-	pr_debug("%s: %d\n", __func__, __LINE__);
-	/* make sure the client still exists, it's possible for the client to
-	   have gone away but the map/share fd still to be around, take
-	   a reference to it so it can't go away while this mapping exists */
-	client = ion_client_lookup(buffer->dev, current->group_leader);
-	if (IS_ERR_OR_NULL(client)) {
-		pr_err("%s: trying to mmap an ion handle in a process with no "
-		       "ion client\n", __func__);
+	if (!buffer->heap->ops->map_user) {
+		pr_err("%s: this heap does not define a method for mapping "
+		       "to userspace\n", __func__);
 		return -EINVAL;
 	}
 
-	if ((size > buffer->size) || (size + (vma->vm_pgoff << PAGE_SHIFT) >
-				     buffer->size)) {
-		pr_err("%s: trying to map larger area than handle has available"
-		       "\n", __func__);
-		ret = -EINVAL;
-		goto err;
-	}
-
-	/* find the handle and take a reference to it */
-	handle = ion_import(client, buffer);
-	if (IS_ERR_OR_NULL(handle)) {
-		ret = -EINVAL;
-		goto err;
-	}
-
-	if (!handle->buffer->heap->ops->map_user) {
-		pr_err("%s: this heap does not define a method for mapping "
-		       "to userspace\n", __func__);
-		ret = -EINVAL;
-		goto err1;
-	}
-
 	mutex_lock(&buffer->lock);
-
-	if (ion_validate_buffer_flags(buffer, flags)) {
-		ret = -EEXIST;
-		mutex_unlock(&buffer->lock);
-		goto err1;
-	}
-
 	/* now map it to userspace */
-	ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma,
-						flags);
+	ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma);
 
-	buffer->umap_cnt++;
 	if (ret) {
+		mutex_unlock(&buffer->lock);
 		pr_err("%s: failure mapping buffer to userspace\n",
 		       __func__);
-		goto err2;
+	} else {
+		buffer->umap_cnt++;
+		mutex_unlock(&buffer->lock);
+
+		vma->vm_ops = &ion_vm_ops;
+		/*
+		 * move the buffer into the vm_private_data so we can access it
+		 * from vma_open/close
+		 */
+		vma->vm_private_data = buffer;
 	}
-	mutex_unlock(&buffer->lock);
-
-	vma->vm_ops = &ion_vm_ops;
-	/* move the handle into the vm_private_data so we can access it from
-	   vma_open/close */
-	vma->vm_private_data = handle;
-	pr_debug("%s: %d client_cnt %d handle_cnt %d alloc_cnt %d\n",
-		 __func__, __LINE__,
-		 atomic_read(&client->ref.refcount),
-		 atomic_read(&handle->ref.refcount),
-		 atomic_read(&buffer->ref.refcount));
-	return 0;
-
-err2:
-	buffer->umap_cnt--;
-	mutex_unlock(&buffer->lock);
-	/* drop the reference to the handle */
-err1:
-	mutex_lock(&client->lock);
-	ion_handle_put(handle);
-	mutex_unlock(&client->lock);
-err:
-	/* drop the reference to the client */
-	ion_client_put(client);
 	return ret;
 }
 
-static const struct file_operations ion_share_fops = {
-	.owner		= THIS_MODULE,
-	.release	= ion_share_release,
-	.mmap		= ion_share_mmap,
+static void ion_dma_buf_release(struct dma_buf *dmabuf)
+{
+	struct ion_buffer *buffer = dmabuf->priv;
+	ion_buffer_put(buffer);
+}
+
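+/*
+ * The kmap/kunmap dma-buf ops rely on the kernel mapping taken in
+ * ion_dma_buf_begin_cpu_access(), so kmap simply offsets into
+ * buffer->vaddr and kunmap has nothing to undo.
+ */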
+static void *ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset)
+{
+	struct ion_buffer *buffer = dmabuf->priv;
+	return buffer->vaddr + offset;
+}
+
+static void ion_dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long offset,
+			       void *ptr)
+{
+	return;
+}
+
+static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf, size_t start,
+					size_t len,
+					enum dma_data_direction direction)
+{
+	struct ion_buffer *buffer = dmabuf->priv;
+	void *vaddr;
+
+	if (!buffer->heap->ops->map_kernel) {
+		pr_err("%s: map kernel is not implemented by this heap.\n",
+		       __func__);
+		return -ENODEV;
+	}
+
+	mutex_lock(&buffer->lock);
+	vaddr = ion_buffer_kmap_get(buffer);
+	mutex_unlock(&buffer->lock);
+	if (IS_ERR(vaddr))
+		return PTR_ERR(vaddr);
+	if (!vaddr)
+		return -ENOMEM;
+	return 0;
+}
+
+static void ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf, size_t start,
+				       size_t len,
+				       enum dma_data_direction direction)
+{
+	struct ion_buffer *buffer = dmabuf->priv;
+
+	mutex_lock(&buffer->lock);
+	ion_buffer_kmap_put(buffer);
+	mutex_unlock(&buffer->lock);
+}
+
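+/*
+ * dma-buf ops backing every buffer exported by ion; ion_import_dma_buf()
+ * compares a dma-buf's ops against this table to recognise buffers that
+ * ion itself exported.
+ */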
+static struct dma_buf_ops dma_buf_ops = {
+	.map_dma_buf = ion_map_dma_buf,
+	.unmap_dma_buf = ion_unmap_dma_buf,
+	.mmap = ion_mmap,
+	.release = ion_dma_buf_release,
+	.begin_cpu_access = ion_dma_buf_begin_cpu_access,
+	.end_cpu_access = ion_dma_buf_end_cpu_access,
+	.kmap_atomic = ion_dma_buf_kmap,
+	.kunmap_atomic = ion_dma_buf_kunmap,
+	.kmap = ion_dma_buf_kmap,
+	.kunmap = ion_dma_buf_kunmap,
 };
 
-static int ion_ioctl_share(struct file *parent, struct ion_client *client,
-			   struct ion_handle *handle)
+static int ion_share_set_flags(struct ion_client *client,
+				struct ion_handle *handle,
+				unsigned long flags)
 {
-	int fd = get_unused_fd();
-	struct file *file;
+	struct ion_buffer *buffer;
+	bool valid_handle;
+	unsigned long ion_flags = ION_SET_CACHE(CACHED);
+
+	if (flags & O_DSYNC)
+		ion_flags = ION_SET_CACHE(UNCACHED);
 
-	if (fd < 0)
-		return -ENFILE;
+	mutex_lock(&client->lock);
+	valid_handle = ion_handle_validate(client, handle);
+	mutex_unlock(&client->lock);
+	if (!valid_handle) {
+		WARN(1, "%s: invalid handle passed to set_flags.\n", __func__);
+		return -EINVAL;
+	}
 
-	file = anon_inode_getfile("ion_share_fd", &ion_share_fops,
-				  handle->buffer, O_RDWR);
-	if (IS_ERR_OR_NULL(file))
-		goto err;
+	buffer = handle->buffer;
 
-	if (parent->f_flags & O_DSYNC)
-		file->f_flags |= O_DSYNC;
-
-	ion_buffer_get(handle->buffer);
-	fd_install(fd, file);
-
-	return fd;
-
-err:
-	put_unused_fd(fd);
-	return -ENFILE;
+	mutex_lock(&buffer->lock);
+	if (ion_validate_buffer_flags(buffer, ion_flags)) {
+		mutex_unlock(&buffer->lock);
+		return -EEXIST;
+	}
+	mutex_unlock(&buffer->lock);
+	return 0;
 }
 
+
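+/*
+ * ion_share_dma_buf() - export the handle's buffer as a dma-buf and
+ * return a file descriptor for it. The buffer reference taken here is
+ * dropped by ion_dma_buf_release() when the dma-buf goes away.
+ */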
+int ion_share_dma_buf(struct ion_client *client, struct ion_handle *handle)
+{
+	struct ion_buffer *buffer;
+	struct dma_buf *dmabuf;
+	bool valid_handle;
+	int fd;
+
+	mutex_lock(&client->lock);
+	valid_handle = ion_handle_validate(client, handle);
+	mutex_unlock(&client->lock);
+	if (!valid_handle) {
+		WARN(1, "%s: invalid handle passed to share.\n", __func__);
+		return -EINVAL;
+	}
+
+	buffer = handle->buffer;
+	ion_buffer_get(buffer);
+	dmabuf = dma_buf_export(buffer, &dma_buf_ops, buffer->size, O_RDWR);
+	if (IS_ERR(dmabuf)) {
+		ion_buffer_put(buffer);
+		return PTR_ERR(dmabuf);
+	}
+	fd = dma_buf_fd(dmabuf, O_CLOEXEC);
+	if (fd < 0) {
+		dma_buf_put(dmabuf);
+		ion_buffer_put(buffer);
+	}
+	return fd;
+}
+EXPORT_SYMBOL(ion_share_dma_buf);
+
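+/*
+ * ion_import_dma_buf() - turn a dma-buf fd that ion exported back into
+ * an ion handle for this client, reusing an existing handle when the
+ * client already references the same buffer.
+ */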
+struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd)
+{
+	struct dma_buf *dmabuf;
+	struct ion_buffer *buffer;
+	struct ion_handle *handle;
+
+	dmabuf = dma_buf_get(fd);
+	if (IS_ERR_OR_NULL(dmabuf))
+		return ERR_PTR(PTR_ERR(dmabuf));
+	/* only accept dma-bufs that ion itself exported */
+	if (dmabuf->ops != &dma_buf_ops) {
+		pr_err("%s: can not import dmabuf from another exporter\n",
+		       __func__);
+		dma_buf_put(dmabuf);
+		return ERR_PTR(-EINVAL);
+	}
+	buffer = dmabuf->priv;
+
+	mutex_lock(&client->lock);
+	/* if a handle exists for this buffer just take a reference to it */
+	handle = ion_handle_lookup(client, buffer);
+	if (!IS_ERR_OR_NULL(handle)) {
+		ion_handle_get(handle);
+		goto end;
+	}
+	handle = ion_handle_create(client, buffer);
+	if (IS_ERR_OR_NULL(handle))
+		goto end;
+	ion_handle_add(client, handle);
+end:
+	mutex_unlock(&client->lock);
+	dma_buf_put(dmabuf);
+	return handle;
+}
+EXPORT_SYMBOL(ion_import_dma_buf);
+
 static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 {
 	struct ion_client *client = filp->private_data;
@@ -1481,11 +1365,13 @@
 		data.handle = ion_alloc(client, data.len, data.align,
 					     data.flags);
 
-		if (IS_ERR_OR_NULL(data.handle))
-			return -ENOMEM;
+		if (IS_ERR(data.handle))
+			return PTR_ERR(data.handle);
 
-		if (copy_to_user((void __user *)arg, &data, sizeof(data)))
+		if (copy_to_user((void __user *)arg, &data, sizeof(data))) {
+			ion_free(client, data.handle);
 			return -EFAULT;
+		}
 		break;
 	}
 	case ION_IOC_FREE:
@@ -1508,18 +1394,15 @@
 	case ION_IOC_SHARE:
 	{
 		struct ion_fd_data data;
-
+		int ret;
 		if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
 			return -EFAULT;
-		mutex_lock(&client->lock);
-		if (!ion_handle_validate(client, data.handle)) {
-			pr_err("%s: invalid handle passed to share ioctl.\n",
-			       __func__);
-			mutex_unlock(&client->lock);
-			return -EINVAL;
-		}
-		data.fd = ion_ioctl_share(filp, client, data.handle);
-		mutex_unlock(&client->lock);
+
+		ret = ion_share_set_flags(client, data.handle, filp->f_flags);
+		if (ret)
+			return ret;
+
+		data.fd = ion_share_dma_buf(client, data.handle);
 		if (copy_to_user((void __user *)arg, &data, sizeof(data)))
 			return -EFAULT;
 		if (data.fd < 0)
@@ -1533,12 +1416,9 @@
 		if (copy_from_user(&data, (void __user *)arg,
 				   sizeof(struct ion_fd_data)))
 			return -EFAULT;
-
-		data.handle = ion_import_fd(client, data.fd);
-		if (IS_ERR(data.handle)) {
-			ret = PTR_ERR(data.handle);
+		data.handle = ion_import_dma_buf(client, data.fd);
+		if (IS_ERR(data.handle))
 			data.handle = NULL;
-		}
 		if (copy_to_user((void __user *)arg, &data,
 				 sizeof(struct ion_fd_data)))
 			return -EFAULT;
@@ -1581,8 +1461,8 @@
 		}
 
 		if (!data.handle) {
-			handle = ion_import_fd(client, data.fd);
-			if (IS_ERR_OR_NULL(handle)) {
+			handle = ion_import_dma_buf(client, data.fd);
+			if (IS_ERR(handle)) {
 				pr_info("%s: Could not import handle: %d\n",
 					__func__, (int)handle);
 				return -EINVAL;
@@ -1629,7 +1509,7 @@
 	struct ion_client *client = file->private_data;
 
 	pr_debug("%s: %d\n", __func__, __LINE__);
-	ion_client_put(client);
+	ion_client_destroy(client);
 	return 0;
 }
 
@@ -1739,14 +1619,7 @@
 	struct rb_node *j;
 	const char *client_name = NULL;
 
-	for (j = rb_first(&dev->user_clients); j && !client_name;
-			  j = rb_next(j)) {
-		struct ion_client *client = rb_entry(j, struct ion_client,
-						     node);
-		if (ion_debug_find_buffer_owner(client, buffer))
-			client_name = client->name;
-	}
-	for (j = rb_first(&dev->kernel_clients); j && !client_name;
+	for (j = rb_first(&dev->clients); j && !client_name;
 			  j = rb_next(j)) {
 		struct ion_client *client = rb_entry(j, struct ion_client,
 						     node);
@@ -1828,27 +1701,23 @@
 
 	mutex_lock(&dev->lock);
 	seq_printf(s, "%16.s %16.s %16.s\n", "client", "pid", "size");
-	for (n = rb_first(&dev->user_clients); n; n = rb_next(n)) {
-		struct ion_client *client = rb_entry(n, struct ion_client,
-						     node);
-		char task_comm[TASK_COMM_LEN];
-		size_t size = ion_debug_heap_total(client, heap->id);
-		if (!size)
-			continue;
 
-		get_task_comm(task_comm, client->task);
-		seq_printf(s, "%16.s %16u %16x\n", task_comm, client->pid,
-			   size);
-	}
-
-	for (n = rb_first(&dev->kernel_clients); n; n = rb_next(n)) {
+	for (n = rb_first(&dev->clients); n; n = rb_next(n)) {
 		struct ion_client *client = rb_entry(n, struct ion_client,
 						     node);
 		size_t size = ion_debug_heap_total(client, heap->id);
 		if (!size)
 			continue;
-		seq_printf(s, "%16.s %16u %16x\n", client->name, client->pid,
-			   size);
+		if (client->task) {
+			char task_comm[TASK_COMM_LEN];
+
+			get_task_comm(task_comm, client->task);
+			seq_printf(s, "%16.s %16u %16u\n", task_comm,
+				   client->pid, size);
+		} else {
+			seq_printf(s, "%16.s %16u %16u\n", client->name,
+				   client->pid, size);
+		}
 	}
 	ion_heap_print_debug(s, heap);
 	mutex_unlock(&dev->lock);
@@ -1873,6 +1742,11 @@
 	struct rb_node *parent = NULL;
 	struct ion_heap *entry;
 
+	if (!heap->ops->allocate || !heap->ops->free || !heap->ops->map_dma ||
+	    !heap->ops->unmap_dma)
+		pr_err("%s: can not add heap with invalid ops struct.\n",
+		       __func__);
+
 	heap->dev = dev;
 	mutex_lock(&dev->lock);
 	while (*p) {
@@ -1924,6 +1798,7 @@
 	mutex_unlock(&dev->lock);
 	return ret_val;
 }
+EXPORT_SYMBOL(ion_secure_heap);
 
 int ion_unsecure_heap(struct ion_device *dev, int heap_id, int version,
 			void *data)
@@ -1951,6 +1826,7 @@
 	mutex_unlock(&dev->lock);
 	return ret_val;
 }
+EXPORT_SYMBOL(ion_unsecure_heap);
 
 static int ion_debug_leak_show(struct seq_file *s, void *unused)
 {
@@ -1970,7 +1846,7 @@
 	}
 
 	/* now see which buffers we can access */
-	for (n = rb_first(&dev->kernel_clients); n; n = rb_next(n)) {
+	for (n = rb_first(&dev->clients); n; n = rb_next(n)) {
 		struct ion_client *client = rb_entry(n, struct ion_client,
 						     node);
 
@@ -1986,21 +1862,6 @@
 
 	}
 
-	for (n = rb_first(&dev->user_clients); n; n = rb_next(n)) {
-		struct ion_client *client = rb_entry(n, struct ion_client,
-						     node);
-
-		mutex_lock(&client->lock);
-		for (n2 = rb_first(&client->handles); n2; n2 = rb_next(n2)) {
-			struct ion_handle *handle = rb_entry(n2,
-						struct ion_handle, node);
-
-			handle->buffer->marked = 0;
-
-		}
-		mutex_unlock(&client->lock);
-
-	}
 	/* And anyone still marked as a 1 means a leaked handle somewhere */
 	for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
 		struct ion_buffer *buf = rb_entry(n, struct ion_buffer,
@@ -2059,8 +1920,7 @@
 	idev->buffers = RB_ROOT;
 	mutex_init(&idev->lock);
 	idev->heaps = RB_ROOT;
-	idev->user_clients = RB_ROOT;
-	idev->kernel_clients = RB_ROOT;
+	idev->clients = RB_ROOT;
 	debugfs_create_file("check_leaked_fds", 0664, idev->debug_root, idev,
 			    &debug_leak_fops);
 	return idev;
@@ -2072,3 +1932,19 @@
 	/* XXX need to free the heaps and clients ? */
 	kfree(dev);
 }
+
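+/*
+ * ion_reserve() - reserve the static carveout regions described in the
+ * platform data via memblock at init time. Heaps with a zero size are
+ * skipped and reservation failures are only logged.
+ */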
+void __init ion_reserve(struct ion_platform_data *data)
+{
+	int i, ret;
+
+	for (i = 0; i < data->nr; i++) {
+		if (data->heaps[i].size == 0)
+			continue;
+		ret = memblock_reserve(data->heaps[i].base,
+				       data->heaps[i].size);
+		if (ret)
+			pr_err("memblock reserve of %x@%lx failed\n",
+			       data->heaps[i].size,
+			       data->heaps[i].base);
+	}
+}
diff --git a/drivers/gpu/ion/ion_carveout_heap.c b/drivers/gpu/ion/ion_carveout_heap.c
index 1fdc1f9..a591eb4 100644
--- a/drivers/gpu/ion/ion_carveout_heap.c
+++ b/drivers/gpu/ion/ion_carveout_heap.c
@@ -108,28 +108,38 @@
 	buffer->priv_phys = ION_CARVEOUT_ALLOCATE_FAIL;
 }
 
-struct scatterlist *ion_carveout_heap_map_dma(struct ion_heap *heap,
+struct sg_table *ion_carveout_heap_map_dma(struct ion_heap *heap,
 					      struct ion_buffer *buffer)
 {
-	struct scatterlist *sglist;
+	struct sg_table *table;
+	int ret;
 
-	sglist = vmalloc(sizeof(struct scatterlist));
-	if (!sglist)
+	table = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
+	if (!table)
 		return ERR_PTR(-ENOMEM);
 
-	sg_init_table(sglist, 1);
-	sglist->length = buffer->size;
-	sglist->offset = 0;
-	sglist->dma_address = buffer->priv_phys;
+	ret = sg_alloc_table(table, 1, GFP_KERNEL);
+	if (ret)
+		goto err0;
 
-	return sglist;
+	table->sgl->length = buffer->size;
+	table->sgl->offset = 0;
+	table->sgl->dma_address = buffer->priv_phys;
+
+	return table;
+
+err0:
+	kfree(table);
+	return ERR_PTR(ret);
 }
 
 void ion_carveout_heap_unmap_dma(struct ion_heap *heap,
 				 struct ion_buffer *buffer)
 {
-	if (buffer->sglist)
-		vfree(buffer->sglist);
+	if (buffer->sg_table)
+		sg_free_table(buffer->sg_table);
+	kfree(buffer->sg_table);
+	buffer->sg_table = NULL;
 }
 
 static int ion_carveout_request_region(struct ion_carveout_heap *carveout_heap)
@@ -163,8 +173,7 @@
 }
 
 void *ion_carveout_heap_map_kernel(struct ion_heap *heap,
-				   struct ion_buffer *buffer,
-				   unsigned long flags)
+				   struct ion_buffer *buffer)
 {
 	struct ion_carveout_heap *carveout_heap =
 		container_of(heap, struct ion_carveout_heap, heap);
@@ -173,7 +182,7 @@
 	if (ion_carveout_request_region(carveout_heap))
 		return NULL;
 
-	if (ION_IS_CACHED(flags))
+	if (ION_IS_CACHED(buffer->flags))
 		ret_value = ioremap_cached(buffer->priv_phys, buffer->size);
 	else
 		ret_value = ioremap(buffer->priv_phys, buffer->size);
@@ -197,7 +206,7 @@
 }
 
 int ion_carveout_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
-			       struct vm_area_struct *vma, unsigned long flags)
+			       struct vm_area_struct *vma)
 {
 	struct ion_carveout_heap *carveout_heap =
 		container_of(heap, struct ion_carveout_heap, heap);
@@ -206,7 +215,7 @@
 	if (ion_carveout_request_region(carveout_heap))
 		return -EINVAL;
 
-	if (!ION_IS_CACHED(flags))
+	if (!ION_IS_CACHED(buffer->flags))
 		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
 
 	ret_value =  remap_pfn_range(vma, vma->vm_start,
diff --git a/drivers/gpu/ion/ion_cp_heap.c b/drivers/gpu/ion/ion_cp_heap.c
index c5e9caf..23ccab3 100644
--- a/drivers/gpu/ion/ion_cp_heap.c
+++ b/drivers/gpu/ion/ion_cp_heap.c
@@ -366,33 +366,42 @@
 	buffer->priv_phys = ION_CP_ALLOCATE_FAIL;
 }
 
-struct scatterlist *ion_cp_heap_create_sglist(struct ion_buffer *buffer)
+struct sg_table *ion_cp_heap_create_sg_table(struct ion_buffer *buffer)
 {
-	struct scatterlist *sglist;
+	struct sg_table *table;
+	int ret;
 
-	sglist = vmalloc(sizeof(*sglist));
-	if (!sglist)
+	table = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
+	if (!table)
 		return ERR_PTR(-ENOMEM);
 
-	sg_init_table(sglist, 1);
-	sglist->length = buffer->size;
-	sglist->offset = 0;
-	sglist->dma_address = buffer->priv_phys;
+	ret = sg_alloc_table(table, 1, GFP_KERNEL);
+	if (ret)
+		goto err0;
 
-	return sglist;
+	table->sgl->length = buffer->size;
+	table->sgl->offset = 0;
+	table->sgl->dma_address = buffer->priv_phys;
+
+	return table;
+err0:
+	kfree(table);
+	return ERR_PTR(ret);
 }
 
-struct scatterlist *ion_cp_heap_map_dma(struct ion_heap *heap,
+struct sg_table *ion_cp_heap_map_dma(struct ion_heap *heap,
 					      struct ion_buffer *buffer)
 {
-	return ion_cp_heap_create_sglist(buffer);
+	return ion_cp_heap_create_sg_table(buffer);
 }
 
 void ion_cp_heap_unmap_dma(struct ion_heap *heap,
 				 struct ion_buffer *buffer)
 {
-	if (buffer->sglist)
-		vfree(buffer->sglist);
+	if (buffer->sg_table)
+		sg_free_table(buffer->sg_table);
+	kfree(buffer->sg_table);
+	buffer->sg_table = NULL;
 }
 
 /**
@@ -441,9 +450,7 @@
 		return NULL;
 }
 
-void *ion_cp_heap_map_kernel(struct ion_heap *heap,
-				   struct ion_buffer *buffer,
-				   unsigned long flags)
+void *ion_cp_heap_map_kernel(struct ion_heap *heap, struct ion_buffer *buffer)
 {
 	struct ion_cp_heap *cp_heap =
 		container_of(heap, struct ion_cp_heap, heap);
@@ -452,7 +459,7 @@
 	mutex_lock(&cp_heap->lock);
 	if ((cp_heap->heap_protected == HEAP_NOT_PROTECTED) ||
 	    ((cp_heap->heap_protected == HEAP_PROTECTED) &&
-	      !ION_IS_CACHED(flags))) {
+	      !ION_IS_CACHED(buffer->flags))) {
 
 		if (ion_cp_request_region(cp_heap)) {
 			mutex_unlock(&cp_heap->lock);
@@ -461,10 +468,10 @@
 
 		if (cp_heap->reusable) {
 			ret_value = ion_map_fmem_buffer(buffer, cp_heap->base,
-					cp_heap->reserved_vrange, flags);
+				cp_heap->reserved_vrange, buffer->flags);
 
 		} else {
-			if (ION_IS_CACHED(flags))
+			if (ION_IS_CACHED(buffer->flags))
 				ret_value = ioremap_cached(buffer->priv_phys,
 							   buffer->size);
 			else
@@ -510,7 +517,7 @@
 }
 
 int ion_cp_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
-			struct vm_area_struct *vma, unsigned long flags)
+			struct vm_area_struct *vma)
 {
 	int ret_value = -EAGAIN;
 	struct ion_cp_heap *cp_heap =
@@ -523,7 +530,7 @@
 			return -EINVAL;
 		}
 
-		if (!ION_IS_CACHED(flags))
+		if (!ION_IS_CACHED(buffer->flags))
 			vma->vm_page_prot = pgprot_writecombine(
 							vma->vm_page_prot);
 
@@ -764,7 +771,6 @@
 	struct iommu_domain *domain;
 	int ret = 0;
 	unsigned long extra;
-	struct scatterlist *sglist = 0;
 	struct ion_cp_heap *cp_heap =
 		container_of(buffer->heap, struct ion_cp_heap, heap);
 	int prot = IOMMU_WRITE | IOMMU_READ;
@@ -819,12 +825,7 @@
 		goto out1;
 	}
 
-	sglist = ion_cp_heap_create_sglist(buffer);
-	if (IS_ERR_OR_NULL(sglist)) {
-		ret = -ENOMEM;
-		goto out1;
-	}
-	ret = iommu_map_range(domain, data->iova_addr, sglist,
+	ret = iommu_map_range(domain, data->iova_addr, buffer->sg_table->sgl,
 			      buffer->size, prot);
 	if (ret) {
 		pr_err("%s: could not map %lx in domain %p\n",
@@ -839,14 +840,11 @@
 		if (ret)
 			goto out2;
 	}
-	vfree(sglist);
 	return ret;
 
 out2:
 	iommu_unmap_range(domain, data->iova_addr, buffer->size);
 out1:
-	if (!IS_ERR_OR_NULL(sglist))
-		vfree(sglist);
 	msm_free_iova_address(data->iova_addr, domain_num, partition_num,
 				data->mapped_size);
 out:
diff --git a/drivers/gpu/ion/ion_iommu_heap.c b/drivers/gpu/ion/ion_iommu_heap.c
index 9ea6f2b..d0f101c 100644
--- a/drivers/gpu/ion/ion_iommu_heap.c
+++ b/drivers/gpu/ion/ion_iommu_heap.c
@@ -35,7 +35,6 @@
 	struct page **pages;
 	int nrpages;
 	unsigned long size;
-	struct scatterlist *iommu_sglist;
 };
 
 static int ion_iommu_heap_allocate(struct ion_heap *heap,
@@ -47,6 +46,10 @@
 	struct ion_iommu_priv_data *data = NULL;
 
 	if (msm_use_iommu()) {
+		struct scatterlist *sg;
+		struct sg_table *table;
+		unsigned int i;
+
 		data = kmalloc(sizeof(*data), GFP_KERNEL);
 		if (!data)
 			return -ENOMEM;
@@ -59,25 +62,26 @@
 			ret = -ENOMEM;
 			goto err1;
 		}
-		data->iommu_sglist = vmalloc(sizeof(*data->iommu_sglist) *
-						data->nrpages);
-		if (!data->iommu_sglist) {
+
+		table = buffer->sg_table =
+				kzalloc(sizeof(struct sg_table), GFP_KERNEL);
+
+		if (!table) {
 			ret = -ENOMEM;
 			goto err1;
 		}
+		ret = sg_alloc_table(table, data->nrpages, GFP_KERNEL);
+		if (ret)
+			goto err2;
 
-		sg_init_table(data->iommu_sglist, data->nrpages);
-
-		for (i = 0; i < data->nrpages; i++) {
+		for_each_sg(table->sgl, sg, table->nents, i) {
 			data->pages[i] = alloc_page(GFP_KERNEL | __GFP_ZERO);
 			if (!data->pages[i])
-				goto err2;
+				goto err3;
 
-			sg_set_page(&data->iommu_sglist[i], data->pages[i],
-				    PAGE_SIZE, 0);
+			sg_set_page(sg, data->pages[i], PAGE_SIZE, 0);
 		}
 
-
 		buffer->priv_virt = data;
 		return 0;
 
@@ -86,9 +90,11 @@
 	}
 
 
+err3:
+	sg_free_table(buffer->sg_table);
 err2:
-	vfree(data->iommu_sglist);
-	data->iommu_sglist = NULL;
+	kfree(buffer->sg_table);
+	buffer->sg_table = NULL;
 
 	for (i = 0; i < data->nrpages; i++) {
 		if (data->pages[i])
@@ -111,16 +117,12 @@
 	for (i = 0; i < data->nrpages; i++)
 		__free_page(data->pages[i]);
 
-	vfree(data->iommu_sglist);
-	data->iommu_sglist = NULL;
-
 	kfree(data->pages);
 	kfree(data);
 }
 
 void *ion_iommu_heap_map_kernel(struct ion_heap *heap,
-				   struct ion_buffer *buffer,
-				   unsigned long flags)
+				struct ion_buffer *buffer)
 {
 	struct ion_iommu_priv_data *data = buffer->priv_virt;
 	pgprot_t page_prot = PAGE_KERNEL;
@@ -128,7 +130,7 @@
 	if (!data)
 		return NULL;
 
-	if (!ION_IS_CACHED(flags))
+	if (!ION_IS_CACHED(buffer->flags))
 		page_prot = pgprot_noncached(page_prot);
 
 	buffer->vaddr = vmap(data->pages, data->nrpages, VM_IOREMAP, page_prot);
@@ -147,7 +149,7 @@
 }
 
 int ion_iommu_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
-			       struct vm_area_struct *vma, unsigned long flags)
+			       struct vm_area_struct *vma)
 {
 	struct ion_iommu_priv_data *data = buffer->priv_virt;
 	int i;
@@ -155,7 +157,7 @@
 	if (!data)
 		return -EINVAL;
 
-	if (!ION_IS_CACHED(flags))
+	if (!ION_IS_CACHED(buffer->flags))
 		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
 
 	curr_addr = vma->vm_start;
@@ -183,7 +185,6 @@
 	struct iommu_domain *domain;
 	int ret = 0;
 	unsigned long extra;
-	struct ion_iommu_priv_data *buffer_data = buffer->priv_virt;
 	int prot = IOMMU_WRITE | IOMMU_READ;
 	prot |= ION_IS_CACHED(flags) ? IOMMU_CACHE : 0;
 
@@ -207,7 +208,8 @@
 	}
 
 	ret = iommu_map_range(domain, data->iova_addr,
-			      buffer_data->iommu_sglist, buffer->size, prot);
+			      buffer->sg_table->sgl,
+			      buffer->size, prot);
 	if (ret) {
 		pr_err("%s: could not map %lx in domain %p\n",
 			__func__, data->iova_addr, domain);
@@ -299,16 +301,19 @@
 	return 0;
 }
 
-static struct scatterlist *ion_iommu_heap_map_dma(struct ion_heap *heap,
+static struct sg_table *ion_iommu_heap_map_dma(struct ion_heap *heap,
 					      struct ion_buffer *buffer)
 {
-	struct ion_iommu_priv_data *data = buffer->priv_virt;
-	return data->iommu_sglist;
+	return buffer->sg_table;
 }
 
 static void ion_iommu_heap_unmap_dma(struct ion_heap *heap,
 				 struct ion_buffer *buffer)
 {
+	if (buffer->sg_table)
+		sg_free_table(buffer->sg_table);
+	kfree(buffer->sg_table);
+	buffer->sg_table = NULL;
 }
 
 static struct ion_heap_ops iommu_heap_ops = {
diff --git a/drivers/gpu/ion/ion_priv.h b/drivers/gpu/ion/ion_priv.h
index 6940e2f..273e57e 100644
--- a/drivers/gpu/ion/ion_priv.h
+++ b/drivers/gpu/ion/ion_priv.h
@@ -26,18 +26,6 @@
 #include <linux/iommu.h>
 #include <linux/seq_file.h>
 
-struct ion_mapping;
-
-struct ion_dma_mapping {
-	struct kref ref;
-	struct scatterlist *sglist;
-};
-
-struct ion_kernel_mapping {
-	struct kref ref;
-	void *vaddr;
-};
-
 enum {
 	DI_PARTITION_NUM = 0,
 	DI_DOMAIN_NUM = 1,
@@ -92,7 +80,7 @@
  * @kmap_cnt:		number of times the buffer is mapped to the kernel
  * @vaddr:		the kernel mapping if kmap_cnt is not zero
  * @dmap_cnt:		number of times the buffer is mapped for dma
- * @sglist:		the scatterlist for the buffer is dmap_cnt is not zero
+ * @sg_table:		the sg table for the buffer if dmap_cnt is not zero
 */
 struct ion_buffer {
 	struct kref ref;
@@ -109,7 +97,7 @@
 	int kmap_cnt;
 	void *vaddr;
 	int dmap_cnt;
-	struct scatterlist *sglist;
+	struct sg_table *sg_table;
 	int umap_cnt;
 	unsigned int iommu_map_cnt;
 	struct rb_root iommu_maps;
@@ -136,14 +124,13 @@
 	void (*free) (struct ion_buffer *buffer);
 	int (*phys) (struct ion_heap *heap, struct ion_buffer *buffer,
 		     ion_phys_addr_t *addr, size_t *len);
-	struct scatterlist *(*map_dma) (struct ion_heap *heap,
+	struct sg_table *(*map_dma) (struct ion_heap *heap,
 					struct ion_buffer *buffer);
 	void (*unmap_dma) (struct ion_heap *heap, struct ion_buffer *buffer);
-	void * (*map_kernel) (struct ion_heap *heap, struct ion_buffer *buffer,
-				unsigned long flags);
+	void * (*map_kernel) (struct ion_heap *heap, struct ion_buffer *buffer);
 	void (*unmap_kernel) (struct ion_heap *heap, struct ion_buffer *buffer);
 	int (*map_user) (struct ion_heap *mapper, struct ion_buffer *buffer,
-			 struct vm_area_struct *vma, unsigned long flags);
+			 struct vm_area_struct *vma);
 	void (*unmap_user) (struct ion_heap *mapper, struct ion_buffer *buffer);
 	int (*cache_op)(struct ion_heap *heap, struct ion_buffer *buffer,
 			void *vaddr, unsigned int offset,
diff --git a/drivers/gpu/ion/ion_system_heap.c b/drivers/gpu/ion/ion_system_heap.c
index 08b271b..c79c184 100644
--- a/drivers/gpu/ion/ion_system_heap.c
+++ b/drivers/gpu/ion/ion_system_heap.c
@@ -38,71 +38,90 @@
 				     unsigned long size, unsigned long align,
 				     unsigned long flags)
 {
-	buffer->priv_virt = vmalloc_user(size);
-	if (!buffer->priv_virt)
-		return -ENOMEM;
+	struct sg_table *table;
+	struct scatterlist *sg;
+	int i, j;
+	int npages = PAGE_ALIGN(size) / PAGE_SIZE;
 
+	table = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
+	if (!table)
+		return -ENOMEM;
+	i = sg_alloc_table(table, npages, GFP_KERNEL);
+	if (i)
+		goto err0;
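+	/*
+	 * Back the buffer with individual order-0 pages recorded in the
+	 * sg_table; on failure err1 frees only the pages allocated so far.
+	 */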
+	for_each_sg(table->sgl, sg, table->nents, i) {
+		struct page *page;
+		page = alloc_page(GFP_KERNEL|__GFP_ZERO);
+		if (!page)
+			goto err1;
+		sg_set_page(sg, page, PAGE_SIZE, 0);
+	}
+	buffer->priv_virt = table;
 	atomic_add(size, &system_heap_allocated);
 	return 0;
+err1:
+	for_each_sg(table->sgl, sg, i, j)
+		__free_page(sg_page(sg));
+	sg_free_table(table);
+err0:
+	kfree(table);
+	return -ENOMEM;
 }
 
 void ion_system_heap_free(struct ion_buffer *buffer)
 {
-	vfree(buffer->priv_virt);
+	int i;
+	struct scatterlist *sg;
+	struct sg_table *table = buffer->priv_virt;
+
+	for_each_sg(table->sgl, sg, table->nents, i)
+		__free_page(sg_page(sg));
+	if (buffer->sg_table)
+		sg_free_table(buffer->sg_table);
+	kfree(buffer->sg_table);
 	atomic_sub(buffer->size, &system_heap_allocated);
 }
 
-struct scatterlist *ion_system_heap_map_dma(struct ion_heap *heap,
-					    struct ion_buffer *buffer)
+struct sg_table *ion_system_heap_map_dma(struct ion_heap *heap,
+					 struct ion_buffer *buffer)
 {
-	struct scatterlist *sglist;
-	struct page *page;
-	int i;
-	int npages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
-	void *vaddr = buffer->priv_virt;
-
-	sglist = vmalloc(npages * sizeof(struct scatterlist));
-	if (!sglist)
-		return ERR_PTR(-ENOMEM);
-	memset(sglist, 0, npages * sizeof(struct scatterlist));
-	sg_init_table(sglist, npages);
-	for (i = 0; i < npages; i++) {
-		page = vmalloc_to_page(vaddr);
-		if (!page)
-			goto end;
-		sg_set_page(&sglist[i], page, PAGE_SIZE, 0);
-		vaddr += PAGE_SIZE;
-	}
-	/* XXX do cache maintenance for dma? */
-	return sglist;
-end:
-	vfree(sglist);
-	return NULL;
+	return buffer->priv_virt;
 }
 
 void ion_system_heap_unmap_dma(struct ion_heap *heap,
 			       struct ion_buffer *buffer)
 {
-	/* XXX undo cache maintenance for dma? */
-	if (buffer->sglist)
-		vfree(buffer->sglist);
+	return;
 }
 
 void *ion_system_heap_map_kernel(struct ion_heap *heap,
-				 struct ion_buffer *buffer,
-				 unsigned long flags)
+				 struct ion_buffer *buffer)
 {
-	if (ION_IS_CACHED(flags))
-		return buffer->priv_virt;
-	else {
+	if (!ION_IS_CACHED(buffer->flags)) {
 		pr_err("%s: cannot map system heap uncached\n", __func__);
 		return ERR_PTR(-EINVAL);
+	} else {
+		struct scatterlist *sg;
+		int i;
+		void *vaddr;
+		struct sg_table *table = buffer->priv_virt;
+		struct page **pages = kmalloc(
+					sizeof(struct page *) * table->nents,
+					GFP_KERNEL);
+
+		if (!pages)
+			return ERR_PTR(-ENOMEM);
+
+		for_each_sg(table->sgl, sg, table->nents, i)
+			pages[i] = sg_page(sg);
+		vaddr = vmap(pages, table->nents, VM_MAP, PAGE_KERNEL);
+		kfree(pages);
+
+		return vaddr;
 	}
 }
 
 void ion_system_heap_unmap_kernel(struct ion_heap *heap,
 				  struct ion_buffer *buffer)
 {
+	vunmap(buffer->vaddr);
 }
 
 void ion_system_heap_unmap_iommu(struct ion_iommu_map *data)
@@ -132,14 +151,27 @@
 }
 
 int ion_system_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
-			     struct vm_area_struct *vma, unsigned long flags)
+			     struct vm_area_struct *vma)
 {
-	if (ION_IS_CACHED(flags))
-		return remap_vmalloc_range(vma, buffer->priv_virt,
-						vma->vm_pgoff);
-	else {
+	if (!ION_IS_CACHED(buffer->flags)) {
 		pr_err("%s: cannot map system heap uncached\n", __func__);
 		return -EINVAL;
+	} else {
+		struct sg_table *table = buffer->priv_virt;
+		unsigned long addr = vma->vm_start;
+		unsigned long offset = vma->vm_pgoff;
+		struct scatterlist *sg;
+		int i;
+
+		for_each_sg(table->sgl, sg, table->nents, i) {
+			if (offset) {
+				offset--;
+				continue;
+			}
+			vm_insert_page(vma, addr, sg_page(sg));
+			addr += PAGE_SIZE;
+		}
+		return 0;
 	}
 }
 
@@ -168,42 +200,20 @@
 
 	if (system_heap_has_outer_cache) {
 		unsigned long pstart;
-		void *vend;
-		void *vtemp;
-		unsigned long ln = 0;
-		vend = buffer->priv_virt + buffer->size;
-		vtemp = buffer->priv_virt + offset;
-
-		if ((vtemp+length) > vend) {
-			pr_err("Trying to flush outside of mapped range.\n");
-			pr_err("End of mapped range: %p, trying to flush to "
-				"address %p\n", vend, vtemp+length);
-			WARN(1, "%s: called with heap name %s, buffer size 0x%x, "
-				"vaddr 0x%p, offset 0x%x, length: 0x%x\n",
-				__func__, heap->name, buffer->size, vaddr,
-				offset, length);
-			return -EINVAL;
-		}
-
-		for (; ln < length && vtemp < vend;
-		      vtemp += PAGE_SIZE, ln += PAGE_SIZE) {
-			struct page *page = vmalloc_to_page(vtemp);
-			if (!page) {
-				WARN(1, "Could not find page for virt. address %p\n",
-					vtemp);
-				return -EINVAL;
-			}
+		struct sg_table *table = buffer->priv_virt;
+		struct scatterlist *sg;
+		int i;
+		for_each_sg(table->sgl, sg, table->nents, i) {
+			struct page *page = sg_page(sg);
 			pstart = page_to_phys(page);
 			/*
 			 * If page -> phys is returning NULL, something
 			 * has really gone wrong...
 			 */
 			if (!pstart) {
-				WARN(1, "Could not translate %p to physical address\n",
-					vtemp);
+				WARN(1, "Could not translate virtual address to physical address\n");
 				return -EINVAL;
 			}
-
 			outer_cache_op(pstart, pstart + PAGE_SIZE);
 		}
 	}
@@ -227,14 +237,11 @@
 				unsigned long iova_length,
 				unsigned long flags)
 {
-	int ret = 0, i;
+	int ret = 0;
 	struct iommu_domain *domain;
 	unsigned long extra;
 	unsigned long extra_iova_addr;
-	struct page *page;
-	int npages = buffer->size >> PAGE_SHIFT;
-	void *vaddr = buffer->priv_virt;
-	struct scatterlist *sglist = 0;
+	struct sg_table *table = buffer->priv_virt;
 	int prot = IOMMU_WRITE | IOMMU_READ;
 	prot |= ION_IS_CACHED(flags) ? IOMMU_CACHE : 0;
 
@@ -261,23 +268,7 @@
 		goto out1;
 	}
 
-
-	sglist = vmalloc(sizeof(*sglist) * npages);
-	if (!sglist) {
-		ret = -ENOMEM;
-		goto out1;
-	}
-
-	sg_init_table(sglist, npages);
-	for (i = 0; i < npages; i++) {
-		page = vmalloc_to_page(vaddr);
-		if (!page)
-			goto out1;
-		sg_set_page(&sglist[i], page, PAGE_SIZE, 0);
-		vaddr += PAGE_SIZE;
-	}
-
-	ret = iommu_map_range(domain, data->iova_addr, sglist,
+	ret = iommu_map_range(domain, data->iova_addr, table->sgl,
 			      buffer->size, prot);
 
 	if (ret) {
@@ -293,13 +284,11 @@
 		if (ret)
 			goto out2;
 	}
-	vfree(sglist);
 	return ret;
 
 out2:
 	iommu_unmap_range(domain, data->iova_addr, buffer->size);
 out1:
-	vfree(sglist);
 	msm_free_iova_address(data->iova_addr, domain_num, partition_num,
 				data->mapped_size);
 out:
@@ -366,27 +355,32 @@
 	return 0;
 }
 
-struct scatterlist *ion_system_contig_heap_map_dma(struct ion_heap *heap,
+struct sg_table *ion_system_contig_heap_map_dma(struct ion_heap *heap,
 						   struct ion_buffer *buffer)
 {
-	struct scatterlist *sglist;
+	struct sg_table *table;
+	int ret;
 
-	sglist = vmalloc(sizeof(struct scatterlist));
-	if (!sglist)
+	table = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
+	if (!table)
 		return ERR_PTR(-ENOMEM);
-	sg_init_table(sglist, 1);
-	sg_set_page(sglist, virt_to_page(buffer->priv_virt), buffer->size, 0);
-	return sglist;
+	ret = sg_alloc_table(table, 1, GFP_KERNEL);
+	if (ret) {
+		kfree(table);
+		return ERR_PTR(ret);
+	}
+	sg_set_page(table->sgl, virt_to_page(buffer->priv_virt), buffer->size,
+		    0);
+	return table;
 }
 
 int ion_system_contig_heap_map_user(struct ion_heap *heap,
 				    struct ion_buffer *buffer,
-				    struct vm_area_struct *vma,
-				    unsigned long flags)
+				    struct vm_area_struct *vma)
 {
 	unsigned long pfn = __phys_to_pfn(virt_to_phys(buffer->priv_virt));
 
-	if (ION_IS_CACHED(flags))
+	if (ION_IS_CACHED(buffer->flags))
 		return remap_pfn_range(vma, vma->vm_start, pfn + vma->vm_pgoff,
 			       vma->vm_end - vma->vm_start,
 			       vma->vm_page_prot);
diff --git a/drivers/gpu/msm/a3xx_reg.h b/drivers/gpu/msm/a3xx_reg.h
index bd58b4e..f5ee1d7 100644
--- a/drivers/gpu/msm/a3xx_reg.h
+++ b/drivers/gpu/msm/a3xx_reg.h
@@ -509,6 +509,6 @@
 #define RBBM_BLOCK_ID_MARB_3           0x2b
 
 /* RBBM_CLOCK_CTL default value */
-#define A3XX_RBBM_CLOCK_CTL_DEFAULT 0x00000000
+#define A3XX_RBBM_CLOCK_CTL_DEFAULT 0xBFFFFFFF
 
 #endif
diff --git a/drivers/gpu/msm/adreno.c b/drivers/gpu/msm/adreno.c
index f5cb888..c33089d 100644
--- a/drivers/gpu/msm/adreno.c
+++ b/drivers/gpu/msm/adreno.c
@@ -111,6 +111,21 @@
 	.ib_check_level = 0,
 };
 
+/* This set of registers is used for hang detection.
+ * If the values of these registers are unchanged after
+ * KGSL_TIMEOUT_PART time, a GPU hang is reported in the
+ * kernel log.
+ */
+unsigned int hang_detect_regs[] = {
+	A3XX_RBBM_STATUS,
+	REG_CP_RB_RPTR,
+	REG_CP_IB1_BASE,
+	REG_CP_IB1_BUFSZ,
+	REG_CP_IB2_BASE,
+	REG_CP_IB2_BUFSZ,
+};
+
+const unsigned int hang_detect_regs_count = ARRAY_SIZE(hang_detect_regs);
 
 /*
  * This is the master list of all GPU cores that are supported by this
@@ -246,6 +261,7 @@
 }
 
 static void adreno_iommu_setstate(struct kgsl_device *device,
+					unsigned int context_id,
 					uint32_t flags)
 {
 	unsigned int pt_val, reg_pt_val;
@@ -256,17 +272,27 @@
 	struct kgsl_memdesc **reg_map_desc;
 	void *reg_map_array = NULL;
 	int num_iommu_units, i;
+	struct kgsl_context *context;
+	struct adreno_context *adreno_ctx = NULL;
 
 	if (!adreno_dev->drawctxt_active)
 		return kgsl_mmu_device_setstate(&device->mmu, flags);
 	num_iommu_units = kgsl_mmu_get_reg_map_desc(&device->mmu,
 							&reg_map_array);
+
+	context = idr_find(&device->context_idr, context_id);
+	adreno_ctx = context->devctxt;
+
 	reg_map_desc = reg_map_array;
 
 	if (kgsl_mmu_enable_clk(&device->mmu,
 				KGSL_IOMMU_CONTEXT_USER))
 		goto done;
 
+	cmds += __adreno_add_idle_indirect_cmds(cmds,
+		device->mmu.setstate_memory.gpuaddr +
+		KGSL_IOMMU_SETSTATE_NOP_OFFSET);
+
 	if (cpu_is_msm8960())
 		cmds += adreno_add_change_mh_phys_limit_cmds(cmds, 0xFFFFF000,
 					device->mmu.setstate_memory.gpuaddr +
@@ -335,10 +361,9 @@
 		*cmds++ = cp_type3_packet(CP_INVALIDATE_STATE, 1);
 		*cmds++ = 0x7fff;
 
-		if (flags & KGSL_MMUFLAGS_TLBFLUSH)
-			cmds += __adreno_add_idle_indirect_cmds(cmds,
-				device->mmu.setstate_memory.gpuaddr +
-				KGSL_IOMMU_SETSTATE_NOP_OFFSET);
+		cmds += __adreno_add_idle_indirect_cmds(cmds,
+			device->mmu.setstate_memory.gpuaddr +
+			KGSL_IOMMU_SETSTATE_NOP_OFFSET);
 	}
 	if (flags & KGSL_MMUFLAGS_TLBFLUSH) {
 		/*
@@ -374,15 +399,29 @@
 			KGSL_IOMMU_SETSTATE_NOP_OFFSET);
 
 	sizedwords += (cmds - &link[0]);
-	if (sizedwords)
-		adreno_ringbuffer_issuecmds(device,
-			KGSL_CMD_FLAGS_PMODE, &link[0], sizedwords);
+	if (sizedwords) {
+		/*
+		 * add an interrupt at the end of commands so that the smmu
+		 * disable clock off function will get called
+		 */
+		*cmds++ = cp_type3_packet(CP_INTERRUPT, 1);
+		*cmds++ = CP_INT_CNTL__RB_INT_MASK;
+		sizedwords += 2;
+		/* This returns the per context timestamp but we need to
+		 * use the global timestamp for iommu clock disablement */
+		adreno_ringbuffer_issuecmds(device, adreno_ctx,
+			KGSL_CMD_FLAGS_PMODE,
+			&link[0], sizedwords);
+		kgsl_mmu_disable_clk_on_ts(&device->mmu,
+		adreno_dev->ringbuffer.timestamp[KGSL_MEMSTORE_GLOBAL], true);
+	}
 done:
 	if (num_iommu_units)
 		kfree(reg_map_array);
 }
 
 static void adreno_gpummu_setstate(struct kgsl_device *device,
+					unsigned int context_id,
 					uint32_t flags)
 {
 	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
@@ -390,14 +429,25 @@
 	unsigned int *cmds = &link[0];
 	int sizedwords = 0;
 	unsigned int mh_mmu_invalidate = 0x00000003; /*invalidate all and tc */
+	struct kgsl_context *context;
+	struct adreno_context *adreno_ctx = NULL;
 
 	/*
+	 * Fix target freeze issue by adding TLB flush for each submit
+	 * on A20X based targets.
+	 */
+	if (adreno_is_a20x(adreno_dev))
+		flags |= KGSL_MMUFLAGS_TLBFLUSH;
+	/*
 	 * If possible, then set the state via the command stream to avoid
 	 * a CPU idle.  Otherwise, use the default setstate which uses register
 	 * writes For CFF dump we must idle and use the registers so that it is
 	 * easier to filter out the mmu accesses from the dump
 	 */
 	if (!kgsl_cff_dump_enable && adreno_dev->drawctxt_active) {
+		context = idr_find(&device->context_idr, context_id);
+		adreno_ctx = context->devctxt;
+
 		if (flags & KGSL_MMUFLAGS_PTUPDATE) {
 			/* wait for graphics pipe to be idle */
 			*cmds++ = cp_type3_packet(CP_WAIT_FOR_IDLE, 1);
@@ -470,7 +520,8 @@
 			sizedwords += 2;
 		}
 
-		adreno_ringbuffer_issuecmds(device, KGSL_CMD_FLAGS_PMODE,
+		adreno_ringbuffer_issuecmds(device, adreno_ctx,
+					KGSL_CMD_FLAGS_PMODE,
 					&link[0], sizedwords);
 	} else {
 		kgsl_mmu_device_setstate(&device->mmu, flags);
@@ -478,13 +529,14 @@
 }
 
 static void adreno_setstate(struct kgsl_device *device,
+			unsigned int context_id,
 			uint32_t flags)
 {
 	/* call the mmu specific handler */
 	if (KGSL_MMU_TYPE_GPU == kgsl_mmu_get_mmutype())
-		return adreno_gpummu_setstate(device, flags);
+		return adreno_gpummu_setstate(device, context_id, flags);
 	else if (KGSL_MMU_TYPE_IOMMU == kgsl_mmu_get_mmutype())
-		return adreno_iommu_setstate(device, flags);
+		return adreno_iommu_setstate(device, context_id, flags);
 }
 
 static unsigned int
@@ -518,7 +570,7 @@
 			patchid = 1;
 		else
 			patchid = 0;
-	} else if (cpu_is_msm8930()) {
+	} else if (cpu_is_msm8930() || cpu_is_msm8627()) {
 
 		/* A305 */
 		majorid = 0;
@@ -584,7 +636,7 @@
 static unsigned int
 adreno_getchipid(struct kgsl_device *device)
 {
-	if (cpu_is_apq8064() || cpu_is_msm8930())
+	if (cpu_is_apq8064() || cpu_is_msm8930() || cpu_is_msm8627())
 		return a3xx_getchipid(device);
 	else
 		return a2xx_getchipid(device);
@@ -714,6 +766,10 @@
 		kgsl_mh_start(device);
 	}
 
+	/* Assign the correct RBBM status register to the hang detect regs */
+	hang_detect_regs[0] = adreno_dev->gpudev->reg_rbbm_status;
+
 	status = kgsl_mmu_start(device);
 	if (status)
 		goto error_clk_off;
@@ -880,8 +936,7 @@
 	return ret;
 }
 
-static int
-adreno_dump_and_recover(struct kgsl_device *device)
+int adreno_dump_and_recover(struct kgsl_device *device)
 {
 	int result = -ETIMEDOUT;
 
@@ -921,6 +976,7 @@
 done:
 	return result;
 }
+EXPORT_SYMBOL(adreno_dump_and_recover);
 
 static int adreno_getproperty(struct kgsl_device *device,
 				enum kgsl_property_type type,
@@ -1080,7 +1136,10 @@
 	unsigned long wait_time_part;
 	unsigned int msecs;
 	unsigned int msecs_first;
-	unsigned int msecs_part;
+	unsigned int msecs_part = KGSL_TIMEOUT_PART;
+	unsigned int prev_reg_val[hang_detect_regs_count];
+
+	memset(prev_reg_val, 0, sizeof(prev_reg_val));
 
 	kgsl_cffdump_regpoll(device->id,
 		adreno_dev->gpudev->reg_rbbm_status << 2,
@@ -1092,7 +1151,6 @@
 	if (rb->flags & KGSL_FLAGS_STARTED) {
 		msecs = adreno_dev->wait_timeout;
 		msecs_first = (msecs <= 100) ? ((msecs + 4) / 5) : 100;
-		msecs_part = (msecs - msecs_first + 3) / 4;
 		wait_time = jiffies + wait_timeout;
 		wait_time_part = jiffies + msecs_to_jiffies(msecs_first);
 		adreno_poke(device);
@@ -1101,6 +1159,8 @@
 				adreno_poke(device);
 				wait_time_part = jiffies +
 					msecs_to_jiffies(msecs_part);
+				if ((adreno_hang_detect(device, prev_reg_val)))
+					goto err;
 			}
 			GSL_RB_GET_READPTR(rb, &rb->rptr);
 			if (time_after(jiffies, wait_time)) {
@@ -1113,6 +1173,7 @@
 
 	/* now, wait for the GPU to finish its operations */
 	wait_time = jiffies + wait_timeout;
+	wait_time_part = jiffies + msecs_to_jiffies(msecs_part);
 	while (time_before(jiffies, wait_time)) {
 		adreno_regread(device, adreno_dev->gpudev->reg_rbbm_status,
 			&rbbm_status);
@@ -1123,6 +1184,16 @@
 			if (!(rbbm_status & 0x80000000))
 				return 0;
 		}
+
+		/* Don't wait for the full timeout; detect a hang sooner. */
+		if (time_after(jiffies, wait_time_part)) {
+			wait_time_part = jiffies +
+				msecs_to_jiffies(msecs_part);
+			if (adreno_hang_detect(device, prev_reg_val))
+				goto err;
+		}
+
 	}
 
 err:
@@ -1309,6 +1380,7 @@
 	int status;
 	unsigned int ref_ts, enableflag;
 	unsigned int context_id;
+	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
 
 	mutex_lock(&device->mutex);
 	context_id = _get_context_id(context);
@@ -1354,8 +1426,15 @@
 			* get an interrupt */
 			cmds[0] = cp_type3_packet(CP_NOP, 1);
 			cmds[1] = 0;
-			adreno_ringbuffer_issuecmds(device, KGSL_CMD_FLAGS_NONE,
-				&cmds[0], 2);
+
+			if (adreno_dev->drawctxt_active)
+				adreno_ringbuffer_issuecmds(device,
+					adreno_dev->drawctxt_active,
+					KGSL_CMD_FLAGS_NONE, &cmds[0], 2);
+			else
+				/* We would never call this function if there
+				 * were no active contexts running */
+				BUG();
 		}
 	}
 unlock:
@@ -1380,6 +1459,32 @@
 	__ret;								\
 })
 
+
+
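+/*
+ * adreno_hang_detect() - compare the current values of hang_detect_regs
+ * with the snapshot in prev_reg_val. Any register that changed refreshes
+ * the snapshot and clears the hang indication; 1 is returned only when
+ * fast hang detection is enabled and no register moved at all.
+ */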
+unsigned int adreno_hang_detect(struct kgsl_device *device,
+						unsigned int *prev_reg_val)
+{
+	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+	unsigned int curr_reg_val[hang_detect_regs_count];
+	unsigned int hang_detected = 1;
+	unsigned int i;
+
+	if (!adreno_dev->fast_hang_detect)
+		return 0;
+
+	for (i = 0; i < hang_detect_regs_count; i++) {
+		adreno_regread(device, hang_detect_regs[i],
+					   &curr_reg_val[i]);
+		if (curr_reg_val[i] != prev_reg_val[i]) {
+			prev_reg_val[i] = curr_reg_val[i];
+			hang_detected = 0;
+		}
+	}
+
+	return hang_detected;
+}
+
+
 /* MUST be called with the device mutex held */
 static int adreno_waittimestamp(struct kgsl_device *device,
 				struct kgsl_context *context,
@@ -1391,16 +1496,20 @@
 	static uint io_cnt;
 	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
 	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
-	int retries;
+	int retries = 0;
 	unsigned int msecs_first;
-	unsigned int msecs_part;
+	unsigned int msecs_part = KGSL_TIMEOUT_PART;
 	unsigned int ts_issued;
 	unsigned int context_id = _get_context_id(context);
+	unsigned int time_elapsed = 0;
+	unsigned int prev_reg_val[hang_detect_regs_count];
+
+	memset(prev_reg_val, 0, sizeof(prev_reg_val));
 
 	ts_issued = adreno_dev->ringbuffer.timestamp[context_id];
 
 	/* Don't wait forever, set a max value for now */
-	if (msecs == -1)
+	if (msecs == KGSL_TIMEOUT_DEFAULT)
 		msecs = adreno_dev->wait_timeout;
 
 	if (timestamp_cmp(timestamp, ts_issued) > 0) {
@@ -1416,8 +1525,7 @@
 	 * been updated properly.
 	 */
 	msecs_first = (msecs <= 100) ? ((msecs + 4) / 5) : 100;
-	msecs_part = (msecs - msecs_first + 3) / 4;
-	for (retries = 0; retries < 5; retries++) {
+	do {
 		/*
 		 * If the context ID is invalid, we are in a race with
 		 * the context being destroyed by userspace so bail.
@@ -1442,6 +1550,11 @@
 		if (io_cnt <
 		    pwr->pwrlevels[pwr->active_pwrlevel].io_fraction)
 			io = 0;
+
+		if ((retries > 0) &&
+			(adreno_hang_detect(device, prev_reg_val)))
+			goto hang_dump;
+
 		mutex_unlock(&device->mutex);
 		/* We need to make sure that the process is
 		 * placed in wait-q before its condition is called
@@ -1463,7 +1576,14 @@
 			goto done;
 		}
 		/*this wait timed out*/
-	}
+
+		time_elapsed = time_elapsed +
+				(retries ? msecs_part : msecs_first);
+		retries++;
+
+	} while (time_elapsed < msecs);
+
+hang_dump:
 	status = -ETIMEDOUT;
 	KGSL_DRV_ERR(device,
 		     "Device hang detected while waiting for timestamp: "
diff --git a/drivers/gpu/msm/adreno.h b/drivers/gpu/msm/adreno.h
index feaa36f..fcbf1d9 100644
--- a/drivers/gpu/msm/adreno.h
+++ b/drivers/gpu/msm/adreno.h
@@ -82,6 +82,7 @@
 	unsigned int pix_shader_start;
 	unsigned int instruction_size;
 	unsigned int ib_check_level;
+	unsigned int fast_hang_detect;
 };
 
 struct adreno_gpudev {
@@ -99,7 +100,8 @@
 	int (*ctxt_create)(struct adreno_device *, struct adreno_context *);
 	void (*ctxt_save)(struct adreno_device *, struct adreno_context *);
 	void (*ctxt_restore)(struct adreno_device *, struct adreno_context *);
-	void (*ctxt_draw_workaround)(struct adreno_device *);
+	void (*ctxt_draw_workaround)(struct adreno_device *,
+					struct adreno_context *);
 	irqreturn_t (*irq_handler)(struct adreno_device *);
 	void (*irq_control)(struct adreno_device *, int);
 	void * (*snapshot)(struct adreno_device *, void *, int *, int);
@@ -123,6 +125,10 @@
 extern const unsigned int a3xx_registers[];
 extern const unsigned int a3xx_registers_count;
 
+extern unsigned int hang_detect_regs[];
+extern const unsigned int hang_detect_regs_count;
+
+
 int adreno_idle(struct kgsl_device *device, unsigned int timeout);
 void adreno_regread(struct kgsl_device *device, unsigned int offsetwords,
 				unsigned int *value);
@@ -143,6 +149,11 @@
 void *adreno_snapshot(struct kgsl_device *device, void *snapshot, int *remain,
 		int hang);
 
+int adreno_dump_and_recover(struct kgsl_device *device);
+
+unsigned int adreno_hang_detect(struct kgsl_device *device,
+						unsigned int *prev_reg_val);
+
 static inline int adreno_is_a200(struct adreno_device *adreno_dev)
 {
 	return (adreno_dev->gpurev == ADRENO_REV_A200);
@@ -250,7 +261,6 @@
 {
 	unsigned int *start = cmds;
 
-	cmds += __adreno_add_idle_indirect_cmds(cmds, nop_gpuaddr);
 	*cmds++ = cp_type0_packet(MH_MMU_MPU_END, 1);
 	*cmds++ = new_phys_limit;
 	cmds += __adreno_add_idle_indirect_cmds(cmds, nop_gpuaddr);
@@ -263,7 +273,6 @@
 {
 	unsigned int *start = cmds;
 
-	cmds += __adreno_add_idle_indirect_cmds(cmds, nop_gpuaddr);
 	*cmds++ = cp_type0_packet(REG_CP_STATE_DEBUG_INDEX, 1);
 	*cmds++ = (cur_ctx_bank ? 0 : 0x20);
 	cmds += __adreno_add_idle_indirect_cmds(cmds, nop_gpuaddr);
diff --git a/drivers/gpu/msm/adreno_a2xx.c b/drivers/gpu/msm/adreno_a2xx.c
index d846d3d..5a76c86 100644
--- a/drivers/gpu/msm/adreno_a2xx.c
+++ b/drivers/gpu/msm/adreno_a2xx.c
@@ -1450,7 +1450,8 @@
 	return ret;
 }
 
-static void a2xx_drawctxt_workaround(struct adreno_device *adreno_dev)
+static void a2xx_drawctxt_draw_workaround(struct adreno_device *adreno_dev,
+					struct adreno_context *context)
 {
 	struct kgsl_device *device = &adreno_dev->dev;
 	unsigned int cmd[11];
@@ -1497,7 +1498,7 @@
 					| adreno_dev->pix_shader_start;
 	}
 
-	adreno_ringbuffer_issuecmds(device, KGSL_CMD_FLAGS_PMODE,
+	adreno_ringbuffer_issuecmds(device, context, KGSL_CMD_FLAGS_PMODE,
 			&cmd[0], cmds - cmd);
 }
 
@@ -1516,12 +1517,13 @@
 	if (!(context->flags & CTXT_FLAGS_PREAMBLE)) {
 
 		/* save registers and constants. */
-		adreno_ringbuffer_issuecmds(device, KGSL_CMD_FLAGS_NONE,
+		adreno_ringbuffer_issuecmds(device, context,
+			KGSL_CMD_FLAGS_NONE,
 			context->reg_save, 3);
 
 		if (context->flags & CTXT_FLAGS_SHADER_SAVE) {
 			/* save shader partitioning and instructions. */
-			adreno_ringbuffer_issuecmds(device,
+			adreno_ringbuffer_issuecmds(device, context,
 				KGSL_CMD_FLAGS_PMODE,
 				context->shader_save, 3);
 
@@ -1529,7 +1531,8 @@
 			 * fixup shader partitioning parameter for
 			 *  SET_SHADER_BASES.
 			 */
-			adreno_ringbuffer_issuecmds(device, KGSL_CMD_FLAGS_NONE,
+			adreno_ringbuffer_issuecmds(device, context,
+				KGSL_CMD_FLAGS_NONE,
 				context->shader_fixup, 3);
 
 			context->flags |= CTXT_FLAGS_SHADER_RESTORE;
@@ -1541,19 +1544,21 @@
 		/* save gmem.
 		 * (note: changes shader. shader must already be saved.)
 		 */
-		adreno_ringbuffer_issuecmds(device, KGSL_CMD_FLAGS_PMODE,
+		adreno_ringbuffer_issuecmds(device, context,
+			KGSL_CMD_FLAGS_PMODE,
 			context->context_gmem_shadow.gmem_save, 3);
 
 		/* Restore TP0_CHICKEN */
 		if (!(context->flags & CTXT_FLAGS_PREAMBLE)) {
-			adreno_ringbuffer_issuecmds(device, KGSL_CMD_FLAGS_NONE,
+			adreno_ringbuffer_issuecmds(device, context,
+				KGSL_CMD_FLAGS_NONE,
 				context->chicken_restore, 3);
 		}
 		adreno_dev->gpudev->ctx_switches_since_last_draw = 0;
 
 		context->flags |= CTXT_FLAGS_GMEM_RESTORE;
 	} else if (adreno_is_a2xx(adreno_dev))
-		a2xx_drawctxt_workaround(adreno_dev);
+		a2xx_drawctxt_draw_workaround(adreno_dev, context);
 }
 
 static void a2xx_drawctxt_restore(struct adreno_device *adreno_dev,
@@ -1564,7 +1569,8 @@
 
 	if (context == NULL) {
 		/* No context - set the default apgetable and thats it */
-		kgsl_mmu_setstate(&device->mmu, device->mmu.defaultpagetable);
+		kgsl_mmu_setstate(&device->mmu, device->mmu.defaultpagetable,
+				adreno_dev->drawctxt_active->id);
 		return;
 	}
 
@@ -1576,8 +1582,9 @@
 	cmds[3] = device->memstore.gpuaddr +
 		KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL, current_context);
 	cmds[4] = context->id;
-	adreno_ringbuffer_issuecmds(device, KGSL_CMD_FLAGS_NONE, cmds, 5);
-	kgsl_mmu_setstate(&device->mmu, context->pagetable);
+	adreno_ringbuffer_issuecmds(device, context, KGSL_CMD_FLAGS_NONE,
+					cmds, 5);
+	kgsl_mmu_setstate(&device->mmu, context->pagetable, context->id);
 
 #ifndef CONFIG_MSM_KGSL_CFF_DUMP_NO_CONTEXT_MEM_DUMP
 	kgsl_cffdump_syncmem(NULL, &context->gpustate,
@@ -1589,12 +1596,14 @@
 	 *  (note: changes shader. shader must not already be restored.)
 	 */
 	if (context->flags & CTXT_FLAGS_GMEM_RESTORE) {
-		adreno_ringbuffer_issuecmds(device, KGSL_CMD_FLAGS_PMODE,
+		adreno_ringbuffer_issuecmds(device, context,
+			KGSL_CMD_FLAGS_PMODE,
 			context->context_gmem_shadow.gmem_restore, 3);
 
 		if (!(context->flags & CTXT_FLAGS_PREAMBLE)) {
 			/* Restore TP0_CHICKEN */
-			adreno_ringbuffer_issuecmds(device, KGSL_CMD_FLAGS_NONE,
+			adreno_ringbuffer_issuecmds(device, context,
+				KGSL_CMD_FLAGS_NONE,
 				context->chicken_restore, 3);
 		}
 
@@ -1604,12 +1613,12 @@
 	if (!(context->flags & CTXT_FLAGS_PREAMBLE)) {
 
 		/* restore registers and constants. */
-		adreno_ringbuffer_issuecmds(device, KGSL_CMD_FLAGS_NONE,
-			context->reg_restore, 3);
+		adreno_ringbuffer_issuecmds(device, context,
+			KGSL_CMD_FLAGS_NONE, context->reg_restore, 3);
 
 		/* restore shader instructions & partitioning. */
 		if (context->flags & CTXT_FLAGS_SHADER_RESTORE) {
-			adreno_ringbuffer_issuecmds(device,
+			adreno_ringbuffer_issuecmds(device, context,
 				KGSL_CMD_FLAGS_NONE,
 				context->shader_restore, 3);
 		}
@@ -1618,8 +1627,8 @@
 	if (adreno_is_a20x(adreno_dev)) {
 		cmds[0] = cp_type3_packet(CP_SET_BIN_BASE_OFFSET, 1);
 		cmds[1] = context->bin_base_offset;
-		adreno_ringbuffer_issuecmds(device, KGSL_CMD_FLAGS_NONE,
-			cmds, 2);
+		adreno_ringbuffer_issuecmds(device, context,
+			KGSL_CMD_FLAGS_NONE, cmds, 2);
 	}
 }
 
@@ -1710,7 +1719,6 @@
 			kgsl_sharedmem_writel(&rb->device->memstore,
 					KGSL_MEMSTORE_OFFSET(context_id,
 						ts_cmp_enable), 0);
-			device->last_expired_ctxt_id = context_id;
 			wmb();
 		}
 		KGSL_CMD_WARN(rb->device, "ringbuffer rb interrupt\n");
@@ -2011,7 +2019,7 @@
 	.ctxt_create = a2xx_drawctxt_create,
 	.ctxt_save = a2xx_drawctxt_save,
 	.ctxt_restore = a2xx_drawctxt_restore,
-	.ctxt_draw_workaround = a2xx_drawctxt_workaround,
+	.ctxt_draw_workaround = a2xx_drawctxt_draw_workaround,
 	.irq_handler = a2xx_irq_handler,
 	.irq_control = a2xx_irq_control,
 	.snapshot = a2xx_snapshot,
diff --git a/drivers/gpu/msm/adreno_a3xx.c b/drivers/gpu/msm/adreno_a3xx.c
index 56696c4..6eebeb8 100644
--- a/drivers/gpu/msm/adreno_a3xx.c
+++ b/drivers/gpu/msm/adreno_a3xx.c
@@ -2226,16 +2226,17 @@
 
 	if (!(context->flags & CTXT_FLAGS_PREAMBLE)) {
 		/* Fixup self modifying IBs for save operations */
-		adreno_ringbuffer_issuecmds(device, KGSL_CMD_FLAGS_NONE,
-			context->save_fixup, 3);
+		adreno_ringbuffer_issuecmds(device, context,
+			KGSL_CMD_FLAGS_NONE, context->save_fixup, 3);
 
 		/* save registers and constants. */
-		adreno_ringbuffer_issuecmds(device, KGSL_CMD_FLAGS_NONE,
+		adreno_ringbuffer_issuecmds(device, context,
+			KGSL_CMD_FLAGS_NONE,
 			context->regconstant_save, 3);
 
 		if (context->flags & CTXT_FLAGS_SHADER_SAVE) {
 			/* Save shader instructions */
-			adreno_ringbuffer_issuecmds(device,
+			adreno_ringbuffer_issuecmds(device, context,
 				KGSL_CMD_FLAGS_PMODE, context->shader_save, 3);
 
 			context->flags |= CTXT_FLAGS_SHADER_RESTORE;
@@ -2249,7 +2250,8 @@
 		 * already be saved.)
 		 */
 
-		adreno_ringbuffer_issuecmds(device, KGSL_CMD_FLAGS_PMODE,
+		adreno_ringbuffer_issuecmds(device, context,
+					KGSL_CMD_FLAGS_PMODE,
 					    context->context_gmem_shadow.
 					    gmem_save, 3);
 		context->flags |= CTXT_FLAGS_GMEM_RESTORE;
@@ -2264,7 +2266,8 @@
 
 	if (context == NULL) {
 		/* No context - set the default pagetable and thats it */
-		kgsl_mmu_setstate(&device->mmu, device->mmu.defaultpagetable);
+		kgsl_mmu_setstate(&device->mmu, device->mmu.defaultpagetable,
+				adreno_dev->drawctxt_active->id);
 		return;
 	}
 
@@ -2276,8 +2279,9 @@
 	cmds[3] = device->memstore.gpuaddr +
 		KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL, current_context);
 	cmds[4] = context->id;
-	adreno_ringbuffer_issuecmds(device, KGSL_CMD_FLAGS_NONE, cmds, 5);
-	kgsl_mmu_setstate(&device->mmu, context->pagetable);
+	adreno_ringbuffer_issuecmds(device, context, KGSL_CMD_FLAGS_NONE,
+					cmds, 5);
+	kgsl_mmu_setstate(&device->mmu, context->pagetable, context->id);
 
 	/*
 	 * Restore GMEM.  (note: changes shader.
@@ -2285,29 +2289,34 @@
 	 */
 
 	if (context->flags & CTXT_FLAGS_GMEM_RESTORE) {
-		adreno_ringbuffer_issuecmds(device, KGSL_CMD_FLAGS_PMODE,
+		adreno_ringbuffer_issuecmds(device, context,
+					KGSL_CMD_FLAGS_PMODE,
 					    context->context_gmem_shadow.
 					    gmem_restore, 3);
 		context->flags &= ~CTXT_FLAGS_GMEM_RESTORE;
 	}
 
 	if (!(context->flags & CTXT_FLAGS_PREAMBLE)) {
-		adreno_ringbuffer_issuecmds(device, KGSL_CMD_FLAGS_NONE,
-			context->reg_restore, 3);
+		adreno_ringbuffer_issuecmds(device, context,
+			KGSL_CMD_FLAGS_NONE, context->reg_restore, 3);
 
 		/* Fixup self modifying IBs for restore operations */
-		adreno_ringbuffer_issuecmds(device, KGSL_CMD_FLAGS_NONE,
+		adreno_ringbuffer_issuecmds(device, context,
+			KGSL_CMD_FLAGS_NONE,
 			context->restore_fixup, 3);
 
-		adreno_ringbuffer_issuecmds(device, KGSL_CMD_FLAGS_NONE,
+		adreno_ringbuffer_issuecmds(device, context,
+			KGSL_CMD_FLAGS_NONE,
 			context->constant_restore, 3);
 
 		if (context->flags & CTXT_FLAGS_SHADER_RESTORE)
-			adreno_ringbuffer_issuecmds(device, KGSL_CMD_FLAGS_NONE,
+			adreno_ringbuffer_issuecmds(device, context,
+				KGSL_CMD_FLAGS_NONE,
 				context->shader_restore, 3);
 
 		/* Restore HLSQ_CONTROL_0 register */
-		adreno_ringbuffer_issuecmds(device, KGSL_CMD_FLAGS_NONE,
+		adreno_ringbuffer_issuecmds(device, context,
+			KGSL_CMD_FLAGS_NONE,
 			context->hlsqcontrol_restore, 3);
 	}
 }
@@ -2607,6 +2616,10 @@
 	adreno_regwrite(device, A3XX_RBBM_INTERFACE_HANG_INT_CTL,
 			(1 << 16) | 0xFFF);
 
+	/* Enable Clock gating */
+	adreno_regwrite(device, A3XX_RBBM_CLOCK_CTL,
+			A3XX_RBBM_CLOCK_CTL_DEFAULT);
+
 }
 
 /* Defined in adreno_a3xx_snapshot.c */
diff --git a/drivers/gpu/msm/adreno_a3xx_snapshot.c b/drivers/gpu/msm/adreno_a3xx_snapshot.c
index 60aab64..a3bee4d 100644
--- a/drivers/gpu/msm/adreno_a3xx_snapshot.c
+++ b/drivers/gpu/msm/adreno_a3xx_snapshot.c
@@ -285,6 +285,9 @@
 			remain, REG_CP_ME_CNTL, REG_CP_ME_STATUS,
 			64, 44);
 
+	/* Disable Clock gating temporarily for the debug bus to work */
+	adreno_regwrite(device, A3XX_RBBM_CLOCK_CTL, 0x00);
+
 	/* VPC memory */
 	snapshot = kgsl_snapshot_add_section(device,
 			KGSL_SNAPSHOT_SECTION_DEBUG, snapshot, remain,
@@ -321,5 +324,9 @@
 
 	snapshot = a3xx_snapshot_debugbus(device, snapshot, remain);
 
+	/* Enable Clock gating */
+	adreno_regwrite(device, A3XX_RBBM_CLOCK_CTL,
+			A3XX_RBBM_CLOCK_CTL_DEFAULT);
+
 	return snapshot;
 }
diff --git a/drivers/gpu/msm/adreno_debugfs.c b/drivers/gpu/msm/adreno_debugfs.c
index 822cf14..e3c9a18 100644
--- a/drivers/gpu/msm/adreno_debugfs.c
+++ b/drivers/gpu/msm/adreno_debugfs.c
@@ -117,6 +117,11 @@
 	debugfs_create_u32("ib_check", 0644, device->d_debugfs,
 			   &adreno_dev->ib_check_level);
 
+	/* By default, enable fast hang detection */
+	adreno_dev->fast_hang_detect = 1;
+	debugfs_create_u32("fast_hang_detect", 0644, device->d_debugfs,
+			   &adreno_dev->fast_hang_detect);
+
 	/* Create post mortem control files */
 
 	pm_d_debugfs = debugfs_create_dir("postmortem", device->d_debugfs);
diff --git a/drivers/gpu/msm/adreno_drawctxt.c b/drivers/gpu/msm/adreno_drawctxt.c
index 267fd45..098c4f5 100644
--- a/drivers/gpu/msm/adreno_drawctxt.c
+++ b/drivers/gpu/msm/adreno_drawctxt.c
@@ -274,7 +274,7 @@
 		if (adreno_dev->gpudev->ctxt_draw_workaround &&
 			adreno_is_a225(adreno_dev))
 				adreno_dev->gpudev->ctxt_draw_workaround(
-					adreno_dev);
+					adreno_dev, drawctxt);
 		return;
 	}
 
diff --git a/drivers/gpu/msm/adreno_ringbuffer.c b/drivers/gpu/msm/adreno_ringbuffer.c
index 3d46221..afcceee 100644
--- a/drivers/gpu/msm/adreno_ringbuffer.c
+++ b/drivers/gpu/msm/adreno_ringbuffer.c
@@ -53,6 +53,14 @@
 	unsigned int freecmds;
 	unsigned int *cmds;
 	uint cmds_gpu;
+	struct adreno_device *adreno_dev = ADRENO_DEVICE(rb->device);
+	unsigned long wait_timeout = msecs_to_jiffies(adreno_dev->wait_timeout);
+	unsigned long wait_time;
+	unsigned long wait_time_part;
+	unsigned int msecs_part = KGSL_TIMEOUT_PART;
+	unsigned int prev_reg_val[hang_detect_regs_count];
+
+	memset(prev_reg_val, 0, sizeof(prev_reg_val));
 
 	/* if wptr ahead, fill the remaining with NOPs */
 	if (wptr_ahead) {
@@ -79,13 +87,46 @@
 		rb->wptr = 0;
 	}
 
+	wait_time = jiffies + wait_timeout;
+	wait_time_part = jiffies + msecs_to_jiffies(msecs_part);
 	/* wait for space in ringbuffer */
-	do {
+	while (1) {
 		GSL_RB_GET_READPTR(rb, &rb->rptr);
 
 		freecmds = rb->rptr - rb->wptr;
 
-	} while ((freecmds != 0) && (freecmds <= numcmds));
+		if (freecmds == 0 || freecmds > numcmds)
+			break;
+
+		/* Don't wait for the full timeout, detect a hang faster.
+		 */
+		if (time_after(jiffies, wait_time_part)) {
+			wait_time_part = jiffies +
+				msecs_to_jiffies(msecs_part);
+			if (adreno_hang_detect(rb->device,
+						prev_reg_val)) {
+				KGSL_DRV_ERR(rb->device,
+				"Hang detected while waiting for freespace in "
+				"ringbuffer rptr: 0x%x, wptr: 0x%x\n",
+				rb->rptr, rb->wptr);
+				goto err;
+			}
+		}
+
+		if (time_after(jiffies, wait_time)) {
+			KGSL_DRV_ERR(rb->device,
+			"Timed out while waiting for freespace in ringbuffer "
+			"rptr: 0x%x, wptr: 0x%x\n", rb->rptr, rb->wptr);
+			goto err;
+		}
+
+		continue;
+
+err:
+		if (!adreno_dump_and_recover(rb->device))
+			wait_time = jiffies + wait_timeout;
+		else
+			/* GPU is hung and we cannot recover */
+			BUG();
+	}
 }
 
 unsigned int *adreno_ringbuffer_allocspace(struct adreno_ringbuffer *rb,
@@ -439,15 +480,13 @@
 	unsigned int context_id = KGSL_MEMSTORE_GLOBAL;
 	unsigned int gpuaddr = rb->device->memstore.gpuaddr;
 
-	if (context != NULL) {
-		/*
-		 * if the context was not created with per context timestamp
-		 * support, we must use the global timestamp since issueibcmds
-		 * will be returning that one.
-		 */
-		if (context->flags & CTXT_FLAGS_PER_CONTEXT_TS)
-			context_id = context->id;
-	}
+	/*
+	 * if the context was not created with per context timestamp
+	 * support, we must use the global timestamp since issueibcmds
+	 * will be returning that one.
+	 */
+	if (context->flags & CTXT_FLAGS_PER_CONTEXT_TS)
+		context_id = context->id;
 
 	/* reserve space to temporarily turn off protected mode
 	*  error checking if needed
@@ -460,7 +499,7 @@
 		total_sizedwords += 7;
 
 	total_sizedwords += 2; /* scratchpad ts for recovery */
-	if (context) {
+	if (context->flags & CTXT_FLAGS_PER_CONTEXT_TS) {
 		total_sizedwords += 3; /* sop timestamp */
 		total_sizedwords += 4; /* eop timestamp */
 		total_sizedwords += 3; /* global timestamp without cache
@@ -470,6 +509,15 @@
 	}
 
 	ringcmds = adreno_ringbuffer_allocspace(rb, total_sizedwords);
+	/* GPU may hang during space allocation; if that's the case, the
+	 * current context may have hung the GPU */
+	if (context->flags & CTXT_FLAGS_GPU_HANG) {
+		KGSL_CTXT_WARN(rb->device,
+		"Context %p caused a gpu hang. Will not accept commands for context %d\n",
+		context, context->id);
+		return rb->timestamp[context_id];
+	}
+
 	rcmd_gpu = rb->buffer_desc.gpuaddr
 		+ sizeof(uint)*(rb->wptr-total_sizedwords);
 
@@ -525,7 +573,7 @@
 		GSL_RB_WRITE(ringcmds, rcmd_gpu, 0x00);
 	}
 
-	if (context) {
+	if (context->flags & CTXT_FLAGS_PER_CONTEXT_TS) {
 		/* start-of-pipeline timestamp */
 		GSL_RB_WRITE(ringcmds, rcmd_gpu,
 			cp_type3_packet(CP_MEM_WRITE, 2));
@@ -591,8 +639,9 @@
 	return timestamp;
 }
 
-void
+unsigned int
 adreno_ringbuffer_issuecmds(struct kgsl_device *device,
+						struct adreno_context *drawctxt,
 						unsigned int flags,
 						unsigned int *cmds,
 						int sizedwords)
@@ -601,8 +650,9 @@
 	struct adreno_ringbuffer *rb = &adreno_dev->ringbuffer;
 
 	if (device->state & KGSL_STATE_HUNG)
-		return;
-	adreno_ringbuffer_addcmds(rb, NULL, flags, cmds, sizedwords);
+		return kgsl_readtimestamp(device, KGSL_MEMSTORE_GLOBAL,
+					KGSL_TIMESTAMP_RETIRED);
+	return adreno_ringbuffer_addcmds(rb, drawctxt, flags, cmds, sizedwords);
 }
 
 static bool _parse_ibs(struct kgsl_device_private *dev_priv, uint gpuaddr,
@@ -869,7 +919,7 @@
 	*cmds++ = cp_nop_packet(1);
 	*cmds++ = KGSL_END_OF_IB_IDENTIFIER;
 
-	kgsl_setstate(&device->mmu,
+	kgsl_setstate(&device->mmu, context->id,
 		      kgsl_mmu_pt_get_flags(device->mmu.hwpagetable,
 					device->id));
 
diff --git a/drivers/gpu/msm/adreno_ringbuffer.h b/drivers/gpu/msm/adreno_ringbuffer.h
index ae2e4c7..6429f46 100644
--- a/drivers/gpu/msm/adreno_ringbuffer.h
+++ b/drivers/gpu/msm/adreno_ringbuffer.h
@@ -103,7 +103,8 @@
 
 void adreno_ringbuffer_close(struct adreno_ringbuffer *rb);
 
-void adreno_ringbuffer_issuecmds(struct kgsl_device *device,
+unsigned int adreno_ringbuffer_issuecmds(struct kgsl_device *device,
+					struct adreno_context *drawctxt,
 					unsigned int flags,
 					unsigned int *cmdaddr,
 					int sizedwords);
diff --git a/drivers/gpu/msm/kgsl.c b/drivers/gpu/msm/kgsl.c
index 5883f08..2a9f564 100644
--- a/drivers/gpu/msm/kgsl.c
+++ b/drivers/gpu/msm/kgsl.c
@@ -62,9 +62,9 @@
  * @returns - 0 on success or error code on failure
  */
 
-static int kgsl_add_event(struct kgsl_device *device, u32 id, u32 ts,
+int kgsl_add_event(struct kgsl_device *device, u32 id, u32 ts,
 	void (*cb)(struct kgsl_device *, void *, u32, u32), void *priv,
-	struct kgsl_device_private *owner)
+	void *owner)
 {
 	struct kgsl_event *event;
 	struct list_head *n;
@@ -122,6 +122,7 @@
 	queue_work(device->work_queue, &device->ts_expired_ws);
 	return 0;
 }
+EXPORT_SYMBOL(kgsl_add_event);
 
 /**
  * kgsl_cancel_events_ctxt - Cancel all events for a context
@@ -162,8 +163,8 @@
  * @owner - driver instance that owns the events to cancel
  *
  */
-static void kgsl_cancel_events(struct kgsl_device *device,
-	struct kgsl_device_private *owner)
+void kgsl_cancel_events(struct kgsl_device *device,
+	void *owner)
 {
 	struct kgsl_event *event, *event_tmp;
 	unsigned int id, cur;
@@ -189,6 +190,7 @@
 		kfree(event);
 	}
 }
+EXPORT_SYMBOL(kgsl_cancel_events);
 
 /* kgsl_get_mem_entry - get the mem_entry structure for the specified object
  * @ptbase - the pagetable base of the object
@@ -247,13 +249,12 @@
 		kgsl_driver.stats.mapped -= entry->memdesc.size;
 
 	/*
-	 * Ion takes care of freeing the sglist for us (how nice </sarcasm>) so
-	 * unmap the dma before freeing the sharedmem so kgsl_sharedmem_free
+	 * Ion takes care of freeing the sglist for us so
+	 * clear the sg before freeing the sharedmem so kgsl_sharedmem_free
 	 * doesn't try to free it again
 	 */
 
 	if (entry->memtype == KGSL_MEM_ENTRY_ION) {
-		ion_unmap_dma(kgsl_ion_client, entry->priv_data);
 		entry->memdesc.sg = NULL;
 	}
 
@@ -439,8 +440,6 @@
 		kfree(event);
 	}
 
-	device->last_expired_ctxt_id = KGSL_CONTEXT_INVALID;
-
 	mutex_unlock(&device->mutex);
 }
 EXPORT_SYMBOL(kgsl_timestamp_expired);
@@ -1746,12 +1745,12 @@
 {
 	struct ion_handle *handle;
 	struct scatterlist *s;
-	unsigned long flags;
+	struct sg_table *sg_table;
 
 	if (IS_ERR_OR_NULL(kgsl_ion_client))
 		return -ENODEV;
 
-	handle = ion_import_fd(kgsl_ion_client, fd);
+	handle = ion_import_dma_buf(kgsl_ion_client, fd);
 	if (IS_ERR_OR_NULL(handle))
 		return PTR_ERR(handle);
 
@@ -1760,13 +1759,12 @@
 	entry->memdesc.pagetable = pagetable;
 	entry->memdesc.size = 0;
 
-	if (ion_handle_get_flags(kgsl_ion_client, handle, &flags))
+	sg_table = ion_sg_table(kgsl_ion_client, handle);
+
+	if (IS_ERR_OR_NULL(sg_table))
 		goto err;
 
-	entry->memdesc.sg = ion_map_dma(kgsl_ion_client, handle, flags);
-
-	if (IS_ERR_OR_NULL(entry->memdesc.sg))
-		goto err;
+	entry->memdesc.sg = sg_table->sgl;
 
 	/* Calculate the size of the memdesc from the sglist */
 
@@ -1890,7 +1888,6 @@
 			fput(entry->priv_data);
 		break;
 	case KGSL_MEM_ENTRY_ION:
-		ion_unmap_dma(kgsl_ion_client, entry->priv_data);
 		ion_free(kgsl_ion_client, entry->priv_data);
 		break;
 	default:
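
The ION import path above now takes the sg_table returned by ion_sg_table() and, as the in-hunk comment says, derives the memdesc size from the scatterlist itself instead of from ION flags. A stand-alone sketch of that size calculation follows; it models the scatterlist as a plain linked list, so struct seg and total_size() are illustrative types, not the kernel's struct scatterlist API.

/* Sketch of the size calculation referenced by the comment in the hunk: the
 * memdesc size is the sum of the lengths of the scatter-gather segments.
 * Modelled with a plain linked list instead of struct scatterlist.
 */
#include <stddef.h>
#include <stdio.h>

struct seg {
	size_t length;
	struct seg *next;
};

static size_t total_size(const struct seg *sg)
{
	size_t size = 0;

	for (; sg != NULL; sg = sg->next)	/* kernel code would use sg_next() */
		size += sg->length;
	return size;
}

int main(void)
{
	struct seg c = { 4096, NULL }, b = { 65536, &c }, a = { 4096, &b };

	printf("memdesc size = %zu bytes\n", total_size(&a));
	return 0;
}

In the driver the walk would start from entry->memdesc.sg; only the "sum of segment lengths" idea is being shown here.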
diff --git a/drivers/gpu/msm/kgsl.h b/drivers/gpu/msm/kgsl.h
index b67f460..7ffa83b 100644
--- a/drivers/gpu/msm/kgsl.h
+++ b/drivers/gpu/msm/kgsl.h
@@ -179,6 +179,13 @@
 	struct kgsl_process_private *private, unsigned int gpuaddr,
 	size_t size);
 
+int kgsl_add_event(struct kgsl_device *device, u32 id, u32 ts,
+	void (*cb)(struct kgsl_device *, void *, u32, u32), void *priv,
+	void *owner);
+
+void kgsl_cancel_events(struct kgsl_device *device,
+	void *owner);
+
 extern const struct dev_pm_ops kgsl_pm_ops;
 
 struct early_suspend;
@@ -213,7 +220,8 @@
 
 static inline void *kgsl_memdesc_map(struct kgsl_memdesc *memdesc)
 {
-	if (memdesc->hostptr == NULL && memdesc->ops->map_kernel_mem)
+	if (memdesc->hostptr == NULL && memdesc->ops &&
+		memdesc->ops->map_kernel_mem)
 		memdesc->ops->map_kernel_mem(memdesc);
 
 	return memdesc->hostptr;
diff --git a/drivers/gpu/msm/kgsl_device.h b/drivers/gpu/msm/kgsl_device.h
index 932c995..0336a20 100644
--- a/drivers/gpu/msm/kgsl_device.h
+++ b/drivers/gpu/msm/kgsl_device.h
@@ -25,6 +25,7 @@
 
 #define KGSL_TIMEOUT_NONE       0
 #define KGSL_TIMEOUT_DEFAULT    0xFFFFFFFF
+#define KGSL_TIMEOUT_PART       2000 /* 2 sec */
 
 #define FIRST_TIMEOUT (HZ / 2)
 
@@ -97,7 +98,8 @@
 	/* Optional functions - these functions are not mandatory.  The
 	   driver will check that the function pointer is not NULL before
 	   calling the hook */
-	void (*setstate) (struct kgsl_device *device, uint32_t flags);
+	void (*setstate) (struct kgsl_device *device, unsigned int context_id,
+			uint32_t flags);
 	int (*drawctxt_create) (struct kgsl_device *device,
 		struct kgsl_pagetable *pagetable, struct kgsl_context *context,
 		uint32_t flags);
@@ -125,7 +127,7 @@
 	void (*func)(struct kgsl_device *, void *, u32, u32);
 	void *priv;
 	struct list_head list;
-	struct kgsl_device_private *owner;
+	void *owner;
 };
 
 
@@ -156,7 +158,6 @@
 	uint32_t state;
 	uint32_t requested_state;
 
-	unsigned int last_expired_ctxt_id;
 	unsigned int active_cnt;
 	struct completion suspend_gate;
 
@@ -214,8 +215,7 @@
 	.mutex = __MUTEX_INITIALIZER((_dev).mutex),\
 	.state = KGSL_STATE_INIT,\
 	.ver_major = DRIVER_VERSION_MAJOR,\
-	.ver_minor = DRIVER_VERSION_MINOR,\
-	.last_expired_ctxt_id = KGSL_CONTEXT_INVALID
+	.ver_minor = DRIVER_VERSION_MINOR
 
 struct kgsl_context {
 	struct kref refcount;
diff --git a/drivers/gpu/msm/kgsl_drm.c b/drivers/gpu/msm/kgsl_drm.c
index 66ac08f..870a7d7 100644
--- a/drivers/gpu/msm/kgsl_drm.c
+++ b/drivers/gpu/msm/kgsl_drm.c
@@ -238,11 +238,8 @@
 	}
 
 	if (TYPE_IS_PMEM(priv->type)) {
-		int type;
-
 		if (priv->type == DRM_KGSL_GEM_TYPE_EBI ||
 		    priv->type & DRM_KGSL_GEM_PMEM_EBI) {
-				type = PMEM_MEMTYPE_EBI1;
 				result = kgsl_sharedmem_ebimem_user(
 						&priv->memdesc,
 						priv->pagetable,
diff --git a/drivers/gpu/msm/kgsl_gpummu.c b/drivers/gpu/msm/kgsl_gpummu.c
index 429d035..998eaab 100644
--- a/drivers/gpu/msm/kgsl_gpummu.c
+++ b/drivers/gpu/msm/kgsl_gpummu.c
@@ -485,7 +485,8 @@
 }
 
 static void kgsl_gpummu_setstate(struct kgsl_mmu *mmu,
-				struct kgsl_pagetable *pagetable)
+				struct kgsl_pagetable *pagetable,
+				unsigned int context_id)
 {
 	if (mmu->flags & KGSL_FLAGS_STARTED) {
 		/* page table not current, then setup mmu to use new
@@ -499,7 +500,7 @@
 			kgsl_mmu_pt_get_flags(pagetable, mmu->device->id);
 
 			/* call device specific set page table */
-			kgsl_setstate(mmu, KGSL_MMUFLAGS_TLBFLUSH |
+			kgsl_setstate(mmu, context_id, KGSL_MMUFLAGS_TLBFLUSH |
 				KGSL_MMUFLAGS_PTUPDATE);
 		}
 	}
@@ -583,7 +584,7 @@
 	kgsl_regwrite(mmu->device, MH_MMU_VA_RANGE,
 		      (KGSL_PAGETABLE_BASE |
 		      (CONFIG_MSM_KGSL_PAGE_TABLE_SIZE >> 16)));
-	kgsl_setstate(mmu, KGSL_MMUFLAGS_TLBFLUSH);
+	kgsl_setstate(mmu, KGSL_MEMSTORE_GLOBAL, KGSL_MMUFLAGS_TLBFLUSH);
 	mmu->flags |= KGSL_FLAGS_STARTED;
 
 	return 0;
@@ -591,7 +592,8 @@
 
 static int
 kgsl_gpummu_unmap(void *mmu_specific_pt,
-		struct kgsl_memdesc *memdesc)
+		struct kgsl_memdesc *memdesc,
+		unsigned int *tlb_flags)
 {
 	unsigned int numpages;
 	unsigned int pte, ptefirst, ptelast, superpte;
@@ -729,7 +731,7 @@
 	.mmu_pagefault = kgsl_gpummu_pagefault,
 	.mmu_get_current_ptbase = kgsl_gpummu_get_current_ptbase,
 	.mmu_enable_clk = NULL,
-	.mmu_disable_clk = NULL,
+	.mmu_disable_clk_on_ts = NULL,
 	.mmu_get_hwpagetable_asid = NULL,
 	.mmu_get_pt_lsb = NULL,
 	.mmu_get_reg_map_desc = NULL,
diff --git a/drivers/gpu/msm/kgsl_iommu.c b/drivers/gpu/msm/kgsl_iommu.c
index d20cf7e..25d0463 100644
--- a/drivers/gpu/msm/kgsl_iommu.c
+++ b/drivers/gpu/msm/kgsl_iommu.c
@@ -130,6 +130,89 @@
 }
 
 /*
+ * kgsl_iommu_clk_disable_event - An event function that is executed when
+ * the required timestamp is reached. It disables the IOMMU clocks if
+ * the timestamp on which the clocks can be disabled has expired.
+ * @device - The kgsl device pointer
+ * @data - The data passed during event creation, it is the MMU pointer
+ * @id - Context ID, should always be KGSL_MEMSTORE_GLOBAL
+ * @ts - The current timestamp that has expired for the device
+ *
+ * Disables IOMMU clocks if timestamp has expired
+ * Return - void
+ */
+static void kgsl_iommu_clk_disable_event(struct kgsl_device *device, void *data,
+					unsigned int id, unsigned int ts)
+{
+	struct kgsl_mmu *mmu = data;
+	struct kgsl_iommu *iommu = mmu->priv;
+
+	if (!iommu->clk_event_queued) {
+		if (0 > timestamp_cmp(ts, iommu->iommu_last_cmd_ts))
+			KGSL_DRV_ERR(device,
+			"IOMMU disable clock event being cancelled, "
+			"iommu_last_cmd_ts: %x, retired ts: %x\n",
+			iommu->iommu_last_cmd_ts, ts);
+		return;
+	}
+
+	if (0 <= timestamp_cmp(ts, iommu->iommu_last_cmd_ts)) {
+		kgsl_iommu_disable_clk(mmu);
+		iommu->clk_event_queued = false;
+	} else {
+		/* add new event to fire when ts is reached, this can happen
+		 * if we queued an event and someone requested the clocks to
+		 * be disabled on a later timestamp */
+		if (kgsl_add_event(device, id, iommu->iommu_last_cmd_ts,
+			kgsl_iommu_clk_disable_event, mmu, mmu)) {
+				KGSL_DRV_ERR(device,
+				"Failed to add IOMMU disable clk event\n");
+				iommu->clk_event_queued = false;
+		}
+	}
+}
+
+/*
+ * kgsl_iommu_disable_clk_on_ts - Sets up event to disable IOMMU clocks
+ * @mmu - The kgsl MMU pointer
+ * @ts - Timestamp on which the clocks should be disabled
+ * @ts_valid - Indicates whether the ts parameter is valid. If false, the
+ * calling function wants to disable the IOMMU clocks immediately, without
+ * waiting for any timestamp
+ *
+ * Creates an event to disable the IOMMU clocks at the given timestamp. If an
+ * event already exists, the timestamp at which the clocks will be disabled
+ * is updated to the passed-in ts, provided it is greater than the current
+ * value
+ * Return - void
+ */
+static void
+kgsl_iommu_disable_clk_on_ts(struct kgsl_mmu *mmu, unsigned int ts,
+				bool ts_valid)
+{
+	struct kgsl_iommu *iommu = mmu->priv;
+
+	if (iommu->clk_event_queued) {
+		if (ts_valid && (0 <
+			timestamp_cmp(ts, iommu->iommu_last_cmd_ts)))
+			iommu->iommu_last_cmd_ts = ts;
+	} else {
+		if (ts_valid) {
+			iommu->iommu_last_cmd_ts = ts;
+			iommu->clk_event_queued = true;
+			if (kgsl_add_event(mmu->device, KGSL_MEMSTORE_GLOBAL,
+				ts, kgsl_iommu_clk_disable_event, mmu, mmu)) {
+				KGSL_DRV_ERR(mmu->device,
+				"Failed to add IOMMU disable clk event\n");
+				iommu->clk_event_queued = false;
+			}
+		} else {
+			kgsl_iommu_disable_clk(mmu);
+		}
+	}
+}
+
+/*
  * kgsl_iommu_enable_clk - Enable iommu clocks
  * @mmu - Pointer to mmu structure
  * @ctx_id - The context bank whose clocks are to be turned on
@@ -534,7 +617,8 @@
 }
 
 static void kgsl_iommu_setstate(struct kgsl_mmu *mmu,
-				struct kgsl_pagetable *pagetable)
+				struct kgsl_pagetable *pagetable,
+				unsigned int context_id)
 {
 	if (mmu->flags & KGSL_FLAGS_STARTED) {
 		struct kgsl_iommu *iommu = mmu->priv;
@@ -551,7 +635,8 @@
 				flags |= KGSL_MMUFLAGS_TLBFLUSH;
 			flags |= kgsl_mmu_pt_get_flags(mmu->hwpagetable,
 							mmu->device->id);
-			kgsl_setstate(mmu, KGSL_MMUFLAGS_PTUPDATE | flags);
+			kgsl_setstate(mmu, context_id,
+				KGSL_MMUFLAGS_PTUPDATE | flags);
 		}
 	}
 }
@@ -751,12 +836,12 @@
 				KGSL_IOMMU_CONTEXT_USER,
 				CONTEXTIDR);
 
-	kgsl_iommu_disable_clk(mmu);
+	kgsl_iommu_disable_clk_on_ts(mmu, 0, false);
 	mmu->flags |= KGSL_FLAGS_STARTED;
 
 done:
 	if (status) {
-		kgsl_iommu_disable_clk(mmu);
+		kgsl_iommu_disable_clk_on_ts(mmu, 0, false);
 		kgsl_detach_pagetable_iommu_domain(mmu);
 	}
 	return status;
@@ -764,7 +849,8 @@
 
 static int
 kgsl_iommu_unmap(void *mmu_specific_pt,
-		struct kgsl_memdesc *memdesc)
+		struct kgsl_memdesc *memdesc,
+		unsigned int *tlb_flags)
 {
 	int ret;
 	unsigned int range = kgsl_sg_size(memdesc->sg, memdesc->sglen);
@@ -785,6 +871,14 @@
 			"with err: %d\n", iommu_pt->domain, gpuaddr,
 			range, ret);
 
+#ifdef CONFIG_KGSL_PER_PROCESS_PAGE_TABLE
+	/*
+	 * Flushing only required if per process pagetables are used. With
+	 * global case, flushing will happen inside iommu_map function
+	 */
+	if (!ret)
+		*tlb_flags = UINT_MAX;
+#endif
 	return 0;
 }
 
@@ -814,19 +908,12 @@
 		return ret;
 	}
 
-#ifdef CONFIG_KGSL_PER_PROCESS_PAGE_TABLE
-	/*
-	 * Flushing only required if per process pagetables are used. With
-	 * global case, flushing will happen inside iommu_map function
-	 */
-	if (!ret)
-		*tlb_flags = UINT_MAX;
-#endif
 	return ret;
 }
 
 static void kgsl_iommu_stop(struct kgsl_mmu *mmu)
 {
+	struct kgsl_iommu *iommu = mmu->priv;
 	/*
 	 *  stop device mmu
 	 *
@@ -841,6 +928,11 @@
 
 		mmu->flags &= ~KGSL_FLAGS_STARTED;
 	}
+
+	/* switch off MMU clocks and cancel any events it has queued */
+	iommu->clk_event_queued = false;
+	kgsl_cancel_events(mmu->device, mmu);
+	kgsl_iommu_disable_clk(mmu);
 }
 
 static int kgsl_iommu_close(struct kgsl_mmu *mmu)
@@ -883,7 +975,7 @@
 	pt_base = readl_relaxed(iommu->iommu_units[0].reg_map.hostptr +
 			(KGSL_IOMMU_CONTEXT_USER << KGSL_IOMMU_CTX_SHIFT) +
 			KGSL_IOMMU_TTBR0);
-	kgsl_iommu_disable_clk(mmu);
+	kgsl_iommu_disable_clk_on_ts(mmu, 0, false);
 	return pt_base & (KGSL_IOMMU_TTBR0_PA_MASK <<
 				KGSL_IOMMU_TTBR0_PA_SHIFT);
 }
@@ -996,7 +1088,7 @@
 		}
 	}
 	/* Disable smmu clock */
-	kgsl_iommu_disable_clk(mmu);
+	kgsl_iommu_disable_clk_on_ts(mmu, 0, false);
 }
 
 /*
@@ -1046,7 +1138,7 @@
 	.mmu_pagefault = NULL,
 	.mmu_get_current_ptbase = kgsl_iommu_get_current_ptbase,
 	.mmu_enable_clk = kgsl_iommu_enable_clk,
-	.mmu_disable_clk = kgsl_iommu_disable_clk,
+	.mmu_disable_clk_on_ts = kgsl_iommu_disable_clk_on_ts,
 	.mmu_get_hwpagetable_asid = kgsl_iommu_get_hwpagetable_asid,
 	.mmu_get_pt_lsb = kgsl_iommu_get_pt_lsb,
 	.mmu_get_reg_map_desc = kgsl_iommu_get_reg_map_desc,
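
The two kerneldoc blocks added above describe how IOMMU clock disabling is now deferred behind a kgsl event: the clocks stay enabled until the last timestamp that touched IOMMU registers retires, and a later disable request simply pushes that deadline further out. A compact sketch of just that decision logic follows; struct clk_tracker, queue_event() and disable_clks() are illustrative stand-ins for the iommu fields and for the kgsl_add_event()/kgsl_iommu_disable_clk() calls the driver uses.

/* Minimal sketch (not driver code) of the kgsl_iommu_disable_clk_on_ts()
 * decision logic: bump the deadline of an already-queued disable event,
 * queue a new one, or disable immediately when no timestamp is given.
 */
#include <stdbool.h>

struct clk_tracker {
	bool event_queued;      /* mirrors iommu->clk_event_queued */
	unsigned int last_ts;   /* mirrors iommu->iommu_last_cmd_ts */
};

static bool ts_after(unsigned int a, unsigned int b) { return (int)(a - b) > 0; }
static bool queue_event(unsigned int ts) { (void)ts; return true; } /* pretend it worked */
static void disable_clks(void) { }

static void disable_clk_on_ts(struct clk_tracker *t, unsigned int ts, bool ts_valid)
{
	if (t->event_queued) {
		/* A disable event is pending: only push its deadline further out. */
		if (ts_valid && ts_after(ts, t->last_ts))
			t->last_ts = ts;
	} else if (ts_valid) {
		/* No event yet: remember the target timestamp and queue one. */
		t->last_ts = ts;
		t->event_queued = queue_event(ts);
	} else {
		/* Caller wants the clocks off right now. */
		disable_clks();
	}
}

int main(void)
{
	struct clk_tracker t = { false, 0 };

	disable_clk_on_ts(&t, 100, true);   /* queues a disable event for ts 100 */
	disable_clk_on_ts(&t, 150, true);   /* pushes the deadline out to ts 150 */
	disable_clk_on_ts(&t, 0, false);    /* no-op: the queued event will do it */
	return 0;
}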
diff --git a/drivers/gpu/msm/kgsl_iommu.h b/drivers/gpu/msm/kgsl_iommu.h
index efc3d9c..354a5cf 100644
--- a/drivers/gpu/msm/kgsl_iommu.h
+++ b/drivers/gpu/msm/kgsl_iommu.h
@@ -103,6 +103,8 @@
  * instance of the IOMMU driver
  * @iommu_last_cmd_ts: The timestamp of last command submitted that
  * aceeses iommu registers
+ * @clk_event_queued: Indicates whether an event to disable clocks
+ * is already queued or not
  * @device: Pointer to kgsl device
  * @asids: A bit structure indicating which id's are presently used
  * @asid: Contains the initial value of IOMMU_CONTEXTIDR when a domain
@@ -113,6 +115,7 @@
 	struct kgsl_iommu_unit iommu_units[KGSL_IOMMU_MAX_UNITS];
 	unsigned int unit_count;
 	unsigned int iommu_last_cmd_ts;
+	bool clk_event_queued;
 	struct kgsl_device *device;
 	unsigned long *asids;
 	unsigned int asid;
diff --git a/drivers/gpu/msm/kgsl_mmu.c b/drivers/gpu/msm/kgsl_mmu.c
index dfaadba..c02274d 100644
--- a/drivers/gpu/msm/kgsl_mmu.c
+++ b/drivers/gpu/msm/kgsl_mmu.c
@@ -543,13 +543,14 @@
 }
 EXPORT_SYMBOL(kgsl_mmu_putpagetable);
 
-void kgsl_setstate(struct kgsl_mmu *mmu, uint32_t flags)
+void kgsl_setstate(struct kgsl_mmu *mmu, unsigned int context_id,
+			uint32_t flags)
 {
 	struct kgsl_device *device = mmu->device;
 	if (KGSL_MMU_TYPE_NONE == kgsl_mmu_type)
 		return;
 	else if (device->ftbl->setstate)
-		device->ftbl->setstate(device, flags);
+		device->ftbl->setstate(device, context_id, flags);
 	else if (mmu->mmu_ops->mmu_device_setstate)
 		mmu->mmu_ops->mmu_device_setstate(mmu, flags);
 }
@@ -684,7 +685,8 @@
 
 	if (KGSL_MMU_TYPE_IOMMU != kgsl_mmu_get_mmutype())
 		spin_lock(&pagetable->lock);
-	pagetable->pt_ops->mmu_unmap(pagetable->priv, memdesc);
+	pagetable->pt_ops->mmu_unmap(pagetable->priv, memdesc,
+					&pagetable->tlb_flags);
 	if (KGSL_MMU_TYPE_IOMMU == kgsl_mmu_get_mmutype())
 		spin_lock(&pagetable->lock);
 	/* Remove the statistics */
diff --git a/drivers/gpu/msm/kgsl_mmu.h b/drivers/gpu/msm/kgsl_mmu.h
index 4c0c015..d06ce45 100644
--- a/drivers/gpu/msm/kgsl_mmu.h
+++ b/drivers/gpu/msm/kgsl_mmu.h
@@ -125,14 +125,15 @@
 	int (*mmu_start) (struct kgsl_mmu *mmu);
 	void (*mmu_stop) (struct kgsl_mmu *mmu);
 	void (*mmu_setstate) (struct kgsl_mmu *mmu,
-		struct kgsl_pagetable *pagetable);
+		struct kgsl_pagetable *pagetable,
+		unsigned int context_id);
 	void (*mmu_device_setstate) (struct kgsl_mmu *mmu,
 					uint32_t flags);
 	void (*mmu_pagefault) (struct kgsl_mmu *mmu);
 	unsigned int (*mmu_get_current_ptbase)
 			(struct kgsl_mmu *mmu);
-	void (*mmu_disable_clk)
-		(struct kgsl_mmu *mmu);
+	void (*mmu_disable_clk_on_ts)
+		(struct kgsl_mmu *mmu, uint32_t ts, bool ts_valid);
 	int (*mmu_enable_clk)
 		(struct kgsl_mmu *mmu, int ctx_id);
 	int (*mmu_get_hwpagetable_asid)(struct kgsl_mmu *mmu);
@@ -149,7 +150,8 @@
 			unsigned int protflags,
 			unsigned int *tlb_flags);
 	int (*mmu_unmap) (void *mmu_pt,
-			struct kgsl_memdesc *memdesc);
+			struct kgsl_memdesc *memdesc,
+			unsigned int *tlb_flags);
 	void *(*mmu_create_pagetable) (void);
 	void (*mmu_destroy_pagetable) (void *pt);
 	int (*mmu_pt_equal) (struct kgsl_pagetable *pt,
@@ -193,7 +195,8 @@
 int kgsl_mmu_unmap(struct kgsl_pagetable *pagetable,
 		    struct kgsl_memdesc *memdesc);
 unsigned int kgsl_virtaddr_to_physaddr(void *virtaddr);
-void kgsl_setstate(struct kgsl_mmu *mmu, uint32_t flags);
+void kgsl_setstate(struct kgsl_mmu *mmu, unsigned int context_id,
+			uint32_t flags);
 int kgsl_mmu_get_ptname_from_ptbase(unsigned int pt_base);
 int kgsl_mmu_pt_get_flags(struct kgsl_pagetable *pt,
 			enum kgsl_deviceid id);
@@ -219,10 +222,11 @@
 }
 
 static inline void kgsl_mmu_setstate(struct kgsl_mmu *mmu,
-			struct kgsl_pagetable *pagetable)
+			struct kgsl_pagetable *pagetable,
+			unsigned int context_id)
 {
 	if (mmu->mmu_ops && mmu->mmu_ops->mmu_setstate)
-		mmu->mmu_ops->mmu_setstate(mmu, pagetable);
+		mmu->mmu_ops->mmu_setstate(mmu, pagetable, context_id);
 }
 
 static inline void kgsl_mmu_device_setstate(struct kgsl_mmu *mmu,
@@ -291,10 +295,11 @@
 		return 0;
 }
 
-static inline void kgsl_mmu_disable_clk(struct kgsl_mmu *mmu)
+static inline void kgsl_mmu_disable_clk_on_ts(struct kgsl_mmu *mmu,
+						unsigned int ts, bool ts_valid)
 {
-	if (mmu->mmu_ops && mmu->mmu_ops->mmu_disable_clk)
-		mmu->mmu_ops->mmu_disable_clk(mmu);
+	if (mmu->mmu_ops && mmu->mmu_ops->mmu_disable_clk_on_ts)
+		mmu->mmu_ops->mmu_disable_clk_on_ts(mmu, ts, ts_valid);
 }
 
 #endif /* __KGSL_MMU_H */
diff --git a/drivers/gpu/msm/kgsl_pwrctrl.c b/drivers/gpu/msm/kgsl_pwrctrl.c
index d55d476..6df073a 100644
--- a/drivers/gpu/msm/kgsl_pwrctrl.c
+++ b/drivers/gpu/msm/kgsl_pwrctrl.c
@@ -439,6 +439,8 @@
 		if (test_and_clear_bit(KGSL_PWRFLAGS_POWER_ON,
 			&pwr->power_flags)) {
 			trace_kgsl_rail(device, state);
+			if (pwr->gpu_dig)
+				regulator_disable(pwr->gpu_dig);
 			if (pwr->gpu_reg)
 				regulator_disable(pwr->gpu_reg);
 		}
@@ -449,8 +451,18 @@
 			if (pwr->gpu_reg) {
 				int status = regulator_enable(pwr->gpu_reg);
 				if (status)
-					KGSL_DRV_ERR(device, "regulator_enable "
-							"failed: %d\n", status);
+					KGSL_DRV_ERR(device,
+							"core regulator_enable "
+							"failed: %d\n",
+							status);
+			}
+			if (pwr->gpu_dig) {
+				int status = regulator_enable(pwr->gpu_dig);
+				if (status)
+					KGSL_DRV_ERR(device,
+							"cx regulator_enable "
+							"failed: %d\n",
+							status);
 			}
 		}
 	}
@@ -534,6 +546,13 @@
 	if (IS_ERR(pwr->gpu_reg))
 		pwr->gpu_reg = NULL;
 
+	if (pwr->gpu_reg) {
+		pwr->gpu_dig = regulator_get(&pdev->dev, "vdd_dig");
+		if (IS_ERR(pwr->gpu_dig))
+			pwr->gpu_dig = NULL;
+	} else
+		pwr->gpu_dig = NULL;
+
 	pwr->power_flags = 0;
 
 	pwr->nap_allowed = pdata->nap_allowed;
@@ -596,6 +615,11 @@
 		pwr->gpu_reg = NULL;
 	}
 
+	if (pwr->gpu_dig) {
+		regulator_put(pwr->gpu_dig);
+		pwr->gpu_dig = NULL;
+	}
+
 	for (i = 1; i < KGSL_MAX_CLKS; i++)
 		if (pwr->grp_clks[i]) {
 			clk_put(pwr->grp_clks[i]);
@@ -713,7 +737,6 @@
 		}
 		kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_OFF);
 		kgsl_pwrctrl_clk(device, KGSL_PWRFLAGS_OFF, KGSL_STATE_NAP);
-		kgsl_mmu_disable_clk(&device->mmu);
 		kgsl_pwrctrl_set_state(device, KGSL_STATE_NAP);
 	case KGSL_STATE_NAP:
 	case KGSL_STATE_SLEEP:
@@ -755,7 +778,6 @@
 				gpu_freq);
 		_sleep_accounting(device);
 		kgsl_pwrctrl_clk(device, KGSL_PWRFLAGS_OFF, KGSL_STATE_SLEEP);
-		kgsl_mmu_disable_clk(&device->mmu);
 		kgsl_pwrctrl_set_state(device, KGSL_STATE_SLEEP);
 		pm_qos_update_request(&device->pm_qos_req_dma,
 					PM_QOS_DEFAULT_VALUE);
@@ -785,9 +807,6 @@
 	case KGSL_STATE_NAP:
 	case KGSL_STATE_SLEEP:
 		del_timer_sync(&device->idle_timer);
-		if (!device->pwrctrl.strtstp_sleepwake)
-			kgsl_pwrctrl_pwrlevel_change(device,
-					KGSL_PWRLEVEL_NOMINAL);
 		device->pwrctrl.restore_slumber = true;
 		device->ftbl->suspend_context(device);
 		device->ftbl->stop(device);
@@ -891,7 +910,6 @@
 	/* Order pwrrail/clk sequence based upon platform */
 	kgsl_pwrctrl_axi(device, KGSL_PWRFLAGS_OFF);
 	kgsl_pwrctrl_clk(device, KGSL_PWRFLAGS_OFF, KGSL_STATE_SLEEP);
-	kgsl_mmu_disable_clk(&device->mmu);
 	kgsl_pwrctrl_pwrrail(device, KGSL_PWRFLAGS_OFF);
 }
 EXPORT_SYMBOL(kgsl_pwrctrl_disable);
diff --git a/drivers/gpu/msm/kgsl_pwrctrl.h b/drivers/gpu/msm/kgsl_pwrctrl.h
index 1e5c21c..954c818 100644
--- a/drivers/gpu/msm/kgsl_pwrctrl.h
+++ b/drivers/gpu/msm/kgsl_pwrctrl.h
@@ -50,6 +50,7 @@
 	unsigned int interval_timeout;
 	bool strtstp_sleepwake;
 	struct regulator *gpu_reg;
+	struct regulator *gpu_dig;
 	uint32_t pcl;
 	unsigned int nap_allowed;
 	unsigned int idle_needed;
diff --git a/drivers/gpu/msm/kgsl_pwrscale_idlestats.c b/drivers/gpu/msm/kgsl_pwrscale_idlestats.c
index 71af893..4102302 100644
--- a/drivers/gpu/msm/kgsl_pwrscale_idlestats.c
+++ b/drivers/gpu/msm/kgsl_pwrscale_idlestats.c
@@ -89,6 +89,7 @@
 			struct kgsl_pwrscale *pwrscale)
 {
 	struct idlestats_priv *priv = pwrscale->priv;
+	struct kgsl_power_stats stats;
 	int i, busy, nr_cpu = 1;
 
 	if (priv->pulse.busy_start_time != 0) {
@@ -111,6 +112,19 @@
 			spin_unlock(&priv->cpu_info.lock);
 		}
 		priv->pulse.wait_interval /= nr_cpu;
+
+		/* This is called from within a mutex protected function, so
+		   no additional locking required */
+		device->ftbl->power_stats(device, &stats);
+
+		/* If total_time is zero, then we don't have
+		   any interesting statistics to store */
+		if (stats.total_time == 0) {
+			priv->pulse.busy_start_time = 0;
+			return;
+		}
+
+		priv->pulse.busy_interval = stats.busy_time;
 		msm_idle_stats_idle_end(&priv->idledev, &priv->pulse);
 	}
 	priv->pulse.busy_start_time = ktime_to_us(ktime_get());
@@ -120,21 +134,8 @@
 			struct kgsl_pwrscale *pwrscale)
 {
 	int i, nr_cpu;
-	struct kgsl_power_stats stats;
 	struct idlestats_priv *priv = pwrscale->priv;
 
-	/* This is called from within a mutex protected function, so
-	   no additional locking required */
-	device->ftbl->power_stats(device, &stats);
-
-	/* If total_time is zero, then we don't have
-	   any interesting statistics to store */
-	if (stats.total_time == 0) {
-		priv->pulse.busy_start_time = 0;
-		return;
-	}
-
-	priv->pulse.busy_interval   = stats.busy_time;
 	nr_cpu = num_possible_cpus();
 	for (i = 0; i < nr_cpu; i++)
 		if (cpu_online(i))
diff --git a/drivers/gpu/msm/kgsl_pwrscale_trustzone.c b/drivers/gpu/msm/kgsl_pwrscale_trustzone.c
index ad1e7ed..c8a7a71 100644
--- a/drivers/gpu/msm/kgsl_pwrscale_trustzone.c
+++ b/drivers/gpu/msm/kgsl_pwrscale_trustzone.c
@@ -114,8 +114,7 @@
 {
 	struct tz_priv *priv = pwrscale->priv;
 	if (device->state != KGSL_STATE_NAP &&
-		priv->governor == TZ_GOVERNOR_ONDEMAND &&
-		device->pwrctrl.restore_slumber == 0)
+		priv->governor == TZ_GOVERNOR_ONDEMAND)
 		kgsl_pwrctrl_pwrlevel_change(device,
 					device->pwrctrl.default_pwrlevel);
 }
@@ -182,7 +181,7 @@
 
 	/* Trustzone is only valid for some SOCs */
 	if (!(cpu_is_msm8x60() || cpu_is_msm8960() || cpu_is_apq8064() ||
-		cpu_is_msm8930()))
+		cpu_is_msm8930() || cpu_is_msm8627()))
 		return -EINVAL;
 
 	priv = pwrscale->priv = kzalloc(sizeof(struct tz_priv), GFP_KERNEL);
diff --git a/drivers/gpu/msm/z180.c b/drivers/gpu/msm/z180.c
index bc2685c..6efba45 100644
--- a/drivers/gpu/msm/z180.c
+++ b/drivers/gpu/msm/z180.c
@@ -444,11 +444,13 @@
 	    (ctrl & KGSL_CONTEXT_CTX_SWITCH)) {
 		KGSL_CMD_INFO(device, "context switch %d -> %d\n",
 			context->id, z180_dev->ringbuffer.prevctx);
-		kgsl_mmu_setstate(&device->mmu, pagetable);
+		kgsl_mmu_setstate(&device->mmu, pagetable,
+				KGSL_MEMSTORE_GLOBAL);
 		cnt = PACKETSIZE_STATESTREAM;
 		ofs = 0;
 	}
 	kgsl_setstate(&device->mmu,
+			KGSL_MEMSTORE_GLOBAL,
 			kgsl_mmu_pt_get_flags(device->mmu.hwpagetable,
 			device->id));
 
@@ -861,7 +863,8 @@
 	if (z180_dev->ringbuffer.prevctx == context->id) {
 		z180_dev->ringbuffer.prevctx = Z180_INVALID_CONTEXT;
 		device->mmu.hwpagetable = device->mmu.defaultpagetable;
-		kgsl_setstate(&device->mmu, KGSL_MMUFLAGS_PTUPDATE);
+		kgsl_setstate(&device->mmu, KGSL_MEMSTORE_GLOBAL,
+				KGSL_MMUFLAGS_PTUPDATE);
 	}
 }
 
diff --git a/drivers/hwmon/epm_adc.c b/drivers/hwmon/epm_adc.c
index a8b99b9..fd4524e 100644
--- a/drivers/hwmon/epm_adc.c
+++ b/drivers/hwmon/epm_adc.c
@@ -71,8 +71,6 @@
 struct epm_adc_drv {
 	struct platform_device		*pdev;
 	struct device			*hwmon;
-	struct sensor_device_attribute	*sens_attr;
-	char				**fnames;
 	struct spi_device		*epm_spi_client;
 	struct mutex			conv_lock;
 	uint32_t			bus_id;
@@ -723,60 +721,57 @@
 	return snprintf(buf, 16, "Result: %d\n", conv.physical);
 }
 
-static struct sensor_device_attribute epm_adc_in_attr =
-	SENSOR_ATTR(NULL, S_IRUGO, epm_adc_show_in, NULL, 0);
+static struct sensor_device_attribute epm_adc_in_attrs[] = {
+	SENSOR_ATTR(ads0_chan0,  S_IRUGO, epm_adc_show_in, NULL, 0),
+	SENSOR_ATTR(ads0_chan1,  S_IRUGO, epm_adc_show_in, NULL, 1),
+	SENSOR_ATTR(ads0_chan2,  S_IRUGO, epm_adc_show_in, NULL, 2),
+	SENSOR_ATTR(ads0_chan3,  S_IRUGO, epm_adc_show_in, NULL, 3),
+	SENSOR_ATTR(ads0_chan4,  S_IRUGO, epm_adc_show_in, NULL, 4),
+	SENSOR_ATTR(ads0_chan5,  S_IRUGO, epm_adc_show_in, NULL, 5),
+	SENSOR_ATTR(ads0_chan6,  S_IRUGO, epm_adc_show_in, NULL, 6),
+	SENSOR_ATTR(ads0_chan7,  S_IRUGO, epm_adc_show_in, NULL, 7),
+	SENSOR_ATTR(ads0_chan8,  S_IRUGO, epm_adc_show_in, NULL, 8),
+	SENSOR_ATTR(ads0_chan9,  S_IRUGO, epm_adc_show_in, NULL, 9),
+	SENSOR_ATTR(ads0_chan10, S_IRUGO, epm_adc_show_in, NULL, 10),
+	SENSOR_ATTR(ads0_chan11, S_IRUGO, epm_adc_show_in, NULL, 11),
+	SENSOR_ATTR(ads0_chan12, S_IRUGO, epm_adc_show_in, NULL, 12),
+	SENSOR_ATTR(ads0_chan13, S_IRUGO, epm_adc_show_in, NULL, 13),
+	SENSOR_ATTR(ads0_chan14, S_IRUGO, epm_adc_show_in, NULL, 14),
+	SENSOR_ATTR(ads0_chan15, S_IRUGO, epm_adc_show_in, NULL, 15),
+	SENSOR_ATTR(ads1_chan0,  S_IRUGO, epm_adc_show_in, NULL, 16),
+	SENSOR_ATTR(ads1_chan1,  S_IRUGO, epm_adc_show_in, NULL, 17),
+	SENSOR_ATTR(ads1_chan2,  S_IRUGO, epm_adc_show_in, NULL, 18),
+	SENSOR_ATTR(ads1_chan3,  S_IRUGO, epm_adc_show_in, NULL, 19),
+	SENSOR_ATTR(ads1_chan4,  S_IRUGO, epm_adc_show_in, NULL, 20),
+	SENSOR_ATTR(ads1_chan5,  S_IRUGO, epm_adc_show_in, NULL, 21),
+	SENSOR_ATTR(ads1_chan6,  S_IRUGO, epm_adc_show_in, NULL, 22),
+	SENSOR_ATTR(ads1_chan7,  S_IRUGO, epm_adc_show_in, NULL, 23),
+	SENSOR_ATTR(ads1_chan8,  S_IRUGO, epm_adc_show_in, NULL, 24),
+	SENSOR_ATTR(ads1_chan9,  S_IRUGO, epm_adc_show_in, NULL, 25),
+	SENSOR_ATTR(ads1_chan10, S_IRUGO, epm_adc_show_in, NULL, 26),
+	SENSOR_ATTR(ads1_chan11, S_IRUGO, epm_adc_show_in, NULL, 27),
+	SENSOR_ATTR(ads1_chan12, S_IRUGO, epm_adc_show_in, NULL, 28),
+	SENSOR_ATTR(ads1_chan13, S_IRUGO, epm_adc_show_in, NULL, 29),
+	SENSOR_ATTR(ads1_chan14, S_IRUGO, epm_adc_show_in, NULL, 30),
+	SENSOR_ATTR(ads1_chan15, S_IRUGO, epm_adc_show_in, NULL, 31),
+};
 
 static int __devinit epm_adc_init_hwmon(struct platform_device *pdev,
 					       struct epm_adc_drv *epm_adc)
 {
 	struct epm_adc_platform_data *pdata = pdev->dev.platform_data;
-	int num_chans = pdata->num_channels, dev_idx = 0, chan_idx = 0;
-	int i = 0, rc = 0;
-	const char prefix[] = "ads", postfix[] = "_chan";
-	char tmpbuf[3];
+	int i, rc, num_chans = pdata->num_channels;
 
-	epm_adc->fnames = devm_kzalloc(&pdev->dev,
-				num_chans * EPM_ADC_MAX_FNAME +
-				num_chans * sizeof(char *), GFP_KERNEL);
-	if (!epm_adc->fnames) {
-		dev_err(&pdev->dev, "Unable to allocate memory\n");
-		return -ENOMEM;
-	}
-
-	epm_adc->sens_attr = devm_kzalloc(&pdev->dev, num_chans *
-			    sizeof(struct sensor_device_attribute), GFP_KERNEL);
-	if (!epm_adc->sens_attr) {
-		dev_err(&pdev->dev, "Unable to allocate memory\n");
-		rc = -ENOMEM;
-	}
-
-	for (i = 0; i < num_chans; i++, chan_idx++) {
-		epm_adc->fnames[i] = (char *)epm_adc->fnames +
-			(i * EPM_ADC_MAX_FNAME) + (num_chans *
-			sizeof(char *));
-		if (chan_idx == pdata->chan_per_adc) {
-			chan_idx = 0;
-			dev_idx++;
-		}
-		strlcpy(epm_adc->fnames[i], prefix, EPM_ADC_MAX_FNAME);
-		snprintf(tmpbuf, sizeof(tmpbuf), "%d", dev_idx);
-		strlcat(epm_adc->fnames[i], tmpbuf, EPM_ADC_MAX_FNAME);
-		strlcat(epm_adc->fnames[i], postfix, EPM_ADC_MAX_FNAME);
-		snprintf(tmpbuf, sizeof(tmpbuf), "%d", chan_idx);
-		strlcat(epm_adc->fnames[i], tmpbuf, EPM_ADC_MAX_FNAME);
-		epm_adc_in_attr.index = i;
-		epm_adc_in_attr.dev_attr.attr.name = epm_adc->fnames[i];
-		memcpy(&epm_adc->sens_attr[i], &epm_adc_in_attr,
-						sizeof(epm_adc_in_attr));
+	for (i = 0; i < num_chans; i++) {
 		rc = device_create_file(&pdev->dev,
-				&epm_adc->sens_attr[i].dev_attr);
+				&epm_adc_in_attrs[i].dev_attr);
 		if (rc) {
 			dev_err(&pdev->dev, "device_create_file failed\n");
 			return rc;
 		}
 	}
 
-	return rc;
+	return 0;
 }
 
 static int __devinit epm_adc_spi_probe(struct spi_device *spi)
@@ -866,10 +861,8 @@
 	int num_chans = pdata->num_channels;
 	int i = 0;
 
-	if (epm_adc->sens_attr)
-		for (i = 0; i < num_chans; i++)
-			device_remove_file(&pdev->dev,
-					&epm_adc->sens_attr[i].dev_attr);
+	for (i = 0; i < num_chans; i++)
+		device_remove_file(&pdev->dev, &epm_adc_in_attrs[i].dev_attr);
 	hwmon_device_unregister(epm_adc->hwmon);
 	misc_deregister(&epm_adc->misc);
 	epm_adc = NULL;
diff --git a/drivers/hwmon/pm8xxx-adc.c b/drivers/hwmon/pm8xxx-adc.c
index 6bef3d3..aa9acf7 100644
--- a/drivers/hwmon/pm8xxx-adc.c
+++ b/drivers/hwmon/pm8xxx-adc.c
@@ -1042,23 +1042,6 @@
 }
 DEFINE_SIMPLE_ATTRIBUTE(reg_fops, get_adc, NULL, "%llu\n");
 
-static int get_mpp_adc(void *data, u64 *val)
-{
-	struct pm8xxx_adc_chan_result result;
-	int i = (int)data;
-	int rc;
-
-	rc = pm8xxx_adc_mpp_config_read(i,
-		ADC_MPP_1_AMUX6, &result);
-	if (!rc)
-		pr_info("ADC MPP value raw:%x physical:%lld\n",
-			result.adc_code, result.physical);
-	*val = result.physical;
-
-	return 0;
-}
-DEFINE_SIMPLE_ATTRIBUTE(reg_mpp_fops, get_mpp_adc, NULL, "%llu\n");
-
 #ifdef CONFIG_DEBUG_FS
 static void create_debugfs_entries(void)
 {
@@ -1100,6 +1083,7 @@
 						adc_pmic->adc_channel[i].name;
 		memcpy(&adc_pmic->sens_attr[i], &pm8xxx_adc_attr,
 						sizeof(pm8xxx_adc_attr));
+		sysfs_attr_init(&adc_pmic->sens_attr[i].dev_attr.attr);
 		rc = device_create_file(&pdev->dev,
 				&adc_pmic->sens_attr[i].dev_attr);
 		if (rc) {
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index 84a69bf..f867dcb 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -16,7 +16,7 @@
 # MSM IOMMU support
 config MSM_IOMMU
 	bool "MSM IOMMU Support"
-	depends on ARCH_MSM8X60 || ARCH_MSM8960 || ARCH_APQ8064 || ARCH_MSMCOPPER
+	depends on ARCH_MSM8X60 || ARCH_MSM8960 || ARCH_APQ8064 || ARCH_MSM8974
 	select IOMMU_API
 	help
 	  Support for the IOMMUs found on certain Qualcomm SOCs.
diff --git a/drivers/media/radio/radio-iris.c b/drivers/media/radio/radio-iris.c
index 53eb85c..23d11c3 100644
--- a/drivers/media/radio/radio-iris.c
+++ b/drivers/media/radio/radio-iris.c
@@ -88,6 +88,7 @@
 	unsigned char power_mode;
 	int search_on;
 	unsigned int tone_freq;
+	unsigned char spur_table_size;
 	unsigned char g_scan_time;
 	unsigned int g_antenna;
 	unsigned int g_rds_grp_proc_ps;
@@ -101,11 +102,13 @@
 	struct hci_fm_sig_threshold_rsp sig_th;
 	struct hci_fm_ch_det_threshold ch_det_threshold;
 	struct hci_fm_data_rd_rsp default_data;
+	struct hci_fm_spur_data spur_data;
 };
 
 static struct video_device *priv_videodev;
 static int iris_do_calibration(struct iris_device *radio);
 
+static int update_spur_table(struct iris_device *radio);
 static struct v4l2_queryctrl iris_v4l2_queryctrl[] = {
 	{
 	.id	= V4L2_CID_AUDIO_VOLUME,
@@ -2896,6 +2899,7 @@
 				FMDERR("get frequency failed %d\n", retval);
 			break;
 		case FM_OFF:
+			radio->spur_table_size = 0;
 			switch (radio->mode) {
 			case FM_RECV:
 				retval = hci_cmd(HCI_FM_DISABLE_RECV_CMD,
@@ -3248,12 +3252,116 @@
 		*/
 		retval = 0;
 		break;
+	case V4L2_CID_PRIVATE_SPUR_FREQ:
+		if (radio->spur_table_size >= MAX_SPUR_FREQ_LIMIT) {
+			FMDERR("%s: Spur Table Full!\n", __func__);
+			retval = -1;
+		} else
+			radio->spur_data.freq[radio->spur_table_size] =
+				ctrl->value;
+		break;
+	case V4L2_CID_PRIVATE_SPUR_FREQ_RMSSI:
+		if (radio->spur_table_size >= MAX_SPUR_FREQ_LIMIT) {
+			FMDERR("%s: Spur Table Full!\n", __func__);
+			retval = -1;
+		} else
+			radio->spur_data.rmssi[radio->spur_table_size] =
+				ctrl->value;
+		break;
+	case V4L2_CID_PRIVATE_SPUR_SELECTION:
+		if (radio->spur_table_size >= MAX_SPUR_FREQ_LIMIT) {
+			FMDERR("%s: Spur Table Full!\n", __func__);
+			retval = -1;
+		} else {
+			radio->spur_data.enable[radio->spur_table_size] =
+				ctrl->value;
+			radio->spur_table_size++;
+		}
+		break;
+	case V4L2_CID_PRIVATE_UPDATE_SPUR_TABLE:
+		update_spur_table(radio);
+		break;
 	default:
 		retval = -EINVAL;
 	}
 	return retval;
 }
 
+static int update_spur_table(struct iris_device *radio)
+{
+	struct hci_fm_def_data_wr_req default_data;
+	int len = 0, index = 0, offset = 0, i = 0;
+	int retval = 0, temp = 0, cnt = 0;
+
+	memset(&default_data, 0, sizeof(default_data));
+
+	/* Pass the mode of SPUR_CLK */
+	default_data.mode = CKK_SPUR;
+
+	temp = radio->spur_table_size;
+	for (cnt = 0; cnt < (temp / 5); cnt++) {
+		offset = 0;
+		/*
+		 * Program the spur entries in spur table in following order:
+		 *    Spur index
+		 *    Length of the spur data
+		 *    Spur Data:
+		 *        MSB of the spur frequency
+		 *        LSB of the spur frequency
+		 *        Enable/Disable the spur frequency
+		 *        RMSSI value of the spur frequency
+		 */
+		default_data.data[offset++] = ENTRY_0 + cnt;
+		for (i = 0; i < SPUR_ENTRIES_PER_ID; i++) {
+			default_data.data[offset++] = GET_FREQ(COMPUTE_SPUR(
+				radio->spur_data.freq[index]), 0);
+			default_data.data[offset++] = GET_FREQ(COMPUTE_SPUR(
+				radio->spur_data.freq[index]), 1);
+			default_data.data[offset++] =
+				radio->spur_data.enable[index];
+			default_data.data[offset++] =
+				radio->spur_data.rmssi[index];
+			index++;
+		}
+		len = (SPUR_ENTRIES_PER_ID * SPUR_DATA_SIZE);
+		default_data.length = (len + 1);
+		retval = hci_def_data_write(&default_data, radio->fm_hdev);
+		if (retval < 0) {
+			FMDERR("%s: Failed to configure entries for ID : %d\n",
+				__func__, default_data.data[0]);
+			return retval;
+		}
+	}
+
+	/* Compute balance SPUR frequencies to be programmed */
+	temp %= SPUR_ENTRIES_PER_ID;
+	if (temp > 0) {
+		offset = 0;
+		default_data.data[offset++] = (radio->spur_table_size / 5);
+		for (i = 0; i < temp; i++) {
+			default_data.data[offset++] = GET_FREQ(COMPUTE_SPUR(
+				radio->spur_data.freq[index]), 0);
+			default_data.data[offset++] = GET_FREQ(COMPUTE_SPUR(
+				radio->spur_data.freq[index]), 1);
+			default_data.data[offset++] =
+				radio->spur_data.enable[index];
+			default_data.data[offset++] =
+				radio->spur_data.rmssi[index];
+			index++;
+		}
+		len = (temp * SPUR_DATA_SIZE);
+		default_data.length = (len + 1);
+		retval = hci_def_data_write(&default_data, radio->fm_hdev);
+		if (retval < 0) {
+			FMDERR("%s: Failed to configure entries for ID : %d\n",
+				__func__, default_data.data[0]);
+			return retval;
+		}
+	}
+
+	return retval;
+}
+
 static int iris_vidioc_g_tuner(struct file *file, void *priv,
 		struct v4l2_tuner *tuner)
 {
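
Worked example for update_spur_table() above (assuming SPUR_ENTRIES_PER_ID is 5 and SPUR_DATA_SIZE is 4, which is what the /5 division and the four bytes written per entry suggest): with spur_table_size = 7, the first pass issues one hci_def_data_write() for ENTRY_0 carrying entries 0 through 4, so len = 5 * 4 = 20 and default_data.length = 21, the extra byte being the spur index stored at data[0]. The remainder pass then programs index spur_table_size / 5 = 1 with the two leftover entries, giving len = 2 * 4 = 8 and default_data.length = 9. Within each entry the four bytes are packed in the order listed in the comment: frequency MSB, frequency LSB, enable flag, RMSSI.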
diff --git a/drivers/media/radio/radio-tavarua.c b/drivers/media/radio/radio-tavarua.c
index 6085871..116b7f9 100644
--- a/drivers/media/radio/radio-tavarua.c
+++ b/drivers/media/radio/radio-tavarua.c
@@ -996,6 +996,10 @@
 			FMDBG("read PHY_TXGAIN is successful");
 			complete(&radio->sync_req_done);
 			break;
+		case (XFR_EXT | 0x80):
+			FMDBG("Set tone generator successful\n");
+			complete(&radio->sync_req_done);
+			break;
 		case (0x80 | RX_CONFIG):
 		case (0x80 | RADIO_CONFIG):
 		case (0x80 | RDS_CONFIG):
@@ -3449,9 +3453,17 @@
 		if (retval < 0)
 			FMDBG("write failed");
 	} break;
+	case V4L2_CID_PRIVATE_SOFT_MUTE:
+		radio->registers[IOCTRL] &= ~(IOC_SFT_MUTE);
+		if (ctrl->value)
+			radio->registers[IOCTRL] |= IOC_SFT_MUTE;
+		retval = tavarua_write_register(radio, IOCTRL,
+					radio->registers[IOCTRL]);
+		if (retval < 0)
+			FMDERR("Failed to enable/disable SMute\n");
+		break;
 	/*These IOCTL's are place holders to keep the
 	driver compatible with change in frame works for IRIS */
-	case V4L2_CID_PRIVATE_SOFT_MUTE:
 	case V4L2_CID_PRIVATE_RIVA_ACCS_ADDR:
 	case V4L2_CID_PRIVATE_RIVA_ACCS_LEN:
 	case V4L2_CID_PRIVATE_RIVA_PEEK:
@@ -3459,7 +3471,6 @@
 	case V4L2_CID_PRIVATE_SSBI_ACCS_ADDR:
 	case V4L2_CID_PRIVATE_SSBI_PEEK:
 	case V4L2_CID_PRIVATE_SSBI_POKE:
-	case V4L2_CID_PRIVATE_TX_TONE:
 	case V4L2_CID_PRIVATE_RDS_GRP_COUNTERS:
 	case V4L2_CID_PRIVATE_SET_NOTCH_FILTER:
 	case V4L2_CID_PRIVATE_TAVARUA_DO_CALIBRATION:
@@ -3479,6 +3490,54 @@
 	case V4L2_CID_PRIVATE_UPDATE_SPUR_TABLE:
 		retval = update_spur_table(radio);
 		break;
+	case V4L2_CID_PRIVATE_TX_TONE:
+		retval = 0;
+		memset(xfr_buf, 0, sizeof(xfr_buf));
+		switch (ctrl->value) {
+		case ONE_KHZ_LR_EQUA_0DBFS:
+			xfr_buf[TONE_CHANNEL_EN_AND_SCALING_BYTE]
+				= TONE_LEFT_RIGHT_CH_ENABLED;
+			xfr_buf[TONE_LEFT_FREQ_BYTE] = 0x01;
+			xfr_buf[TONE_RIGHT_FREQ_BYTE] = 0x01;
+			break;
+		case ONE_KHZ_LEFTONLY_EQUA_0DBFS:
+			xfr_buf[TONE_CHANNEL_EN_AND_SCALING_BYTE]
+				 = TONE_LEFT_CH_ENABLED;
+			xfr_buf[TONE_LEFT_FREQ_BYTE] = 0x01;
+			break;
+		case ONE_KHZ_RIGHTONLY_EQUA_0DBFS:
+			xfr_buf[TONE_CHANNEL_EN_AND_SCALING_BYTE]
+				 = TONE_RIGHT_CH_ENABLED;
+			xfr_buf[TONE_RIGHT_FREQ_BYTE] = 0x01;
+			break;
+		case ONE_KHZ_LR_EQUA_l8DBFS:
+			xfr_buf[TONE_CHANNEL_EN_AND_SCALING_BYTE]
+				 = (LSH_DATA(TONE_SCALE_IND_12,
+						 TONE_SCALING_SHIFT)
+					 | TONE_LEFT_RIGHT_CH_ENABLED);
+			xfr_buf[TONE_LEFT_FREQ_BYTE] = 0x01;
+			xfr_buf[TONE_RIGHT_FREQ_BYTE] = 0x01;
+			break;
+		case FIFTEEN_KHZ_LR_EQUA_l8DBFS:
+			xfr_buf[TONE_CHANNEL_EN_AND_SCALING_BYTE]
+				 = (LSH_DATA(TONE_SCALE_IND_12,
+						 TONE_SCALING_SHIFT)
+					 | TONE_LEFT_RIGHT_CH_ENABLED);
+			xfr_buf[TONE_LEFT_FREQ_BYTE] = 0x0F;
+			xfr_buf[TONE_RIGHT_FREQ_BYTE] = 0x0F;
+			break;
+		default:
+			retval = -1;
+			FMDERR("tone generator value not valid\n");
+			break;
+		}
+		if (retval >= 0) {
+			xfr_buf[TONE_GEN_CTRL_BYTE] = 0x01;
+			retval = sync_write_xfr(radio, XFR_EXT, xfr_buf);
+		}
+		if (retval < 0)
+			FMDERR("Tone generator failed\n");
+		break;
 	default:
 		retval = -EINVAL;
 	}
diff --git a/drivers/media/video/msm/Kconfig b/drivers/media/video/msm/Kconfig
index 5ffc133..9c791e4 100644
--- a/drivers/media/video/msm/Kconfig
+++ b/drivers/media/video/msm/Kconfig
@@ -271,6 +271,15 @@
 	cores and composite them into a single
 	interrupt to the MSM.
 
+config MSM_CPP
+        bool "Qualcomm MSM Camera Post Processing Engine support"
+        depends on MSM_CAMERA && MSM_CAMERA_V4L2
+        ---help---
+          Enable support for the Camera Post-processing Engine.
+          The post-processing engine is capable of scaling
+          and cropping images. The driver supports the V4L2
+          subdev APIs.
+
 config QUP_EXCLUSIVE_TO_CAMERA
 	bool "QUP exclusive to camera"
 	depends on MSM_CAMERA
diff --git a/drivers/media/video/msm/Makefile b/drivers/media/video/msm/Makefile
index 431da2e..63120da 100644
--- a/drivers/media/video/msm/Makefile
+++ b/drivers/media/video/msm/Makefile
@@ -13,6 +13,7 @@
   EXTRA_CFLAGS += -Idrivers/media/video/msm/server
   obj-$(CONFIG_MSM_CAMERA) += msm_isp.o msm.o msm_mem.o msm_mctl.o msm_mctl_buf.o msm_mctl_pp.o
   obj-$(CONFIG_MSM_CAMERA) += server/ eeprom/ sensors/ actuators/ csi/
+  obj-$(CONFIG_MSM_CPP) += cpp/
   obj-$(CONFIG_MSM_CAMERA) += msm_gesture.o
   obj-$(CONFIG_MSM_CAM_IRQ_ROUTER) += msm_camirq_router.o
 else
diff --git a/drivers/media/video/msm/actuators/msm_actuator.c b/drivers/media/video/msm/actuators/msm_actuator.c
index 50399de..554cddc 100644
--- a/drivers/media/video/msm/actuators/msm_actuator.c
+++ b/drivers/media/video/msm/actuators/msm_actuator.c
@@ -14,55 +14,42 @@
 #include "msm_actuator.h"
 
 static struct msm_actuator_ctrl_t msm_actuator_t;
-
-static struct msm_actuator msm_vcm_actuator_table = {
-	.act_type = ACTUATOR_VCM,
-	.func_tbl = {
-		.actuator_init_step_table = msm_actuator_init_step_table,
-		.actuator_move_focus = msm_actuator_move_focus,
-		.actuator_write_focus = msm_actuator_write_focus,
-		.actuator_set_default_focus = msm_actuator_set_default_focus,
-		.actuator_init_focus = msm_actuator_init_focus,
-		.actuator_i2c_write = msm_actuator_i2c_write,
-	},
-};
-
-static struct msm_actuator msm_piezo_actuator_table = {
-	.act_type = ACTUATOR_PIEZO,
-	.func_tbl = {
-		.actuator_init_step_table = NULL,
-		.actuator_move_focus = msm_actuator_piezo_move_focus,
-		.actuator_write_focus = NULL,
-		.actuator_set_default_focus =
-			msm_actuator_piezo_set_default_focus,
-		.actuator_init_focus = msm_actuator_init_focus,
-		.actuator_i2c_write = msm_actuator_i2c_write,
-	},
-};
+static struct msm_actuator msm_vcm_actuator_table;
+static struct msm_actuator msm_piezo_actuator_table;
 
 static struct msm_actuator *actuators[] = {
 	&msm_vcm_actuator_table,
 	&msm_piezo_actuator_table,
 };
 
-int32_t msm_actuator_piezo_set_default_focus(
+static int32_t msm_actuator_piezo_set_default_focus(
 	struct msm_actuator_ctrl_t *a_ctrl,
 	struct msm_actuator_move_params_t *move_params)
 {
 	int32_t rc = 0;
 
 	if (a_ctrl->curr_step_pos != 0) {
-		rc = a_ctrl->func_tbl->actuator_i2c_write(a_ctrl,
-			a_ctrl->initial_code, 0);
-		rc = a_ctrl->func_tbl->actuator_i2c_write(a_ctrl,
-			a_ctrl->initial_code, 0);
+		a_ctrl->i2c_tbl_index = 0;
+		rc = a_ctrl->func_tbl->actuator_parse_i2c_params(a_ctrl,
+			a_ctrl->initial_code, 0, 0);
+		rc = a_ctrl->func_tbl->actuator_parse_i2c_params(a_ctrl,
+			a_ctrl->initial_code, 0, 0);
+		rc = msm_camera_i2c_write_table_w_microdelay(
+			&a_ctrl->i2c_client, a_ctrl->i2c_reg_tbl,
+			a_ctrl->i2c_tbl_index, a_ctrl->i2c_data_type);
+		if (rc < 0) {
+			pr_err("%s: i2c write error:%d\n",
+				__func__, rc);
+			return rc;
+		}
+		a_ctrl->i2c_tbl_index = 0;
 		a_ctrl->curr_step_pos = 0;
 	}
 	return rc;
 }
 
-int32_t msm_actuator_i2c_write(struct msm_actuator_ctrl_t *a_ctrl,
-	int16_t next_lens_position, uint32_t hw_params)
+static int32_t msm_actuator_parse_i2c_params(struct msm_actuator_ctrl_t *a_ctrl,
+	int16_t next_lens_position, uint32_t hw_params, uint16_t delay)
 {
 	struct msm_actuator_reg_params_t *write_arr = a_ctrl->reg_tbl;
 	uint32_t hw_dword = hw_params;
@@ -70,6 +57,7 @@
 	uint16_t value = 0;
 	uint32_t size = a_ctrl->reg_tbl_size, i = 0;
 	int32_t rc = 0;
+	struct msm_camera_i2c_reg_tbl *i2c_tbl = a_ctrl->i2c_reg_tbl;
 	CDBG("%s: IN\n", __func__);
 	for (i = 0; i < size; i++) {
 		if (write_arr[i].reg_write_type == MSM_ACTUATOR_WRITE_DAC) {
@@ -82,22 +70,19 @@
 				i2c_byte1 = write_arr[i].reg_addr;
 				i2c_byte2 = value;
 				if (size != (i+1)) {
-					i2c_byte2 = (i2c_byte2 & 0xFF00) >> 8;
+					i2c_byte2 = value & 0xFF;
 					CDBG("%s: byte1:0x%x, byte2:0x%x\n",
 					__func__, i2c_byte1, i2c_byte2);
-					rc = msm_camera_i2c_write(
-						&a_ctrl->i2c_client,
-						i2c_byte1, i2c_byte2,
-						a_ctrl->i2c_data_type);
-					if (rc < 0) {
-						pr_err("%s: i2c write error:%d\n",
-							__func__, rc);
-						return rc;
-					}
-
+					i2c_tbl[a_ctrl->i2c_tbl_index].
+						reg_addr = i2c_byte1;
+					i2c_tbl[a_ctrl->i2c_tbl_index].
+						reg_data = i2c_byte2;
+					i2c_tbl[a_ctrl->i2c_tbl_index].
+						delay = 0;
+					a_ctrl->i2c_tbl_index++;
 					i++;
 					i2c_byte1 = write_arr[i].reg_addr;
-					i2c_byte2 = value & 0xFF;
+					i2c_byte2 = (value & 0xFF00) >> 8;
 				}
 			} else {
 				i2c_byte1 = (value & 0xFF00) >> 8;
@@ -110,14 +95,16 @@
 		}
 		CDBG("%s: i2c_byte1:0x%x, i2c_byte2:0x%x\n", __func__,
 			i2c_byte1, i2c_byte2);
-		rc = msm_camera_i2c_write(&a_ctrl->i2c_client,
-			i2c_byte1, i2c_byte2, a_ctrl->i2c_data_type);
+		i2c_tbl[a_ctrl->i2c_tbl_index].reg_addr = i2c_byte1;
+		i2c_tbl[a_ctrl->i2c_tbl_index].reg_data = i2c_byte2;
+		i2c_tbl[a_ctrl->i2c_tbl_index].delay = delay;
+		a_ctrl->i2c_tbl_index++;
 	}
 		CDBG("%s: OUT\n", __func__);
 	return rc;
 }
 
-int32_t msm_actuator_init_focus(struct msm_actuator_ctrl_t *a_ctrl,
+static int32_t msm_actuator_init_focus(struct msm_actuator_ctrl_t *a_ctrl,
 	uint16_t size, enum msm_actuator_data_type type,
 	struct reg_settings_t *settings)
 {
@@ -153,7 +140,7 @@
 	return rc;
 }
 
-int32_t msm_actuator_write_focus(
+static int32_t msm_actuator_write_focus(
 	struct msm_actuator_ctrl_t *a_ctrl,
 	uint16_t curr_lens_pos,
 	struct damping_params_t *damping_params,
@@ -177,27 +164,25 @@
 			(next_lens_pos +
 				(sign_direction * damping_code_step))) {
 		rc = a_ctrl->func_tbl->
-			actuator_i2c_write(a_ctrl, next_lens_pos,
-				damping_params->hw_params);
+			actuator_parse_i2c_params(a_ctrl, next_lens_pos,
+				damping_params->hw_params, wait_time);
 		if (rc < 0) {
 			pr_err("%s: error:%d\n",
 				__func__, rc);
 			return rc;
 		}
 		curr_lens_pos = next_lens_pos;
-		usleep(wait_time);
 	}
 
 	if (curr_lens_pos != code_boundary) {
 		rc = a_ctrl->func_tbl->
-			actuator_i2c_write(a_ctrl, code_boundary,
-				damping_params->hw_params);
-		usleep(wait_time);
+			actuator_parse_i2c_params(a_ctrl, code_boundary,
+				damping_params->hw_params, wait_time);
 	}
 	return rc;
 }
 
-int32_t msm_actuator_piezo_move_focus(
+static int32_t msm_actuator_piezo_move_focus(
 	struct msm_actuator_ctrl_t *a_ctrl,
 	struct msm_actuator_move_params_t *move_params)
 {
@@ -208,17 +193,27 @@
 	if (num_steps == 0)
 		return rc;
 
+	a_ctrl->i2c_tbl_index = 0;
 	rc = a_ctrl->func_tbl->
-		actuator_i2c_write(a_ctrl,
+		actuator_parse_i2c_params(a_ctrl,
 		(num_steps *
 		a_ctrl->region_params[0].code_per_step),
-		move_params->ringing_params[0].hw_params);
+		move_params->ringing_params[0].hw_params, 0);
 
+	rc = msm_camera_i2c_write_table_w_microdelay(&a_ctrl->i2c_client,
+		a_ctrl->i2c_reg_tbl, a_ctrl->i2c_tbl_index,
+		a_ctrl->i2c_data_type);
+	if (rc < 0) {
+		pr_err("%s: i2c write error:%d\n",
+			__func__, rc);
+		return rc;
+	}
+	a_ctrl->i2c_tbl_index = 0;
 	a_ctrl->curr_step_pos = dest_step_position;
 	return rc;
 }
 
-int32_t msm_actuator_move_focus(
+static int32_t msm_actuator_move_focus(
 	struct msm_actuator_ctrl_t *a_ctrl,
 	struct msm_actuator_move_params_t *move_params)
 {
@@ -241,6 +236,7 @@
 		return rc;
 
 	curr_lens_pos = a_ctrl->step_position_table[a_ctrl->curr_step_pos];
+	a_ctrl->i2c_tbl_index = 0;
 	CDBG("curr_step_pos =%d dest_step_pos =%d curr_lens_pos=%d\n",
 		a_ctrl->curr_step_pos, dest_step_pos, curr_lens_pos);
 
@@ -299,10 +295,20 @@
 		a_ctrl->curr_step_pos = target_step_pos;
 	}
 
+	rc = msm_camera_i2c_write_table_w_microdelay(&a_ctrl->i2c_client,
+		a_ctrl->i2c_reg_tbl, a_ctrl->i2c_tbl_index,
+		a_ctrl->i2c_data_type);
+	if (rc < 0) {
+		pr_err("%s: i2c write error:%d\n",
+			__func__, rc);
+		return rc;
+	}
+	a_ctrl->i2c_tbl_index = 0;
+
 	return rc;
 }
 
-int32_t msm_actuator_init_step_table(struct msm_actuator_ctrl_t *a_ctrl,
+static int32_t msm_actuator_init_step_table(struct msm_actuator_ctrl_t *a_ctrl,
 	struct msm_actuator_set_info_t *set_info)
 {
 	int16_t code_per_step = 0;
@@ -361,7 +367,7 @@
 	return rc;
 }
 
-int32_t msm_actuator_set_default_focus(
+static int32_t msm_actuator_set_default_focus(
 	struct msm_actuator_ctrl_t *a_ctrl,
 	struct msm_actuator_move_params_t *move_params)
 {
@@ -373,7 +379,7 @@
 	return rc;
 }
 
-int32_t msm_actuator_power_down(struct msm_actuator_ctrl_t *a_ctrl)
+static int32_t msm_actuator_power_down(struct msm_actuator_ctrl_t *a_ctrl)
 {
 	int32_t rc = 0;
 	if (a_ctrl->vcm_enable) {
@@ -384,10 +390,13 @@
 
 	kfree(a_ctrl->step_position_table);
 	a_ctrl->step_position_table = NULL;
+	kfree(a_ctrl->i2c_reg_tbl);
+	a_ctrl->i2c_reg_tbl = NULL;
+	a_ctrl->i2c_tbl_index = 0;
 	return rc;
 }
 
-int32_t msm_actuator_init(struct msm_actuator_ctrl_t *a_ctrl,
+static int32_t msm_actuator_init(struct msm_actuator_ctrl_t *a_ctrl,
 	struct msm_actuator_set_info_t *set_info) {
 	struct reg_settings_t *init_settings = NULL;
 	int32_t rc = -EFAULT;
@@ -412,7 +421,6 @@
 		pr_err("%s: MAX_ACTUATOR_REGION is exceeded.\n", __func__);
 		return -EFAULT;
 	}
-	a_ctrl->total_steps = set_info->af_tuning_params.total_steps;
 	a_ctrl->pwd_step = set_info->af_tuning_params.pwd_step;
 	a_ctrl->total_steps = set_info->af_tuning_params.total_steps;
 
@@ -430,11 +438,22 @@
 			__func__);
 		return -EFAULT;
 	}
+
+	a_ctrl->i2c_reg_tbl =
+		kmalloc(sizeof(struct msm_camera_i2c_reg_tbl) *
+		(set_info->af_tuning_params.total_steps + 1), GFP_KERNEL);
+	if (!a_ctrl->i2c_reg_tbl) {
+		pr_err("%s kmalloc fail\n", __func__);
+		return -EFAULT;
+	}
+
 	if (copy_from_user(&a_ctrl->reg_tbl,
 		(void *)set_info->actuator_params.reg_tbl_params,
 		a_ctrl->reg_tbl_size *
-		sizeof(struct msm_actuator_reg_params_t)))
+		sizeof(struct msm_actuator_reg_params_t))) {
+		kfree(a_ctrl->i2c_reg_tbl);
 		return -EFAULT;
+	}
 
 	if (set_info->actuator_params.init_setting_size) {
 		if (a_ctrl->func_tbl->actuator_init_focus) {
@@ -442,6 +461,7 @@
 				(set_info->actuator_params.init_setting_size),
 				GFP_KERNEL);
 			if (init_settings == NULL) {
+				kfree(a_ctrl->i2c_reg_tbl);
 				pr_err("%s Error allocating memory for init_settings\n",
 					__func__);
 				return -EFAULT;
@@ -451,6 +471,7 @@
 				set_info->actuator_params.init_setting_size *
 				sizeof(struct reg_settings_t))) {
 				kfree(init_settings);
+				kfree(a_ctrl->i2c_reg_tbl);
 				pr_err("%s Error copying init_settings\n",
 					__func__);
 				return -EFAULT;
@@ -461,6 +482,7 @@
 				init_settings);
 			kfree(init_settings);
 			if (rc < 0) {
+				kfree(a_ctrl->i2c_reg_tbl);
 				pr_err("%s Error actuator_init_focus\n",
 					__func__);
 				return -EFAULT;
@@ -480,7 +502,7 @@
 }
 
 
-int32_t msm_actuator_config(struct msm_actuator_ctrl_t *a_ctrl,
+static int32_t msm_actuator_config(struct msm_actuator_ctrl_t *a_ctrl,
 							void __user *argp)
 {
 	struct msm_actuator_cfg_data cdata;
@@ -519,7 +541,7 @@
 	return rc;
 }
 
-int32_t msm_actuator_i2c_probe(
+static int32_t msm_actuator_i2c_probe(
 	struct i2c_client *client,
 	const struct i2c_device_id *id)
 {
@@ -554,7 +576,7 @@
 	return rc;
 }
 
-int32_t msm_actuator_power_up(struct msm_actuator_ctrl_t *a_ctrl)
+static int32_t msm_actuator_power_up(struct msm_actuator_ctrl_t *a_ctrl)
 {
 	int rc = 0;
 	CDBG("%s called\n", __func__);
@@ -594,7 +616,7 @@
 	return i2c_add_driver(msm_actuator_t.i2c_driver);
 }
 
-long msm_actuator_subdev_ioctl(struct v4l2_subdev *sd,
+static long msm_actuator_subdev_ioctl(struct v4l2_subdev *sd,
 			unsigned int cmd, void *arg)
 {
 	struct msm_actuator_ctrl_t *a_ctrl = get_actrl(sd);
@@ -607,7 +629,7 @@
 	}
 }
 
-int32_t msm_actuator_power(struct v4l2_subdev *sd, int on)
+static int32_t msm_actuator_power(struct v4l2_subdev *sd, int on)
 {
 	int rc = 0;
 	struct msm_actuator_ctrl_t *a_ctrl = get_actrl(sd);
@@ -644,6 +666,31 @@
 
 };
 
+static struct msm_actuator msm_vcm_actuator_table = {
+	.act_type = ACTUATOR_VCM,
+	.func_tbl = {
+		.actuator_init_step_table = msm_actuator_init_step_table,
+		.actuator_move_focus = msm_actuator_move_focus,
+		.actuator_write_focus = msm_actuator_write_focus,
+		.actuator_set_default_focus = msm_actuator_set_default_focus,
+		.actuator_init_focus = msm_actuator_init_focus,
+		.actuator_parse_i2c_params = msm_actuator_parse_i2c_params,
+	},
+};
+
+static struct msm_actuator msm_piezo_actuator_table = {
+	.act_type = ACTUATOR_PIEZO,
+	.func_tbl = {
+		.actuator_init_step_table = NULL,
+		.actuator_move_focus = msm_actuator_piezo_move_focus,
+		.actuator_write_focus = NULL,
+		.actuator_set_default_focus =
+			msm_actuator_piezo_set_default_focus,
+		.actuator_init_focus = msm_actuator_init_focus,
+		.actuator_parse_i2c_params = msm_actuator_parse_i2c_params,
+	},
+};
+
 subsys_initcall(msm_actuator_i2c_add_driver);
 MODULE_DESCRIPTION("MSM ACTUATOR");
 MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/video/msm/actuators/msm_actuator.h b/drivers/media/video/msm/actuators/msm_actuator.h
index 4f936e7..82157e8 100644
--- a/drivers/media/video/msm/actuators/msm_actuator.h
+++ b/drivers/media/video/msm/actuators/msm_actuator.h
@@ -51,8 +51,8 @@
 			struct msm_actuator_move_params_t *);
 	int32_t (*actuator_move_focus) (struct msm_actuator_ctrl_t *,
 			struct msm_actuator_move_params_t *);
-	int32_t (*actuator_i2c_write)(struct msm_actuator_ctrl_t *,
-			int16_t, uint32_t);
+	int32_t (*actuator_parse_i2c_params)(struct msm_actuator_ctrl_t *,
+			int16_t, uint32_t, uint16_t);
 	int32_t (*actuator_write_focus)(struct msm_actuator_ctrl_t *,
 			uint16_t,
 			struct damping_params_t *,
@@ -87,40 +87,11 @@
 	uint32_t total_steps;
 	uint16_t pwd_step;
 	uint16_t initial_code;
+	struct msm_camera_i2c_reg_tbl *i2c_reg_tbl;
+	uint16_t i2c_tbl_index;
 };
 
 struct msm_actuator_ctrl_t *get_actrl(struct v4l2_subdev *sd);
-int32_t msm_actuator_i2c_write(struct msm_actuator_ctrl_t *a_ctrl,
-		int16_t next_lens_position, uint32_t hw_params);
-int32_t msm_actuator_init_focus(struct msm_actuator_ctrl_t *a_ctrl,
-		uint16_t size, enum msm_actuator_data_type type,
-		struct reg_settings_t *settings);
-int32_t msm_actuator_i2c_write_b_af(struct msm_actuator_ctrl_t *a_ctrl,
-		uint8_t msb,
-		uint8_t lsb);
-int32_t msm_actuator_move_focus(struct msm_actuator_ctrl_t *a_ctrl,
-		struct msm_actuator_move_params_t *move_params);
-int32_t msm_actuator_piezo_move_focus(
-		struct msm_actuator_ctrl_t *a_ctrl,
-		struct msm_actuator_move_params_t *move_params);
-int32_t msm_actuator_init_step_table(struct msm_actuator_ctrl_t *a_ctrl,
-		struct msm_actuator_set_info_t *set_info);
-int32_t msm_actuator_set_default_focus(struct msm_actuator_ctrl_t *a_ctrl,
-		struct msm_actuator_move_params_t *move_params);
-int32_t msm_actuator_piezo_set_default_focus(
-		struct msm_actuator_ctrl_t *a_ctrl,
-		struct msm_actuator_move_params_t *move_params);
-int32_t msm_actuator_i2c_probe(struct i2c_client *client,
-		const struct i2c_device_id *id);
-int32_t msm_actuator_write_focus(struct msm_actuator_ctrl_t *a_ctrl,
-		uint16_t curr_lens_pos, struct damping_params_t *damping_params,
-		int8_t sign_direction, int16_t code_boundary);
-int32_t msm_actuator_write_focus2(struct msm_actuator_ctrl_t *a_ctrl,
-		uint16_t curr_lens_pos, struct damping_params_t *damping_params,
-		int8_t sign_direction, int16_t code_boundary);
-long msm_actuator_subdev_ioctl(struct v4l2_subdev *sd,
-			unsigned int cmd, void *arg);
-int32_t msm_actuator_power(struct v4l2_subdev *sd, int on);
 
 #define VIDIOC_MSM_ACTUATOR_CFG \
 	_IOWR('V', BASE_VIDIOC_PRIVATE + 11, void __user *)
diff --git a/drivers/media/video/msm/cpp/Makefile b/drivers/media/video/msm/cpp/Makefile
new file mode 100644
index 0000000..b4f1fdf
--- /dev/null
+++ b/drivers/media/video/msm/cpp/Makefile
@@ -0,0 +1,5 @@
+GCC_VERSION      := $(shell $(CONFIG_SHELL) $(PWD)/scripts/gcc-version.sh $(CROSS_COMPILE)gcc)
+ccflags-y += -Idrivers/media/video/msm
+ccflags-y += -Idrivers/media/video/msm/io
+obj-$(CONFIG_MSM_CPP) += msm_cpp.o
+
diff --git a/drivers/media/video/msm/cpp/msm_cpp.c b/drivers/media/video/msm/cpp/msm_cpp.c
new file mode 100644
index 0000000..e569388
--- /dev/null
+++ b/drivers/media/video/msm/cpp/msm_cpp.c
@@ -0,0 +1,412 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/delay.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <mach/board.h>
+#include <mach/camera.h>
+#include <mach/vreg.h>
+#include <media/msm_isp.h>
+#include <linux/proc_fs.h>
+#include <linux/debugfs.h>
+
+#include "msm_cpp.h"
+#include "msm.h"
+
+#define CONFIG_MSM_CPP_DBG 0
+
+#if CONFIG_MSM_CPP_DBG
+#define CPP_DBG(fmt, args...) pr_info(fmt, ##args)
+#else
+#define CPP_DBG(fmt, args...) pr_debug(fmt, ##args)
+#endif
+
+static int cpp_open_node(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
+{
+	uint32_t i;
+	struct cpp_device *cpp_dev = v4l2_get_subdevdata(sd);
+	CPP_DBG("%s\n", __func__);
+
+	mutex_lock(&cpp_dev->mutex);
+	if (cpp_dev->cpp_open_cnt == MAX_ACTIVE_CPP_INSTANCE) {
+		pr_err("No free CPP instance\n");
+		mutex_unlock(&cpp_dev->mutex);
+		return -ENODEV;
+	}
+
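+	/* Claim the first inactive slot in the subscribe list for this file handle */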
+	for (i = 0; i < MAX_ACTIVE_CPP_INSTANCE; i++) {
+		if (cpp_dev->cpp_subscribe_list[i].active == 0) {
+			cpp_dev->cpp_subscribe_list[i].active = 1;
+			cpp_dev->cpp_subscribe_list[i].vfh = &fh->vfh;
+			break;
+		}
+	}
+	if (i == MAX_ACTIVE_CPP_INSTANCE) {
+		pr_err("No free instance\n");
+		mutex_unlock(&cpp_dev->mutex);
+		return -ENODEV;
+	}
+
+	CPP_DBG("open %d %p\n", i, &fh->vfh);
+	cpp_dev->cpp_open_cnt++;
+	mutex_unlock(&cpp_dev->mutex);
+	return 0;
+}
+
+static int cpp_close_node(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
+{
+	uint32_t i;
+	struct cpp_device *cpp_dev = v4l2_get_subdevdata(sd);
+	mutex_lock(&cpp_dev->mutex);
+	for (i = 0; i < MAX_ACTIVE_CPP_INSTANCE; i++) {
+		if (cpp_dev->cpp_subscribe_list[i].vfh == &fh->vfh) {
+			cpp_dev->cpp_subscribe_list[i].active = 0;
+			cpp_dev->cpp_subscribe_list[i].vfh = NULL;
+			break;
+		}
+	}
+	if (i == MAX_ACTIVE_CPP_INSTANCE) {
+		pr_err("Invalid close\n");
+		mutex_unlock(&cpp_dev->mutex);
+		return -ENODEV;
+	}
+
+	CPP_DBG("close %d %p\n", i, &fh->vfh);
+	cpp_dev->cpp_open_cnt--;
+	mutex_unlock(&cpp_dev->mutex);
+	return 0;
+}
+
+static const struct v4l2_subdev_internal_ops msm_cpp_internal_ops = {
+	.open = cpp_open_node,
+	.close = cpp_close_node,
+};
+
+static int msm_cpp_notify_frame_done(struct cpp_device *cpp_dev)
+{
+	struct v4l2_event v4l2_evt;
+	struct msm_queue_cmd *frame_qcmd;
+	struct msm_queue_cmd *event_qcmd;
+	struct msm_cpp_frame_info_t *processed_frame;
+	struct msm_device_queue *queue = &cpp_dev->processing_q;
+
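+	/* Hand the oldest processed frame to the event queue and signal userspace */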
+	if (queue->len > 0) {
+		frame_qcmd = msm_dequeue(queue, list_frame);
+		processed_frame = frame_qcmd->command;
+
+		event_qcmd = kzalloc(sizeof(struct msm_queue_cmd), GFP_KERNEL);
+		if (!event_qcmd) {
+			pr_err("%s Insufficient memory. return", __func__);
+			return -ENOMEM;
+		}
+		atomic_set(&event_qcmd->on_heap, 1);
+		event_qcmd->command = processed_frame;
+		CPP_DBG("fid %d\n", processed_frame->frame_id);
+		msm_enqueue(&cpp_dev->eventData_q, &event_qcmd->list_eventdata);
+
+		v4l2_evt.id = processed_frame->inst_id;
+		v4l2_evt.type = V4L2_EVENT_CPP_FRAME_DONE;
+		v4l2_event_queue(cpp_dev->subdev.devnode, &v4l2_evt);
+	}
+	return 0;
+}
+
+static int msm_cpp_send_frame_to_hardware(struct cpp_device *cpp_dev)
+{
+	struct msm_queue_cmd *frame_qcmd;
+	struct msm_cpp_frame_info_t *process_frame;
+	struct msm_device_queue *queue;
+
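+	/* Top up the processing queue, draining realtime frames before offline ones */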
+	if (cpp_dev->processing_q.len < MAX_CPP_PROCESSING_FRAME) {
+		while (cpp_dev->processing_q.len < MAX_CPP_PROCESSING_FRAME) {
+			if (cpp_dev->realtime_q.len != 0) {
+				queue = &cpp_dev->realtime_q;
+			} else if (cpp_dev->offline_q.len != 0) {
+				queue = &cpp_dev->offline_q;
+			} else {
+				pr_debug("%s: All frames queued\n", __func__);
+				break;
+			}
+			frame_qcmd = msm_dequeue(queue, list_frame);
+			/* TBD: code to actually send the frame to hardware */
+			process_frame = frame_qcmd->command;
+
+			msm_enqueue(&cpp_dev->processing_q,
+						&frame_qcmd->list_frame);
+		}
+	}
+	return 0;
+}
+
+long msm_cpp_subdev_ioctl(struct v4l2_subdev *sd,
+			unsigned int cmd, void *arg)
+{
+	struct cpp_device *cpp_dev = v4l2_get_subdevdata(sd);
+	struct msm_camera_v4l2_ioctl_t *ioctl_ptr = arg;
+	int rc = 0;
+
+	CPP_DBG("%s: %d\n", __func__, __LINE__);
+	mutex_lock(&cpp_dev->mutex);
+	CPP_DBG("%s cmd: %d\n", __func__, cmd);
+	switch (cmd) {
+	case VIDIOC_MSM_CPP_CFG: {
+		struct msm_queue_cmd *frame_qcmd;
+		struct msm_cpp_frame_info_t *new_frame =
+			kzalloc(sizeof(struct msm_cpp_frame_info_t),
+					GFP_KERNEL);
+		if (!new_frame) {
+			pr_err("%s Insufficient memory. return", __func__);
+			mutex_unlock(&cpp_dev->mutex);
+			return -ENOMEM;
+		}
+
+		COPY_FROM_USER(rc, new_frame,
+			       (void __user *)ioctl_ptr->ioctl_ptr,
+			       sizeof(struct msm_cpp_frame_info_t));
+		if (rc) {
+			ERR_COPY_FROM_USER();
+			kfree(new_frame);
+			mutex_unlock(&cpp_dev->mutex);
+			return -EINVAL;
+		}
+
+		frame_qcmd = kzalloc(sizeof(struct msm_queue_cmd), GFP_KERNEL);
+		if (!frame_qcmd) {
+			pr_err("%s Insufficient memory. return", __func__);
+			kfree(new_frame);
+			mutex_unlock(&cpp_dev->mutex);
+			return -ENOMEM;
+		}
+
+		atomic_set(&frame_qcmd->on_heap, 1);
+		frame_qcmd->command = new_frame;
+		if (new_frame->frame_type == MSM_CPP_REALTIME_FRAME) {
+			msm_enqueue(&cpp_dev->realtime_q,
+						&frame_qcmd->list_frame);
+		} else if (new_frame->frame_type == MSM_CPP_OFFLINE_FRAME) {
+			msm_enqueue(&cpp_dev->offline_q,
+						&frame_qcmd->list_frame);
+		} else {
+			pr_err("%s: Invalid frame type\n", __func__);
+			kfree(new_frame);
+			kfree(frame_qcmd);
+			mutex_unlock(&cpp_dev->mutex);
+			return -EINVAL;
+		}
+		break;
+	}
+	case VIDIOC_MSM_CPP_GET_EVENTPAYLOAD: {
+		struct msm_device_queue *queue = &cpp_dev->eventData_q;
+		struct msm_queue_cmd *event_qcmd;
+		struct msm_cpp_frame_info_t *process_frame;
+		event_qcmd = msm_dequeue(queue, list_eventdata);
+		process_frame = event_qcmd->command;
+		CPP_DBG("fid %d\n", process_frame->frame_id);
+		if (copy_to_user((void __user *)ioctl_ptr->ioctl_ptr,
+				process_frame,
+				sizeof(struct msm_cpp_frame_info_t))) {
+			kfree(process_frame);
+			kfree(event_qcmd);
+			mutex_unlock(&cpp_dev->mutex);
+			return -EINVAL;
+		}
+		kfree(process_frame);
+		kfree(event_qcmd);
+		break;
+	}
+	}
+	mutex_unlock(&cpp_dev->mutex);
+	CPP_DBG("%s: %d\n", __func__, __LINE__);
+	return 0;
+}
+
+int msm_cpp_subscribe_event(struct v4l2_subdev *sd, struct v4l2_fh *fh,
+	struct v4l2_event_subscription *sub)
+{
+	CPP_DBG("%s\n", __func__);
+	return v4l2_event_subscribe(fh, sub, MAX_CPP_V4l2_EVENTS);
+}
+
+int msm_cpp_unsubscribe_event(struct v4l2_subdev *sd, struct v4l2_fh *fh,
+	struct v4l2_event_subscription *sub)
+{
+	CPP_DBG("%s\n", __func__);
+	return v4l2_event_unsubscribe(fh, sub);
+}
+
+static struct v4l2_subdev_core_ops msm_cpp_subdev_core_ops = {
+	.ioctl = msm_cpp_subdev_ioctl,
+	.subscribe_event = msm_cpp_subscribe_event,
+	.unsubscribe_event = msm_cpp_unsubscribe_event,
+};
+
+static const struct v4l2_subdev_ops msm_cpp_subdev_ops = {
+	.core = &msm_cpp_subdev_core_ops,
+};
+
+static int msm_cpp_enable_debugfs(struct cpp_device *cpp_dev);
+
+static struct v4l2_file_operations msm_cpp_v4l2_subdev_fops;
+
+static long msm_cpp_subdev_do_ioctl(
+	struct file *file, unsigned int cmd, void *arg)
+{
+	struct video_device *vdev = video_devdata(file);
+	struct v4l2_subdev *sd = vdev_to_v4l2_subdev(vdev);
+	struct v4l2_fh *vfh = file->private_data;
+
+	switch (cmd) {
+	case VIDIOC_DQEVENT:
+		if (!(sd->flags & V4L2_SUBDEV_FL_HAS_EVENTS))
+			return -ENOIOCTLCMD;
+
+		return v4l2_event_dequeue(vfh, arg, file->f_flags & O_NONBLOCK);
+
+	case VIDIOC_SUBSCRIBE_EVENT:
+		return v4l2_subdev_call(sd, core, subscribe_event, vfh, arg);
+
+	case VIDIOC_UNSUBSCRIBE_EVENT:
+		return v4l2_subdev_call(sd, core, unsubscribe_event, vfh, arg);
+
+	case VIDIOC_MSM_CPP_GET_INST_INFO: {
+		uint32_t i;
+		struct cpp_device *cpp_dev = v4l2_get_subdevdata(sd);
+		struct msm_camera_v4l2_ioctl_t *ioctl_ptr = arg;
+		struct msm_cpp_frame_info_t inst_info;
+		memset(&inst_info, 0, sizeof(inst_info));
+		for (i = 0; i < MAX_ACTIVE_CPP_INSTANCE; i++) {
+			if (cpp_dev->cpp_subscribe_list[i].vfh == vfh) {
+				inst_info.inst_id = i;
+				break;
+			}
+		}
+		if (copy_to_user(
+				(void __user *)ioctl_ptr->ioctl_ptr, &inst_info,
+				sizeof(struct msm_cpp_frame_info_t))) {
+			return -EINVAL;
+		}
+	}
+	break;
+	default:
+		return v4l2_subdev_call(sd, core, ioctl, cmd, arg);
+	}
+
+	return 0;
+}
+
+static long msm_cpp_subdev_fops_ioctl(struct file *file, unsigned int cmd,
+	unsigned long arg)
+{
+	return video_usercopy(file, cmd, arg, msm_cpp_subdev_do_ioctl);
+}
+
+static int __devinit cpp_probe(struct platform_device *pdev)
+{
+	struct cpp_device *cpp_dev;
+	struct msm_cam_subdev_info sd_info;
+	int rc = 0;
+	CDBG("%s: device id = %d\n", __func__, pdev->id);
+	cpp_dev = kzalloc(sizeof(struct cpp_device), GFP_KERNEL);
+	if (!cpp_dev) {
+		pr_err("%s: not enough memory\n", __func__);
+		return -ENOMEM;
+	}
+	v4l2_subdev_init(&cpp_dev->subdev, &msm_cpp_subdev_ops);
+	cpp_dev->subdev.internal_ops = &msm_cpp_internal_ops;
+	snprintf(cpp_dev->subdev.name, ARRAY_SIZE(cpp_dev->subdev.name),
+		 "cpp");
+	cpp_dev->subdev.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+	cpp_dev->subdev.flags |= V4L2_SUBDEV_FL_HAS_EVENTS;
+	v4l2_set_subdevdata(&cpp_dev->subdev, cpp_dev);
+	platform_set_drvdata(pdev, &cpp_dev->subdev);
+	mutex_init(&cpp_dev->mutex);
+
+	cpp_dev->pdev = pdev;
+
+	media_entity_init(&cpp_dev->subdev.entity, 0, NULL, 0);
+	cpp_dev->subdev.entity.type = MEDIA_ENT_T_DEVNODE_V4L;
+	cpp_dev->subdev.entity.group_id = CPP_DEV;
+	cpp_dev->subdev.entity.name = pdev->name;
+	sd_info.sdev_type = CPP_DEV;
+	sd_info.sd_index = pdev->id;
+	msm_cam_register_subdev_node(&cpp_dev->subdev, &sd_info);
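+	/* Reuse the default subdev fops, but route ioctls through msm_cpp_subdev_do_ioctl() */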
+	msm_cpp_v4l2_subdev_fops.owner = v4l2_subdev_fops.owner;
+	msm_cpp_v4l2_subdev_fops.open = v4l2_subdev_fops.open;
+	msm_cpp_v4l2_subdev_fops.unlocked_ioctl = msm_cpp_subdev_fops_ioctl;
+	msm_cpp_v4l2_subdev_fops.release = v4l2_subdev_fops.release;
+	msm_cpp_v4l2_subdev_fops.poll = v4l2_subdev_fops.poll;
+
+	cpp_dev->subdev.devnode->fops = &msm_cpp_v4l2_subdev_fops;
+	cpp_dev->subdev.entity.revision = cpp_dev->subdev.devnode->num;
+	msm_cpp_enable_debugfs(cpp_dev);
+	msm_queue_init(&cpp_dev->eventData_q, "eventdata");
+	msm_queue_init(&cpp_dev->offline_q, "frame");
+	msm_queue_init(&cpp_dev->realtime_q, "frame");
+	msm_queue_init(&cpp_dev->processing_q, "frame");
+	cpp_dev->cpp_open_cnt = 0;
+
+	return rc;
+}
+
+static struct platform_driver cpp_driver = {
+	.probe = cpp_probe,
+	.driver = {
+		.name = MSM_CPP_DRV_NAME,
+		.owner = THIS_MODULE,
+	},
+};
+
+static int __init msm_cpp_init_module(void)
+{
+	return platform_driver_register(&cpp_driver);
+}
+
+static void __exit msm_cpp_exit_module(void)
+{
+	platform_driver_unregister(&cpp_driver);
+}
+
+static int msm_cpp_debugfs_stream_s(void *data, u64 val)
+{
+	struct cpp_device *cpp_dev = data;
+	CPP_DBG("CPP processing frame E\n");
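+	/* Debug stream: poll the frame queues every 20 ms; this loop never exits */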
+	while (1) {
+		mutex_lock(&cpp_dev->mutex);
+		msm_cpp_notify_frame_done(cpp_dev);
+		msm_cpp_send_frame_to_hardware(cpp_dev);
+		mutex_unlock(&cpp_dev->mutex);
+		msleep(20);
+	}
+	CPP_DBG("CPP processing frame X\n");
+	return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(cpp_debugfs_stream, NULL,
+			msm_cpp_debugfs_stream_s, "%llu\n");
+
+static int msm_cpp_enable_debugfs(struct cpp_device *cpp_dev)
+{
+	struct dentry *debugfs_base;
+	debugfs_base = debugfs_create_dir("msm_camera", NULL);
+	if (!debugfs_base)
+		return -ENOMEM;
+
+	if (!debugfs_create_file("test", S_IRUGO | S_IWUSR, debugfs_base,
+			(void *)cpp_dev, &cpp_debugfs_stream))
+		return -ENOMEM;
+
+	return 0;
+}
+
+module_init(msm_cpp_init_module);
+module_exit(msm_cpp_exit_module);
+MODULE_DESCRIPTION("MSM CPP driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/video/msm/cpp/msm_cpp.h b/drivers/media/video/msm/cpp/msm_cpp.h
new file mode 100644
index 0000000..8c10cac
--- /dev/null
+++ b/drivers/media/video/msm/cpp/msm_cpp.h
@@ -0,0 +1,65 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/list.h>
+#include <media/v4l2-subdev.h>
+
+#define MAX_ACTIVE_CPP_INSTANCE 8
+#define MAX_CPP_PROCESSING_FRAME 2
+#define MAX_CPP_V4l2_EVENTS 30
+
+#define MSM_CPP_MICRO_BASE          0x4000
+#define MSM_CPP_MICRO_HW_VERSION    0x0000
+#define MSM_CPP_MICRO_IRQGEN_STAT   0x0004
+#define MSM_CPP_MICRO_IRQGEN_CLR    0x0008
+#define MSM_CPP_MICRO_IRQGEN_MASK   0x000C
+#define MSM_CPP_MICRO_FIFO_TX_DATA  0x0010
+#define MSM_CPP_MICRO_FIFO_TX_STAT  0x0014
+#define MSM_CPP_MICRO_FIFO_RX_DATA  0x0018
+#define MSM_CPP_MICRO_FIFO_RX_STAT  0x001C
+#define MSM_CPP_MICRO_BOOT_START    0x0020
+#define MSM_CPP_MICRO_BOOT_LDORG    0x0024
+#define MSM_CPP_MICRO_CLKEN_CTL     0x0030
+
+struct cpp_subscribe_info {
+	struct v4l2_fh *vfh;
+	uint32_t active;
+};
+
+struct cpp_device {
+	struct platform_device *pdev;
+	struct v4l2_subdev subdev;
+	struct resource *mem;
+	struct resource *irq;
+	struct resource *io;
+	void __iomem *base;
+	struct clk *cpp_clk[2];
+	struct mutex mutex;
+
+	struct cpp_subscribe_info cpp_subscribe_list[MAX_ACTIVE_CPP_INSTANCE];
+	uint32_t cpp_open_cnt;
+
+	struct msm_device_queue eventData_q; /*V4L2 Event Payload Queue*/
+
+	/* Offline frame queue: processed when the realtime queue is empty */
+	struct msm_device_queue offline_q;
+	/* Realtime frame queue: processed with highest priority */
+	struct msm_device_queue realtime_q;
+	/* Processing queue: frame info for frames sent to the microcontroller */
+	struct msm_device_queue processing_q;
+};
+
diff --git a/drivers/media/video/msm/gemini/msm_gemini_platform.c b/drivers/media/video/msm/gemini/msm_gemini_platform.c
index d7f6bd8..28d2439 100644
--- a/drivers/media/video/msm/gemini/msm_gemini_platform.c
+++ b/drivers/media/video/msm/gemini/msm_gemini_platform.c
@@ -47,7 +47,7 @@
 	unsigned long size;
 	int rc;
 #ifdef CONFIG_MSM_MULTIMEDIA_USE_ION
-	*ionhandle = ion_import_fd(gemini_client, fd);
+	*ionhandle = ion_import_dma_buf(gemini_client, fd);
 	if (IS_ERR_OR_NULL(*ionhandle))
 		return 0;
 
diff --git a/drivers/media/video/msm/gemini/msm_gemini_sync.c b/drivers/media/video/msm/gemini/msm_gemini_sync.c
index b55ec18..ae3de13 100644
--- a/drivers/media/video/msm/gemini/msm_gemini_sync.c
+++ b/drivers/media/video/msm/gemini/msm_gemini_sync.c
@@ -280,6 +280,7 @@
 		GMN_DBG("%s:%d] no output return buffer\n", __func__,
 			__LINE__);
 		rc = -1;
+		return rc;
 	}
 
 	buf_out = msm_gemini_q_out(&pgmn_dev->output_buf_q);
diff --git a/drivers/media/video/msm/io/msm_camera_i2c.c b/drivers/media/video/msm/io/msm_camera_i2c.c
index cecf9b0..e946569 100644
--- a/drivers/media/video/msm/io/msm_camera_i2c.c
+++ b/drivers/media/video/msm/io/msm_camera_i2c.c
@@ -267,6 +267,35 @@
 	return rc;
 }
 
+int32_t msm_camera_i2c_write_table_w_microdelay(
+	struct msm_camera_i2c_client *client,
+	struct msm_camera_i2c_reg_tbl *reg_tbl, uint16_t size,
+	enum msm_camera_i2c_data_type data_type)
+{
+	int i;
+	int32_t rc = -EFAULT;
+
+	if (!client || !reg_tbl)
+		return rc;
+
+	if ((client->addr_type != MSM_CAMERA_I2C_BYTE_ADDR
+		&& client->addr_type != MSM_CAMERA_I2C_WORD_ADDR)
+		|| (data_type != MSM_CAMERA_I2C_BYTE_DATA
+		&& data_type != MSM_CAMERA_I2C_WORD_DATA))
+		return rc;
+
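+	/* Write each entry in turn, honouring the optional per-entry delay */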
+	for (i = 0; i < size; i++) {
+		rc = msm_camera_i2c_write(client, reg_tbl->reg_addr,
+			reg_tbl->reg_data, data_type);
+		if (rc < 0)
+			break;
+		if (reg_tbl->delay)
+			usleep_range(reg_tbl->delay, reg_tbl->delay + 1000);
+		reg_tbl++;
+	}
+	return rc;
+}
+
 int32_t msm_camera_i2c_write_tbl(struct msm_camera_i2c_client *client,
 	struct msm_camera_i2c_reg_conf *reg_conf_tbl, uint16_t size,
 	enum msm_camera_i2c_data_type data_type)
diff --git a/drivers/media/video/msm/io/msm_camera_i2c.h b/drivers/media/video/msm/io/msm_camera_i2c.h
index 01c8259..a0cdd77 100644
--- a/drivers/media/video/msm/io/msm_camera_i2c.h
+++ b/drivers/media/video/msm/io/msm_camera_i2c.h
@@ -58,6 +58,12 @@
 	enum msm_camera_i2c_cmd_type cmd_type;
 };
 
+struct msm_camera_i2c_reg_tbl {
+	uint16_t reg_addr;
+	uint16_t reg_data;
+	uint16_t delay;
+};
+
 struct msm_camera_i2c_conf_array {
 	struct msm_camera_i2c_reg_conf *conf;
 	uint16_t size;
@@ -107,6 +113,11 @@
 	uint16_t addr, uint16_t data,
 	enum msm_camera_i2c_data_type data_type);
 
+int32_t msm_camera_i2c_write_table_w_microdelay(
+	struct msm_camera_i2c_client *client,
+	struct msm_camera_i2c_reg_tbl *reg_tbl, uint16_t size,
+	enum msm_camera_i2c_data_type data_type);
+
 int32_t msm_camera_i2c_write_tbl(struct msm_camera_i2c_client *client,
 	struct msm_camera_i2c_reg_conf *reg_conf_tbl, uint16_t size,
 	enum msm_camera_i2c_data_type data_type);
diff --git a/drivers/media/video/msm/mercury/msm_mercury_platform.c b/drivers/media/video/msm/mercury/msm_mercury_platform.c
index 9366ef3..e90c63c 100644
--- a/drivers/media/video/msm/mercury/msm_mercury_platform.c
+++ b/drivers/media/video/msm/mercury/msm_mercury_platform.c
@@ -11,7 +11,6 @@
  */
 
 #include <linux/module.h>
-#include <linux/pm_qos_params.h>
 #include <linux/clk.h>
 #include <linux/io.h>
 #include <linux/android_pmem.h>
@@ -53,7 +52,7 @@
 	unsigned long size;
 	int rc;
 #ifdef CONFIG_MSM_MULTIMEDIA_USE_ION
-	*ionhandle = ion_import_fd(mercury_client, fd);
+	*ionhandle = ion_import_dma_buf(mercury_client, fd);
 	if (IS_ERR_OR_NULL(*ionhandle))
 		return 0;
 
diff --git a/drivers/media/video/msm/msm.c b/drivers/media/video/msm/msm.c
index d34b8e1..b67af4f 100644
--- a/drivers/media/video/msm/msm.c
+++ b/drivers/media/video/msm/msm.c
@@ -166,17 +166,22 @@
 		struct msm_cam_v4l2_dev_inst, eventHandle);
 	D("%s\n", __func__);
 	WARN_ON(pctx != f->private_data);
+
+	mutex_lock(&pcam_inst->inst_lock);
 	rc = vb2_reqbufs(&pcam_inst->vid_bufq, pb);
 	if (rc < 0) {
 		pr_err("%s reqbufs failed %d ", __func__, rc);
+		mutex_unlock(&pcam_inst->inst_lock);
 		return rc;
 	}
 	if (!pb->count) {
 		/* Deallocation. free buf_offset array */
 		D("%s Inst %p freeing buffer offsets array",
 			__func__, pcam_inst);
-		for (j = 0 ; j < pcam_inst->buf_count ; j++)
+		for (j = 0 ; j < pcam_inst->buf_count ; j++) {
 			kfree(pcam_inst->buf_offset[j]);
+			pcam_inst->buf_offset[j] = NULL;
+		}
 		kfree(pcam_inst->buf_offset);
 		pcam_inst->buf_offset = NULL;
 		/* If the userspace has deallocated all the
@@ -194,6 +199,7 @@
 							GFP_KERNEL);
 		if (!pcam_inst->buf_offset) {
 			pr_err("%s out of memory ", __func__);
+			mutex_unlock(&pcam_inst->inst_lock);
 			return -ENOMEM;
 		}
 		for (i = 0; i < pb->count; i++) {
@@ -202,15 +208,19 @@
 				pcam_inst->plane_info.num_planes, GFP_KERNEL);
 			if (!pcam_inst->buf_offset[i]) {
 				pr_err("%s out of memory ", __func__);
-				for (j = i-1 ; j >= 0; j--)
+				for (j = i-1 ; j >= 0; j--) {
 					kfree(pcam_inst->buf_offset[j]);
+					pcam_inst->buf_offset[j] = NULL;
+				}
 				kfree(pcam_inst->buf_offset);
 				pcam_inst->buf_offset = NULL;
+				mutex_unlock(&pcam_inst->inst_lock);
 				return -ENOMEM;
 			}
 		}
 	}
 	pcam_inst->buf_count = pb->count;
+	mutex_unlock(&pcam_inst->inst_lock);
 	return rc;
 }
 
@@ -218,13 +228,17 @@
 					struct v4l2_buffer *pb)
 {
 	/* get the video device */
+	int rc = 0;
 	struct msm_cam_v4l2_dev_inst *pcam_inst;
 	pcam_inst = container_of(f->private_data,
 		struct msm_cam_v4l2_dev_inst, eventHandle);
 
 	D("%s\n", __func__);
 	WARN_ON(pctx != f->private_data);
-	return vb2_querybuf(&pcam_inst->vid_bufq, pb);
+	mutex_lock(&pcam_inst->inst_lock);
+	rc = vb2_querybuf(&pcam_inst->vid_bufq, pb);
+	mutex_unlock(&pcam_inst->inst_lock);
+	return rc;
 }
 
 static int msm_camera_v4l2_qbuf(struct file *f, void *pctx,
@@ -240,8 +254,10 @@
 		pcam_inst->image_mode, pb->index);
 	WARN_ON(pctx != f->private_data);
 
+	mutex_lock(&pcam_inst->inst_lock);
 	if (!pcam_inst->buf_offset) {
 		pr_err("%s Buffer is already released. Returning.\n", __func__);
+		mutex_unlock(&pcam_inst->inst_lock);
 		return -EINVAL;
 	}
 
@@ -249,6 +265,7 @@
 		/* Reject the buffer if planes array was not allocated */
 		if (pb->m.planes == NULL) {
 			pr_err("%s Planes array is null\n", __func__);
+			mutex_unlock(&pcam_inst->inst_lock);
 			return -EINVAL;
 		}
 		for (i = 0; i < pcam_inst->plane_info.num_planes; i++) {
@@ -269,7 +286,7 @@
 	rc = vb2_qbuf(&pcam_inst->vid_bufq, pb);
 	D("%s, videobuf_qbuf mode %d and idx %d returns %d\n", __func__,
 		pcam_inst->image_mode, pb->index, rc);
-
+	mutex_unlock(&pcam_inst->inst_lock);
 	return rc;
 }
 
@@ -285,6 +302,11 @@
 	D("%s\n", __func__);
 	WARN_ON(pctx != f->private_data);
 
+	mutex_lock(&pcam_inst->inst_lock);
+	if (0 == pcam_inst->streamon) {
+		mutex_unlock(&pcam_inst->inst_lock);
+		return -EACCES;
+	}
 	rc = vb2_dqbuf(&pcam_inst->vid_bufq, pb,  f->f_flags & O_NONBLOCK);
 	D("%s, videobuf_dqbuf returns %d\n", __func__, rc);
 
@@ -292,6 +314,7 @@
 		/* Reject the buffer if planes array was not allocated */
 		if (pb->m.planes == NULL) {
 			pr_err("%s Planes array is null\n", __func__);
+			mutex_unlock(&pcam_inst->inst_lock);
 			return -EINVAL;
 		}
 		for (i = 0; i < pcam_inst->plane_info.num_planes; i++) {
@@ -309,6 +332,7 @@
 		pb->reserved = pcam_inst->buf_offset[pb->index][0].addr_offset;
 	}
 
+	mutex_unlock(&pcam_inst->inst_lock);
 	return rc;
 }
 
@@ -325,9 +349,13 @@
 	D("%s Inst %p\n", __func__, pcam_inst);
 	WARN_ON(pctx != f->private_data);
 
+	mutex_lock(&pcam->vid_lock);
+	mutex_lock(&pcam_inst->inst_lock);
 	if ((buf_type != V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) &&
 		(buf_type != V4L2_BUF_TYPE_VIDEO_CAPTURE)) {
 		pr_err("%s Invalid buffer type ", __func__);
+		mutex_unlock(&pcam_inst->inst_lock);
+		mutex_unlock(&pcam->vid_lock);
 		return -EINVAL;
 	}
 
@@ -336,10 +364,10 @@
 	rc = vb2_streamon(&pcam_inst->vid_bufq, buf_type);
 	D("%s, videobuf_streamon returns %d\n", __func__, rc);
 
-	mutex_lock(&pcam->vid_lock);
 	/* turn HW (VFE/sensor) streaming */
 	pcam_inst->streamon = 1;
 	rc = msm_server_streamon(pcam, pcam_inst->my_index);
+	mutex_unlock(&pcam_inst->inst_lock);
 	mutex_unlock(&pcam->vid_lock);
 	D("%s rc = %d\n", __func__, rc);
 	return rc;
@@ -367,16 +395,20 @@
 	/* first turn of HW (VFE/sensor) streaming so that buffers are
 		not in use when we free the buffers */
 	mutex_lock(&pcam->vid_lock);
+	mutex_lock(&pcam_inst->inst_lock);
 	pcam_inst->streamon = 0;
 	if (msm_server_get_usecount() > 0)
 		rc = msm_server_streamoff(pcam, pcam_inst->my_index);
-	mutex_unlock(&pcam->vid_lock);
+
 	if (rc < 0)
 		pr_err("%s: hw failed to stop streaming\n", __func__);
 
 	/* stop buffer streaming */
 	rc = vb2_streamoff(&pcam_inst->vid_bufq, buf_type);
 	D("%s, videobuf_streamoff returns %d\n", __func__, rc);
+
+	mutex_unlock(&pcam_inst->inst_lock);
+	mutex_unlock(&pcam->vid_lock);
 	return rc;
 }
 
@@ -466,11 +498,13 @@
 	D("%s\n", __func__);
 	WARN_ON(pctx != f->private_data);
 
+	mutex_lock(&pcam->vid_lock);
 	rc = msm_server_try_fmt(pcam, pfmt);
 	if (rc)
 		pr_err("Format %x not found, rc = %d\n",
 				pfmt->fmt.pix.pixelformat, rc);
 
+	mutex_unlock(&pcam->vid_lock);
 	return rc;
 }
 
@@ -484,11 +518,13 @@
 	D("%s\n", __func__);
 	WARN_ON(pctx != f->private_data);
 
+	mutex_lock(&pcam->vid_lock);
 	rc = msm_server_try_fmt_mplane(pcam, pfmt);
 	if (rc)
 		pr_err("Format %x not found, rc = %d\n",
 				pfmt->fmt.pix_mp.pixelformat, rc);
 
+	mutex_unlock(&pcam->vid_lock);
 	return rc;
 }
 
@@ -632,6 +668,10 @@
 		return OUTPUT_TYPE_S;
 	case MSM_V4L2_EXT_CAPTURE_MODE_VIDEO:
 		return OUTPUT_TYPE_V;
+	case MSM_V4L2_EXT_CAPTURE_MODE_RDI:
+		return OUTPUT_TYPE_R;
+	case MSM_V4L2_EXT_CAPTURE_MODE_RDI1:
+		return OUTPUT_TYPE_R1;
 	case MSM_V4L2_EXT_CAPTURE_MODE_DEFAULT:
 	case MSM_V4L2_EXT_CAPTURE_MODE_PREVIEW:
 	default:
@@ -781,10 +821,6 @@
 			break;
 	}
 
-	server_q_idx = msm_find_free_queue();
-	if (server_q_idx < 0)
-		return server_q_idx;
-
 	/* if no instance is available, return error */
 	if (i == MSM_DEV_INST_MAX) {
 		mutex_unlock(&pcam->vid_lock);
@@ -795,6 +831,7 @@
 		mutex_unlock(&pcam->vid_lock);
 		return rc;
 	}
+	mutex_init(&pcam_inst->inst_lock);
 	pcam_inst->sensor_pxlcode = pcam->usr_fmts[0].pxlcode;
 	pcam_inst->my_index = i;
 	pcam_inst->pcam = pcam;
@@ -806,6 +843,10 @@
 	pcam->use_count++;
 	D("%s use_count %d\n", __func__, pcam->use_count);
 	if (pcam->use_count == 1) {
+		server_q_idx = msm_find_free_queue();
+		if (server_q_idx < 0)
+			return server_q_idx;
+
 		rc = msm_server_begin_session(pcam, server_q_idx);
 		if (rc < 0) {
 			pr_err("%s error starting server session ", __func__);
@@ -887,7 +928,9 @@
 		pcam->dev_inst[i] = NULL;
 		pcam->use_count = 0;
 	}
+	pcam->dev_inst[i] = NULL;
 	mutex_unlock(&pcam->vid_lock);
+	mutex_destroy(&pcam_inst->inst_lock);
 	kfree(pcam_inst);
 	pr_err("%s: error end", __func__);
 	return rc;
@@ -986,6 +1029,7 @@
 	}
 
 	mutex_lock(&pcam->vid_lock);
+	mutex_lock(&pcam_inst->inst_lock);
 
 	if (pcam_inst->streamon) {
 		/*something went wrong since instance
@@ -1011,6 +1055,8 @@
 		v4l2_fh_del(&pcam_inst->eventHandle);
 		v4l2_fh_exit(&pcam_inst->eventHandle);
 	}
+	mutex_unlock(&pcam_inst->inst_lock);
+	mutex_destroy(&pcam_inst->inst_lock);
 	kfree(pcam_inst);
 	f->private_data = NULL;
 
diff --git a/drivers/media/video/msm/msm.h b/drivers/media/video/msm/msm.h
index d0322d1..c3e8321 100644
--- a/drivers/media/video/msm/msm.h
+++ b/drivers/media/video/msm/msm.h
@@ -59,6 +59,7 @@
 #define MSM_MERCURY_DRV_NAME "msm_mercury"
 #define MSM_I2C_MUX_DRV_NAME "msm_cam_i2c_mux"
 #define MSM_IRQ_ROUTER_DRV_NAME "msm_cam_irq_router"
+#define MSM_CPP_DRV_NAME "msm_cpp"
 
 #define MAX_NUM_CSIPHY_DEV 3
 #define MAX_NUM_CSID_DEV 4
@@ -68,6 +69,7 @@
 #define MAX_NUM_AXI_DEV 2
 #define MAX_NUM_VPE_DEV 1
 #define MAX_NUM_JPEG_DEV 3
+#define MAX_NUM_CPP_DEV 1
 
 enum msm_cam_subdev_type {
 	CSIPHY_DEV,
@@ -82,6 +84,7 @@
 	EEPROM_DEV,
 	GESTURE_DEV,
 	IRQ_ROUTER_DEV,
+	CPP_DEV,
 };
 
 /* msm queue management APIs*/
@@ -155,6 +158,7 @@
 	NOTIFY_VFE_MSG_STATS,  /* arg = struct isp_msg_stats */
 	NOTIFY_VFE_MSG_COMP_STATS, /* arg = struct msm_stats_buf */
 	NOTIFY_VFE_BUF_EVT, /* arg = struct msm_vfe_resp */
+	NOTIFY_VFE_CAMIF_ERROR,
 	NOTIFY_PCLK_CHANGE, /* arg = pclk */
 	NOTIFY_CSIPHY_CFG, /* arg = msm_camera_csiphy_params */
 	NOTIFY_CSID_CFG, /* arg = msm_camera_csid_params */
@@ -178,7 +182,7 @@
 
 struct msm_cam_v4l2_device;
 struct msm_cam_v4l2_dev_inst;
-#define MSM_MAX_IMG_MODE                8
+#define MSM_MAX_IMG_MODE                MSM_V4L2_EXT_CAPTURE_MODE_MAX
 
 enum msm_buffer_state {
 	MSM_BUFFER_STATE_UNUSED,
@@ -261,6 +265,7 @@
 	struct v4l2_subdev *vpe_sdev; /* vpe sub device */
 	struct v4l2_subdev *axi_sdev; /* axi sub device */
 	struct v4l2_subdev *eeprom_sdev; /* eeprom sub device */
+	struct v4l2_subdev *cpp_sdev;/*cpp sub device*/
 
 	struct msm_isp_ops *isp_sdev;    /* isp sub device : camif/VFE */
 	struct msm_cam_config_dev *config_device;
@@ -343,6 +348,7 @@
 	int is_mem_map_inst;
 	struct img_plane_info plane_info;
 	int vbqueue_initialized;
+	struct mutex inst_lock;
 };
 
 struct msm_cam_mctl_node {
@@ -540,6 +546,7 @@
 	struct v4l2_subdev *axi_device[MAX_NUM_AXI_DEV];
 	struct v4l2_subdev *vpe_device[MAX_NUM_VPE_DEV];
 	struct v4l2_subdev *gesture_device;
+	struct v4l2_subdev *cpp_device[MAX_NUM_CPP_DEV];
 	struct v4l2_subdev *irqr_device;
 
 	spinlock_t  intr_table_lock;
diff --git a/drivers/media/video/msm/msm_camera.c b/drivers/media/video/msm/msm_camera.c
index 9f32bfe..2f1d1ab 100644
--- a/drivers/media/video/msm/msm_camera.c
+++ b/drivers/media/video/msm/msm_camera.c
@@ -314,7 +314,7 @@
 	if (!region)
 		goto out;
 #ifdef CONFIG_MSM_MULTIMEDIA_USE_ION
-		region->handle = ion_import_fd(client_for_ion, info->fd);
+		region->handle = ion_import_dma_buf(client_for_ion, info->fd);
 		if (IS_ERR_OR_NULL(region->handle))
 			goto out1;
 		ion_phys(client_for_ion, region->handle,
diff --git a/drivers/media/video/msm/msm_isp.c b/drivers/media/video/msm/msm_isp.c
index 9b42f9b..5c61cdb 100644
--- a/drivers/media/video/msm/msm_isp.c
+++ b/drivers/media/video/msm/msm_isp.c
@@ -85,8 +85,10 @@
 				int vfe_msg)
 {
 	int image_mode;
+	uint32_t vfe_output_mode = pmctl->vfe_output_mode;
+	vfe_output_mode &= ~(VFE_OUTPUTS_RDI0|VFE_OUTPUTS_RDI1);
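+	/* Mask out the RDI bits; primary/secondary image modes are derived from the rest */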
 	if (vfe_msg == VFE_MSG_OUTPUT_PRIMARY) {
-		switch (pmctl->vfe_output_mode) {
+		switch (vfe_output_mode) {
 		case VFE_OUTPUTS_MAIN_AND_PREVIEW:
 		case VFE_OUTPUTS_MAIN_AND_VIDEO:
 		case VFE_OUTPUTS_MAIN_AND_THUMB:
@@ -110,7 +112,7 @@
 			break;
 		}
 	} else if (vfe_msg == VFE_MSG_OUTPUT_SECONDARY) {
-		switch (pmctl->vfe_output_mode) {
+		switch (vfe_output_mode) {
 		case VFE_OUTPUTS_MAIN_AND_PREVIEW:
 		case VFE_OUTPUTS_VIDEO_AND_PREVIEW:
 			image_mode = MSM_V4L2_EXT_CAPTURE_MODE_PREVIEW;
@@ -137,14 +139,15 @@
 			break;
 		}
 	} else if (vfe_msg == VFE_MSG_OUTPUT_TERTIARY1) {
-		switch (pmctl->vfe_output_mode) {
-		case VFE_OUTPUTS_RDI0:
-			image_mode = MSM_V4L2_EXT_CAPTURE_MODE_PREVIEW;
-			break;
-		default:
+		if (pmctl->vfe_output_mode & VFE_OUTPUTS_RDI0)
+			image_mode = MSM_V4L2_EXT_CAPTURE_MODE_RDI;
+		else
 			image_mode = -1;
-			break;
-		}
+	} else if (vfe_msg == VFE_MSG_OUTPUT_TERTIARY2) {
+		if (pmctl->vfe_output_mode & VFE_OUTPUTS_RDI1)
+			image_mode = MSM_V4L2_EXT_CAPTURE_MODE_RDI1;
+		else
+			image_mode = -1;
 	} else
 		image_mode = -1;
 
@@ -343,6 +346,10 @@
 			case MSG_ID_OUTPUT_TERTIARY1:
 				msgid = VFE_MSG_OUTPUT_TERTIARY1;
 				break;
+			case MSG_ID_OUTPUT_TERTIARY2:
+				msgid = VFE_MSG_OUTPUT_TERTIARY2;
+				break;
+
 			default:
 				pr_err("%s: Invalid VFE output id: %d\n",
 					   __func__, isp_output->output_id);
@@ -686,6 +693,7 @@
 	case CMD_AXI_START:
 	case CMD_AXI_STOP:
 	case CMD_AXI_CFG_TERT1:
+	case CMD_AXI_CFG_TERT2:
 		/* Dont need to pass buffer information.
 		 * subdev will get the buffer from media
 		 * controller free queue.
diff --git a/drivers/media/video/msm/msm_mctl.c b/drivers/media/video/msm/msm_mctl.c
index cdfad3b..be6c543 100644
--- a/drivers/media/video/msm/msm_mctl.c
+++ b/drivers/media/video/msm/msm_mctl.c
@@ -897,6 +897,7 @@
 	pcam_inst->sensor_pxlcode = pcam->usr_fmts[0].pxlcode;
 	pcam_inst->my_index = i;
 	pcam_inst->pcam = pcam;
+	mutex_init(&pcam_inst->inst_lock);
 	pcam->mctl_node.dev_inst[i] = pcam_inst;
 
 	D("%s pcam_inst %p my_index = %d\n", __func__,
@@ -1006,6 +1007,7 @@
 	pcam->mctl_node.dev_inst[pcam_inst->my_index] = NULL;
 	v4l2_fh_del(&pcam_inst->eventHandle);
 	v4l2_fh_exit(&pcam_inst->eventHandle);
+	mutex_destroy(&pcam_inst->inst_lock);
 
 	kfree(pcam_inst);
 	if (NULL != pmctl) {
@@ -1123,17 +1125,22 @@
 		struct msm_cam_v4l2_dev_inst, eventHandle);
 	D("%s\n", __func__);
 	WARN_ON(pctx != f->private_data);
+
+	mutex_lock(&pcam_inst->inst_lock);
 	rc = vb2_reqbufs(&pcam_inst->vid_bufq, pb);
 	if (rc < 0) {
 		pr_err("%s reqbufs failed %d ", __func__, rc);
+		mutex_unlock(&pcam_inst->inst_lock);
 		return rc;
 	}
 	if (!pb->count) {
 		/* Deallocation. free buf_offset array */
 		D("%s Inst %p freeing buffer offsets array",
 			__func__, pcam_inst);
-		for (j = 0 ; j < pcam_inst->buf_count ; j++)
+		for (j = 0 ; j < pcam_inst->buf_count ; j++) {
 			kfree(pcam_inst->buf_offset[j]);
+			pcam_inst->buf_offset[j] = NULL;
+		}
 		kfree(pcam_inst->buf_offset);
 		pcam_inst->buf_offset = NULL;
 		/* If the userspace has deallocated all the
@@ -1151,6 +1158,7 @@
 							GFP_KERNEL);
 		if (!pcam_inst->buf_offset) {
 			pr_err("%s out of memory ", __func__);
+			mutex_unlock(&pcam_inst->inst_lock);
 			return -ENOMEM;
 		}
 		for (i = 0; i < pb->count; i++) {
@@ -1159,10 +1167,13 @@
 				pcam_inst->plane_info.num_planes, GFP_KERNEL);
 			if (!pcam_inst->buf_offset[i]) {
 				pr_err("%s out of memory ", __func__);
-				for (j = i-1 ; j >= 0; j--)
+				for (j = i-1 ; j >= 0; j--) {
 					kfree(pcam_inst->buf_offset[j]);
+					pcam_inst->buf_offset[j] = NULL;
+				}
 				kfree(pcam_inst->buf_offset);
 				pcam_inst->buf_offset = NULL;
+				mutex_unlock(&pcam_inst->inst_lock);
 				return -ENOMEM;
 			}
 		}
@@ -1170,6 +1181,7 @@
 	pcam_inst->buf_count = pb->count;
 	D("%s inst %p, buf count %d ", __func__,
 		pcam_inst, pcam_inst->buf_count);
+	mutex_unlock(&pcam_inst->inst_lock);
 	return rc;
 }
 
@@ -1177,13 +1189,17 @@
 					struct v4l2_buffer *pb)
 {
 	/* get the video device */
+	int rc = 0;
 	struct msm_cam_v4l2_dev_inst *pcam_inst;
 	pcam_inst = container_of(f->private_data,
 		struct msm_cam_v4l2_dev_inst, eventHandle);
 
 	D("%s\n", __func__);
 	WARN_ON(pctx != f->private_data);
-	return vb2_querybuf(&pcam_inst->vid_bufq, pb);
+	mutex_lock(&pcam_inst->inst_lock);
+	rc = vb2_querybuf(&pcam_inst->vid_bufq, pb);
+	mutex_unlock(&pcam_inst->inst_lock);
+	return rc;
 }
 
 static int msm_mctl_v4l2_qbuf(struct file *f, void *pctx,
@@ -1198,8 +1214,10 @@
 	D("%s Inst = %p\n", __func__, pcam_inst);
 	WARN_ON(pctx != f->private_data);
 
+	mutex_lock(&pcam_inst->inst_lock);
 	if (!pcam_inst->buf_offset) {
 		pr_err("%s Buffer is already released. Returning. ", __func__);
+		mutex_unlock(&pcam_inst->inst_lock);
 		return -EINVAL;
 	}
 
@@ -1207,6 +1225,7 @@
 		/* Reject the buffer if planes array was not allocated */
 		if (pb->m.planes == NULL) {
 			pr_err("%s Planes array is null ", __func__);
+			mutex_unlock(&pcam_inst->inst_lock);
 			return -EINVAL;
 		}
 		for (i = 0; i < pcam_inst->plane_info.num_planes; i++) {
@@ -1232,6 +1251,7 @@
 	rc = vb2_qbuf(&pcam_inst->vid_bufq, pb);
 	D("%s, videobuf_qbuf returns %d\n", __func__, rc);
 
+	mutex_unlock(&pcam_inst->inst_lock);
 	return rc;
 }
 
@@ -1246,10 +1266,16 @@
 
 	D("%s\n", __func__);
 	WARN_ON(pctx != f->private_data);
+	mutex_lock(&pcam_inst->inst_lock);
+	if (0 == pcam_inst->streamon) {
+		mutex_unlock(&pcam_inst->inst_lock);
+		return -EACCES;
+	}
 
 	rc = vb2_dqbuf(&pcam_inst->vid_bufq, pb,  f->f_flags & O_NONBLOCK);
 	D("%s, videobuf_dqbuf returns %d\n", __func__, rc);
 
+	mutex_unlock(&pcam_inst->inst_lock);
 	return rc;
 }
 
@@ -1266,9 +1292,13 @@
 	D("%s Inst %p\n", __func__, pcam_inst);
 	WARN_ON(pctx != f->private_data);
 
+	mutex_lock(&pcam->mctl_node.dev_lock);
+	mutex_lock(&pcam_inst->inst_lock);
 	if ((buf_type != V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) &&
 		(buf_type != V4L2_BUF_TYPE_VIDEO_CAPTURE)) {
 		pr_err("%s Invalid buffer type ", __func__);
+		mutex_unlock(&pcam_inst->inst_lock);
+		mutex_unlock(&pcam->mctl_node.dev_lock);
 		return -EINVAL;
 	}
 
@@ -1277,9 +1307,9 @@
 	rc = vb2_streamon(&pcam_inst->vid_bufq, buf_type);
 	D("%s, videobuf_streamon returns %d\n", __func__, rc);
 
-	mutex_lock(&pcam->mctl_node.dev_lock);
 	/* turn HW (VFE/sensor) streaming */
 	pcam_inst->streamon = 1;
+	mutex_unlock(&pcam_inst->inst_lock);
 	mutex_unlock(&pcam->mctl_node.dev_lock);
 	D("%s rc = %d\n", __func__, rc);
 	return rc;
@@ -1307,14 +1337,16 @@
 	/* first turn of HW (VFE/sensor) streaming so that buffers are
 		not in use when we free the buffers */
 	mutex_lock(&pcam->mctl_node.dev_lock);
+	mutex_lock(&pcam_inst->inst_lock);
 	pcam_inst->streamon = 0;
-	mutex_unlock(&pcam->mctl_node.dev_lock);
 	if (rc < 0)
 		pr_err("%s: hw failed to stop streaming\n", __func__);
 
 	/* stop buffer streaming */
 	rc = vb2_streamoff(&pcam_inst->vid_bufq, buf_type);
 	D("%s, videobuf_streamoff returns %d\n", __func__, rc);
+	mutex_unlock(&pcam_inst->inst_lock);
+	mutex_unlock(&pcam->mctl_node.dev_lock);
 	return rc;
 }
 
@@ -1529,6 +1561,10 @@
 		return OUTPUT_TYPE_S;
 	case MSM_V4L2_EXT_CAPTURE_MODE_VIDEO:
 		return OUTPUT_TYPE_V;
+	case MSM_V4L2_EXT_CAPTURE_MODE_RDI:
+		return OUTPUT_TYPE_R;
+	case MSM_V4L2_EXT_CAPTURE_MODE_RDI1:
+		return OUTPUT_TYPE_R1;
 	case MSM_V4L2_EXT_CAPTURE_MODE_DEFAULT:
 	case MSM_V4L2_EXT_CAPTURE_MODE_PREVIEW:
 	default:
diff --git a/drivers/media/video/msm/msm_mctl_buf.c b/drivers/media/video/msm/msm_mctl_buf.c
index eade6f1..cd86a80 100644
--- a/drivers/media/video/msm/msm_mctl_buf.c
+++ b/drivers/media/video/msm/msm_mctl_buf.c
@@ -116,6 +116,10 @@
 	}
 	buf_idx = vb->v4l2_buf.index;
 	pmctl = msm_cam_server_get_mctl(pcam->mctl_handle);
+	if (pmctl == NULL) {
+		pr_err("%s No mctl found\n", __func__);
+		return -EINVAL;
+	}
 	for (i = 0; i < vb->num_planes; i++) {
 		mem = vb2_plane_cookie(vb, i);
 		if (buf_type == VIDEOBUF2_MULTIPLE_PLANES)
@@ -147,13 +151,14 @@
 	struct msm_cam_v4l2_dev_inst *pcam_inst;
 	struct msm_cam_v4l2_device *pcam;
 	struct msm_frame_buffer *buf;
-	struct vb2_queue	*vq = vb->vb2_queue;
+	struct vb2_queue *vq;
 
 	D("%s\n", __func__);
-	if (!vb || !vq) {
+	if (!vb || !vb->vb2_queue) {
 		pr_err("%s error : input is NULL\n", __func__);
 		return -EINVAL;
 	}
+	vq = vb->vb2_queue;
 	pcam_inst = vb2_get_drv_priv(vq);
 	pcam = pcam_inst->pcam;
 	buf = container_of(vb, struct msm_frame_buffer, vidbuf);
@@ -207,6 +212,12 @@
 	pcam = pcam_inst->pcam;
 	buf = container_of(vb, struct msm_frame_buffer, vidbuf);
 
+	pmctl = msm_cam_server_get_mctl(pcam->mctl_handle);
+	if (pmctl == NULL) {
+		pr_err("%s No mctl found\n", __func__);
+		return;
+	}
+
 	if (pcam_inst->vid_fmt.type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
 		for (i = 0; i < vb->num_planes; i++) {
 			mem = vb2_plane_cookie(vb, i);
@@ -251,7 +262,6 @@
 		}
 		spin_unlock_irqrestore(&pcam_inst->vq_irqlock, flags);
 	}
-	pmctl = msm_cam_server_get_mctl(pcam->mctl_handle);
 	for (i = 0; i < vb->num_planes; i++) {
 		mem = vb2_plane_cookie(vb, i);
 		videobuf2_pmem_contig_user_put(mem, pmctl->client);
@@ -274,13 +284,14 @@
 	struct msm_cam_v4l2_dev_inst *pcam_inst = NULL;
 	struct msm_cam_v4l2_device *pcam = NULL;
 	unsigned long flags = 0;
-	struct vb2_queue *vq = vb->vb2_queue;
+	struct vb2_queue *vq;
 	struct msm_frame_buffer *buf;
 	D("%s\n", __func__);
-	if (!vb || !vq) {
+	if (!vb || !vb->vb2_queue) {
 		pr_err("%s error : input is NULL\n", __func__);
 		return ;
 	}
+	vq = vb->vb2_queue;
 	pcam_inst = vb2_get_drv_priv(vq);
 	pcam = pcam_inst->pcam;
 	D("%s pcam_inst=%p,(vb=0x%p),idx=%d,len=%d\n",
@@ -473,6 +484,10 @@
 {
 	struct msm_cam_media_controller *pmctl;
 	pmctl = msm_cam_server_get_mctl(pcam->mctl_handle);
+	if (pmctl == NULL) {
+		pr_err("%s No mctl found\n", __func__);
+		return -EINVAL;
+	}
 	pmctl->mctl_vbqueue_init = msm_vbqueue_init;
 	return 0;
 }
diff --git a/drivers/media/video/msm/msm_mctl_pp.c b/drivers/media/video/msm/msm_mctl_pp.c
index 844a3ff..abcee4b 100644
--- a/drivers/media/video/msm/msm_mctl_pp.c
+++ b/drivers/media/video/msm/msm_mctl_pp.c
@@ -506,7 +506,7 @@
 		return -EINVAL;
 	}
 	/* Always reserve the buffer from user's video node */
-	pcam_inst = p_mctl->pcam_ptr->dev_inst[image_mode];
+	pcam_inst = p_mctl->pcam_ptr->dev_inst_map[image_mode];
 	if (!pcam_inst) {
 		pr_err("%s Instance already closed ", __func__);
 		return -EINVAL;
diff --git a/drivers/media/video/msm/msm_mem.c b/drivers/media/video/msm/msm_mem.c
index 0043f72..5d412db 100644
--- a/drivers/media/video/msm/msm_mem.c
+++ b/drivers/media/video/msm/msm_mem.c
@@ -132,7 +132,7 @@
 	if (!region)
 		goto out;
 #ifdef CONFIG_MSM_MULTIMEDIA_USE_ION
-	region->handle = ion_import_fd(client, info->fd);
+	region->handle = ion_import_dma_buf(client, info->fd);
 	if (IS_ERR_OR_NULL(region->handle))
 		goto out1;
 	if (ion_map_iommu(client, region->handle, CAMERA_DOMAIN, GEN_POOL,
diff --git a/drivers/media/video/msm/msm_vfe31_v4l2.c b/drivers/media/video/msm/msm_vfe31_v4l2.c
index 885cd90..fa985ce 100644
--- a/drivers/media/video/msm/msm_vfe31_v4l2.c
+++ b/drivers/media/video/msm/msm_vfe31_v4l2.c
@@ -3751,19 +3751,11 @@
 		}
 	}
 
-	if (vfe31_ctrl->fs_vfe == NULL) {
-		vfe31_ctrl->fs_vfe =
-			regulator_get(&vfe31_ctrl->pdev->dev, "fs_vfe");
-		if (IS_ERR(vfe31_ctrl->fs_vfe)) {
-			pr_err("%s: Regulator FS_VFE get failed %ld\n",
-				__func__, PTR_ERR(vfe31_ctrl->fs_vfe));
-			vfe31_ctrl->fs_vfe = NULL;
-			goto vfe_fs_failed;
-		} else if (regulator_enable(vfe31_ctrl->fs_vfe)) {
+	if (vfe31_ctrl->fs_vfe) {
+		rc = regulator_enable(vfe31_ctrl->fs_vfe);
+		if (rc) {
 			pr_err("%s: Regulator FS_VFE enable failed\n",
 							__func__);
-			regulator_put(vfe31_ctrl->fs_vfe);
-			vfe31_ctrl->fs_vfe = NULL;
 			goto vfe_fs_failed;
 		}
 	}
@@ -3795,8 +3787,6 @@
 
 vfe_clk_enable_failed:
 	regulator_disable(vfe31_ctrl->fs_vfe);
-	regulator_put(vfe31_ctrl->fs_vfe);
-	vfe31_ctrl->fs_vfe = NULL;
 vfe_fs_failed:
 	if (!mctl->sdata->csi_if)
 		iounmap(vfe31_ctrl->camifbase);
@@ -3822,12 +3812,11 @@
 
 	msm_cam_clk_enable(&vfe31_ctrl->pdev->dev, vfe_clk_info,
 		vfe31_ctrl->vfe_clk, ARRAY_SIZE(vfe_clk_info), 0);
-	if (vfe31_ctrl->fs_vfe) {
+
+	if (vfe31_ctrl->fs_vfe)
 		regulator_disable(vfe31_ctrl->fs_vfe);
-		regulator_put(vfe31_ctrl->fs_vfe);
-		vfe31_ctrl->fs_vfe = NULL;
-	}
-	CDBG("%s, 31ee_irq\n", __func__);
+
+	CDBG("%s Releasing resources\n", __func__);
 	if (!pmctl->sdata->csi_if)
 		iounmap(vfe31_ctrl->camifbase);
 	iounmap(vfe31_ctrl->vfebase);
@@ -3919,6 +3908,13 @@
 	disable_irq(vfe31_ctrl->vfeirq->start);
 
 	vfe31_ctrl->pdev = pdev;
+	vfe31_ctrl->fs_vfe = regulator_get(&vfe31_ctrl->pdev->dev, "vdd");
+	if (IS_ERR(vfe31_ctrl->fs_vfe)) {
+		pr_err("%s: Regulator get failed %ld\n", __func__,
+			PTR_ERR(vfe31_ctrl->fs_vfe));
+		vfe31_ctrl->fs_vfe = NULL;
+	}
+
 	sd_info.sdev_type = VFE_DEV;
 	sd_info.sd_index = 0;
 	sd_info.irq_num = vfe31_ctrl->vfeirq->start;
diff --git a/drivers/media/video/msm/msm_vfe32.c b/drivers/media/video/msm/msm_vfe32.c
index acff492..3ac4c6a 100644
--- a/drivers/media/video/msm/msm_vfe32.c
+++ b/drivers/media/video/msm/msm_vfe32.c
@@ -50,6 +50,9 @@
 	vfe32_put_ch_ping_addr((base), (chn), (addr)))
 
 static uint32_t vfe_clk_rate;
+static void vfe32_send_isp_msg(struct v4l2_subdev *sd,
+	uint32_t vfeFrameId, uint32_t isp_msg_id);
+
 
 struct vfe32_isr_queue_cmd {
 	struct list_head list;
@@ -413,6 +416,7 @@
 {
 	uint32_t *ch_info;
 	uint32_t *axi_cfg = ao+V32_AXI_BUS_FMT_OFF;
+	int vfe_mode = (mode & ~(OUTPUT_TERT1|OUTPUT_TERT2));
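+	/* Tertiary (RDI) outputs are configured separately from the camif paths */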
 
 	/* Update the corresponding write masters for each output*/
 	ch_info = axi_cfg + V32_AXI_CFG_LEN;
@@ -431,38 +435,50 @@
 	axi_ctrl->share_ctrl->outpath.out2.ch0 = 0x0000FFFF & *ch_info;
 	axi_ctrl->share_ctrl->outpath.out2.ch1 =
 		0x0000FFFF & (*ch_info++ >> 16);
-	axi_ctrl->share_ctrl->outpath.out2.ch2 = 0x0000FFFF & *ch_info++;
+	axi_ctrl->share_ctrl->outpath.out2.ch2 = 0x0000FFFF & *ch_info;
 	axi_ctrl->share_ctrl->outpath.out2.image_mode =
 		0x0000FFFF & (*ch_info++ >> 16);
+	axi_ctrl->share_ctrl->outpath.out3.ch0 = 0x0000FFFF & *ch_info;
+	axi_ctrl->share_ctrl->outpath.out3.ch1 =
+		0x0000FFFF & (*ch_info++ >> 16);
+	axi_ctrl->share_ctrl->outpath.out3.ch2 = 0x0000FFFF & *ch_info;
+	axi_ctrl->share_ctrl->outpath.out3.image_mode =
+		0x0000FFFF & (*ch_info++ >> 16);
+	axi_ctrl->share_ctrl->outpath.output_mode = 0;
 
-
-	switch (mode) {
-	case OUTPUT_TERT1:
-		axi_ctrl->share_ctrl->outpath.output_mode =
+	if (mode & OUTPUT_TERT1)
+		axi_ctrl->share_ctrl->outpath.output_mode |=
 			VFE32_OUTPUT_MODE_TERTIARY1;
-		break;
+	if (mode & OUTPUT_TERT2)
+		axi_ctrl->share_ctrl->outpath.output_mode |=
+			VFE32_OUTPUT_MODE_TERTIARY2;
+	if (mode == OUTPUT_TERT1 || mode == OUTPUT_TERT2
+		|| mode == (OUTPUT_TERT1|OUTPUT_TERT2))
+			goto bus_cfg;
+
+	switch (vfe_mode) {
 	case OUTPUT_PRIM:
-		axi_ctrl->share_ctrl->outpath.output_mode =
+		axi_ctrl->share_ctrl->outpath.output_mode |=
 			VFE32_OUTPUT_MODE_PRIMARY;
 		break;
 	case OUTPUT_PRIM_ALL_CHNLS:
-		axi_ctrl->share_ctrl->outpath.output_mode =
+		axi_ctrl->share_ctrl->outpath.output_mode |=
 			VFE32_OUTPUT_MODE_PRIMARY_ALL_CHNLS;
 		break;
 	case OUTPUT_PRIM|OUTPUT_SEC:
-		axi_ctrl->share_ctrl->outpath.output_mode =
+		axi_ctrl->share_ctrl->outpath.output_mode |=
 			VFE32_OUTPUT_MODE_PRIMARY;
 		axi_ctrl->share_ctrl->outpath.output_mode |=
 			VFE32_OUTPUT_MODE_SECONDARY;
 		break;
 	case OUTPUT_PRIM|OUTPUT_SEC_ALL_CHNLS:
-		axi_ctrl->share_ctrl->outpath.output_mode =
+		axi_ctrl->share_ctrl->outpath.output_mode |=
 			VFE32_OUTPUT_MODE_PRIMARY;
 		axi_ctrl->share_ctrl->outpath.output_mode |=
 			VFE32_OUTPUT_MODE_SECONDARY_ALL_CHNLS;
 		break;
 	case OUTPUT_PRIM_ALL_CHNLS|OUTPUT_SEC:
-		axi_ctrl->share_ctrl->outpath.output_mode =
+		axi_ctrl->share_ctrl->outpath.output_mode |=
 			VFE32_OUTPUT_MODE_PRIMARY_ALL_CHNLS;
 		axi_ctrl->share_ctrl->outpath.output_mode |=
 			VFE32_OUTPUT_MODE_SECONDARY;
@@ -471,6 +487,8 @@
 		pr_err("%s Invalid AXI mode %d ", __func__, mode);
 		return -EINVAL;
 	}
+
+bus_cfg:
 	msm_camera_io_w(*ao, axi_ctrl->share_ctrl->vfebase +
 		VFE_BUS_IO_FORMAT_CFG);
 	msm_camera_io_memcpy(axi_ctrl->share_ctrl->vfebase +
@@ -721,7 +739,10 @@
 
 static void vfe32_start_common(struct vfe32_ctrl_type *vfe32_ctrl)
 {
-	uint32_t irq_mask = 0x00E00021, irq_mask1;
+	uint32_t irq_mask = 0x00E00021, irq_mask1, reg_update;
+	uint16_t vfe_operation_mode =
+		vfe32_ctrl->share_ctrl->operation_mode & ~(VFE_OUTPUTS_RDI0|
+			VFE_OUTPUTS_RDI1);
 	vfe32_ctrl->start_ack_pending = TRUE;
 	CDBG("VFE opertaion mode = 0x%x, output mode = 0x%x\n",
 		vfe32_ctrl->share_ctrl->operation_mode,
@@ -738,18 +759,30 @@
 	msm_camera_io_w(VFE_IMASK_WHILE_STOPPING_1,
 		vfe32_ctrl->share_ctrl->vfebase + VFE_IRQ_MASK_1);
 
-	if (vfe32_ctrl->share_ctrl->operation_mode == VFE_OUTPUTS_RDI0) {
-		irq_mask1 =
+	irq_mask1 =
 		msm_camera_io_r(vfe32_ctrl->share_ctrl->vfebase +
 			VFE_IRQ_MASK_1);
+	reg_update =
+		msm_camera_io_r_mb(vfe32_ctrl->share_ctrl->vfebase +
+			VFE_REG_UPDATE_CMD);
+
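+	/* Each enabled RDI path sets its own reg-update bit; camif modes are kicked below */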
+	if (vfe32_ctrl->share_ctrl->operation_mode & VFE_OUTPUTS_RDI0) {
 		irq_mask1 |= VFE_IRQ_STATUS1_RDI0_REG_UPDATE_MASK;
 		msm_camera_io_w(irq_mask1, vfe32_ctrl->share_ctrl->vfebase +
 			VFE_IRQ_MASK_1);
-		msm_camera_io_w_mb(2, vfe32_ctrl->share_ctrl->vfebase +
-			VFE_REG_UPDATE_CMD);
-	} else {
-		msm_camera_io_w_mb(1, vfe32_ctrl->share_ctrl->vfebase +
-			VFE_REG_UPDATE_CMD);
+		msm_camera_io_w_mb(reg_update|0x2, vfe32_ctrl->share_ctrl->
+			vfebase + VFE_REG_UPDATE_CMD);
+	}
+	if (vfe32_ctrl->share_ctrl->operation_mode & VFE_OUTPUTS_RDI1) {
+		irq_mask1 |= VFE_IRQ_STATUS1_RDI1_REG_UPDATE_MASK;
+		msm_camera_io_w(irq_mask1, vfe32_ctrl->share_ctrl->vfebase +
+			VFE_IRQ_MASK_1);
+		msm_camera_io_w_mb(reg_update|0x4, vfe32_ctrl->share_ctrl->
+			vfebase + VFE_REG_UPDATE_CMD);
+	}
+	if (vfe_operation_mode) {
+		msm_camera_io_w_mb(reg_update|0x1, vfe32_ctrl->share_ctrl->
+			vfebase + VFE_REG_UPDATE_CMD);
 		msm_camera_io_w_mb(1, vfe32_ctrl->share_ctrl->vfebase +
 			VFE_CAMIF_COMMAND);
 	}
@@ -796,6 +829,15 @@
 		share_ctrl->vfebase + VFE_REG_UPDATE_CMD);
 }
 
+static void vfe32_stop_liveshot(
+	struct msm_cam_media_controller *pmctl,
+	struct vfe32_ctrl_type *vfe32_ctrl)
+{
+	vfe32_ctrl->share_ctrl->liveshot_state = VFE_STATE_STOP_REQUESTED;
+	msm_camera_io_w_mb(1,
+		vfe32_ctrl->share_ctrl->vfebase + VFE_REG_UPDATE_CMD);
+}
+
 static int vfe32_zsl(
 	struct msm_cam_media_controller *pmctl,
 	struct vfe32_ctrl_type *vfe32_ctrl)
@@ -1047,6 +1089,15 @@
 		msm_camera_io_w(irq_mask, vfe32_ctrl->share_ctrl->vfebase +
 			VFE_IRQ_MASK_0);
 	}
+	if (vfe32_ctrl->share_ctrl->outpath.output_mode &
+		VFE32_OUTPUT_MODE_TERTIARY2) {
+		irq_mask = msm_camera_io_r(vfe32_ctrl->share_ctrl->vfebase +
+			VFE_IRQ_MASK_0);
+		irq_mask |= (0x1 << (vfe32_ctrl->share_ctrl->outpath.out3.ch0 +
+			VFE_WM_OFFSET));
+		msm_camera_io_w(irq_mask, vfe32_ctrl->share_ctrl->vfebase +
+			VFE_IRQ_MASK_0);
+	}
 
 	msm_camera_io_w(irq_comp_mask,
 		vfe32_ctrl->share_ctrl->vfebase + VFE_IRQ_COMP_MASK);
@@ -1267,6 +1318,8 @@
 		ch = &share_ctrl->outpath.out1;
 	else if (path == VFE_MSG_OUTPUT_TERTIARY1)
 		ch = &share_ctrl->outpath.out2;
+	else if (path == VFE_MSG_OUTPUT_TERTIARY2)
+		ch = &share_ctrl->outpath.out3;
 	else
 		pr_err("%s: Invalid path %d\n", __func__,
 			path);
@@ -1287,6 +1340,8 @@
 		image_mode = axi_ctrl->share_ctrl->outpath.out1.image_mode;
 	else if (path == VFE_MSG_OUTPUT_TERTIARY1)
 		image_mode = axi_ctrl->share_ctrl->outpath.out2.image_mode;
+	else if (path == VFE_MSG_OUTPUT_TERTIARY2)
+		image_mode = axi_ctrl->share_ctrl->outpath.out3.image_mode;
 
 	vfe32_subdev_notify(id, path, image_mode,
 		&axi_ctrl->subdev, axi_ctrl->share_ctrl);
@@ -1307,6 +1362,8 @@
 		image_mode = vfe32_ctrl->share_ctrl->outpath.out1.image_mode;
 	else if (path == VFE_MSG_OUTPUT_TERTIARY1)
 		image_mode = vfe32_ctrl->share_ctrl->outpath.out2.image_mode;
+	else if (path == VFE_MSG_OUTPUT_TERTIARY2)
+		image_mode = vfe32_ctrl->share_ctrl->outpath.out3.image_mode;
 
 	vfe32_subdev_notify(id, path, image_mode,
 		&vfe32_ctrl->subdev, vfe32_ctrl->share_ctrl);
@@ -1323,8 +1380,8 @@
 			outch->pong.ch_paddr[0]);
 
 		if ((vfe32_ctrl->share_ctrl->operation_mode !=
-			VFE_OUTPUTS_RAW) &&
-			(path != VFE_MSG_OUTPUT_TERTIARY1)) {
+			VFE_OUTPUTS_RAW) && (path != VFE_MSG_OUTPUT_TERTIARY1)
+			&& (path != VFE_MSG_OUTPUT_TERTIARY2)) {
 			vfe32_put_ch_ping_addr(
 				vfe32_ctrl->share_ctrl->vfebase, outch->ch1,
 				outch->ping.ch_paddr[1]);
@@ -1394,6 +1451,7 @@
 	uint32_t *cmdp_local = NULL;
 	uint32_t snapshot_cnt = 0;
 	uint32_t temp1 = 0, temp2 = 0;
+	uint16_t vfe_mode = 0;
 
 	CDBG("vfe32_proc_general: cmdID = %s, length = %d\n",
 		vfe32_general_cmd[cmd->id], cmd->length);
@@ -1406,24 +1464,36 @@
 	case VFE_CMD_START:
 		pr_info("vfe32_proc_general: cmdID = %s\n",
 			vfe32_general_cmd[cmd->id]);
-		if ((vfe32_ctrl->share_ctrl->operation_mode ==
+		vfe_mode = vfe32_ctrl->share_ctrl->operation_mode
+			& ~(VFE_OUTPUTS_RDI0|VFE_OUTPUTS_RDI1);
+		if (vfe_mode) {
+			if ((vfe32_ctrl->share_ctrl->operation_mode &
 				VFE_OUTPUTS_PREVIEW_AND_VIDEO) ||
-				(vfe32_ctrl->share_ctrl->operation_mode ==
+				(vfe32_ctrl->share_ctrl->operation_mode &
 				VFE_OUTPUTS_PREVIEW))
-			/* Configure primary channel */
-			rc = vfe32_configure_pingpong_buffers(
-				VFE_MSG_V32_START, VFE_MSG_OUTPUT_PRIMARY,
-				vfe32_ctrl);
-		else if (vfe32_ctrl->share_ctrl->operation_mode ==
+				/* Configure primary channel */
+				rc = vfe32_configure_pingpong_buffers(
+					VFE_MSG_V32_START,
+					VFE_MSG_OUTPUT_PRIMARY,
+					vfe32_ctrl);
+			else
+			/* Configure secondary channel */
+				rc = vfe32_configure_pingpong_buffers(
+					VFE_MSG_V32_START,
+					VFE_MSG_OUTPUT_SECONDARY,
+					vfe32_ctrl);
+		}
+		if (vfe32_ctrl->share_ctrl->operation_mode &
 				VFE_OUTPUTS_RDI0)
 			rc = vfe32_configure_pingpong_buffers(
 				VFE_MSG_V32_START, VFE_MSG_OUTPUT_TERTIARY1,
 				vfe32_ctrl);
-		else
-			/* Configure secondary channel */
+		if (vfe32_ctrl->share_ctrl->operation_mode &
+				VFE_OUTPUTS_RDI1)
 			rc = vfe32_configure_pingpong_buffers(
-				VFE_MSG_V32_START, VFE_MSG_OUTPUT_SECONDARY,
+				VFE_MSG_V32_START, VFE_MSG_OUTPUT_TERTIARY2,
 				vfe32_ctrl);
+
 		if (rc < 0) {
 			pr_err("%s error configuring pingpong buffers"
 				   " for preview", __func__);
@@ -1502,13 +1572,13 @@
 	case VFE_CMD_START_RECORDING:
 		pr_info("vfe32_proc_general: cmdID = %s\n",
 			vfe32_general_cmd[cmd->id]);
-		if (vfe32_ctrl->share_ctrl->operation_mode ==
+		if (vfe32_ctrl->share_ctrl->operation_mode &
 			VFE_OUTPUTS_PREVIEW_AND_VIDEO)
 			rc = vfe32_configure_pingpong_buffers(
 				VFE_MSG_V32_START_RECORDING,
 				VFE_MSG_OUTPUT_SECONDARY,
 				vfe32_ctrl);
-		else if (vfe32_ctrl->share_ctrl->operation_mode ==
+		else if (vfe32_ctrl->share_ctrl->operation_mode &
 			VFE_OUTPUTS_VIDEO_AND_PREVIEW)
 			rc = vfe32_configure_pingpong_buffers(
 				VFE_MSG_V32_START_RECORDING,
@@ -2703,6 +2773,10 @@
 			*cmdp & VFE_FRAME_SKIP_PERIOD_MASK) + 1;
 		vfe32_ctrl->frame_skip_pattern = (uint32_t)(*(cmdp + 2));
 		break;
+	case VFE_CMD_STOP_LIVESHOT:
+		CDBG("%s Stopping liveshot\n", __func__);
+		vfe32_stop_liveshot(pmctl, vfe32_ctrl);
+		break;
 	default:
 		if (cmd->length != vfe32_cmd[cmd->id].length)
 			return -EINVAL;
@@ -2843,7 +2917,7 @@
 	unsigned long flags;
 
 	if (vfe32_ctrl->recording_state == VFE_STATE_START_REQUESTED) {
-		if (vfe32_ctrl->share_ctrl->operation_mode ==
+		if (vfe32_ctrl->share_ctrl->operation_mode &
 				VFE_OUTPUTS_VIDEO_AND_PREVIEW) {
 			msm_camera_io_w(1, vfe32_ctrl->share_ctrl->vfebase +
 				vfe32_AXI_WM_CFG[vfe32_ctrl->
@@ -2851,7 +2925,7 @@
 			msm_camera_io_w(1, vfe32_ctrl->share_ctrl->vfebase +
 				vfe32_AXI_WM_CFG[vfe32_ctrl->
 				share_ctrl->outpath.out0.ch1]);
-		} else if (vfe32_ctrl->share_ctrl->operation_mode ==
+		} else if (vfe32_ctrl->share_ctrl->operation_mode &
 				VFE_OUTPUTS_PREVIEW_AND_VIDEO) {
 			msm_camera_io_w(1, vfe32_ctrl->share_ctrl->vfebase +
 				vfe32_AXI_WM_CFG[vfe32_ctrl->
@@ -2866,7 +2940,7 @@
 		CDBG("start video triggered .\n");
 	} else if (vfe32_ctrl->recording_state ==
 			VFE_STATE_STOP_REQUESTED) {
-		if (vfe32_ctrl->share_ctrl->operation_mode ==
+		if (vfe32_ctrl->share_ctrl->operation_mode &
 				VFE_OUTPUTS_VIDEO_AND_PREVIEW) {
 			msm_camera_io_w(0, vfe32_ctrl->share_ctrl->vfebase +
 				vfe32_AXI_WM_CFG[vfe32_ctrl->
@@ -2874,7 +2948,7 @@
 			msm_camera_io_w(0, vfe32_ctrl->share_ctrl->vfebase +
 				vfe32_AXI_WM_CFG[vfe32_ctrl->
 				share_ctrl->outpath.out0.ch1]);
-		} else if (vfe32_ctrl->share_ctrl->operation_mode ==
+		} else if (vfe32_ctrl->share_ctrl->operation_mode &
 				VFE_OUTPUTS_PREVIEW_AND_VIDEO) {
 			msm_camera_io_w(0, vfe32_ctrl->share_ctrl->vfebase +
 				vfe32_AXI_WM_CFG[vfe32_ctrl->
@@ -2923,48 +2997,62 @@
 		}
 	}
 
-	if (vfe32_ctrl->share_ctrl->liveshot_state ==
-		VFE_STATE_START_REQUESTED) {
-		pr_info("%s enabling liveshot output\n", __func__);
+	switch (vfe32_ctrl->share_ctrl->liveshot_state) {
+	case VFE_STATE_START_REQUESTED:
+		CDBG("%s enabling liveshot output\n", __func__);
 		if (vfe32_ctrl->share_ctrl->outpath.output_mode &
-				VFE32_OUTPUT_MODE_PRIMARY) {
+			VFE32_OUTPUT_MODE_PRIMARY) {
 			msm_camera_io_w(1, vfe32_ctrl->share_ctrl->vfebase +
 				vfe32_AXI_WM_CFG[vfe32_ctrl->
 				share_ctrl->outpath.out0.ch0]);
 			msm_camera_io_w(1, vfe32_ctrl->share_ctrl->vfebase +
-			vfe32_AXI_WM_CFG[vfe32_ctrl->
+				vfe32_AXI_WM_CFG[vfe32_ctrl->
 				share_ctrl->outpath.out0.ch1]);
+
 			vfe32_ctrl->share_ctrl->liveshot_state =
 				VFE_STATE_STARTED;
 		}
-	}
-
-	if (vfe32_ctrl->share_ctrl->liveshot_state == VFE_STATE_STARTED) {
+		break;
+	case VFE_STATE_STARTED:
 		vfe32_ctrl->share_ctrl->vfe_capture_count--;
-		if (!vfe32_ctrl->share_ctrl->vfe_capture_count)
-			vfe32_ctrl->share_ctrl->liveshot_state =
-				VFE_STATE_STOP_REQUESTED;
-		msm_camera_io_w_mb(1, vfe32_ctrl->
-			share_ctrl->vfebase + VFE_REG_UPDATE_CMD);
-	} else if (vfe32_ctrl->share_ctrl->liveshot_state ==
-			VFE_STATE_STOP_REQUESTED) {
-		CDBG("%s: disabling liveshot output\n", __func__);
-		if (vfe32_ctrl->share_ctrl->outpath.output_mode &
-			VFE32_OUTPUT_MODE_PRIMARY) {
+		if (!vfe32_ctrl->share_ctrl->vfe_capture_count &&
+			(vfe32_ctrl->share_ctrl->outpath.output_mode &
+				VFE32_OUTPUT_MODE_PRIMARY)) {
 			msm_camera_io_w(0, vfe32_ctrl->share_ctrl->vfebase +
 				vfe32_AXI_WM_CFG[vfe32_ctrl->
 				share_ctrl->outpath.out0.ch0]);
 			msm_camera_io_w(0, vfe32_ctrl->share_ctrl->vfebase +
 				vfe32_AXI_WM_CFG[vfe32_ctrl->
 				share_ctrl->outpath.out0.ch1]);
+		}
+		break;
+	case VFE_STATE_STOP_REQUESTED:
+		if (vfe32_ctrl->share_ctrl->outpath.output_mode &
+				VFE32_OUTPUT_MODE_PRIMARY) {
+			/* Stop requested, stop write masters, and
+			 * trigger REG_UPDATE. Send STOP_LS_ACK in
+			 * next reg update. */
+			msm_camera_io_w(0, vfe32_ctrl->share_ctrl->vfebase +
+				vfe32_AXI_WM_CFG[vfe32_ctrl->
+				share_ctrl->outpath.out0.ch0]);
+			msm_camera_io_w(0, vfe32_ctrl->share_ctrl->vfebase +
+				vfe32_AXI_WM_CFG[vfe32_ctrl->
+				share_ctrl->outpath.out0.ch1]);
+
 			vfe32_ctrl->share_ctrl->liveshot_state =
 				VFE_STATE_STOPPED;
 			msm_camera_io_w_mb(1, vfe32_ctrl->share_ctrl->vfebase +
 				VFE_REG_UPDATE_CMD);
 		}
-	} else if (vfe32_ctrl->share_ctrl->liveshot_state ==
-			VFE_STATE_STOPPED) {
+		break;
+	case VFE_STATE_STOPPED:
+		CDBG("%s Sending STOP_LS ACK\n", __func__);
+		vfe32_send_isp_msg(&vfe32_ctrl->subdev,
+			vfe32_ctrl->share_ctrl->vfeFrameId, MSG_ID_STOP_LS_ACK);
 		vfe32_ctrl->share_ctrl->liveshot_state = VFE_STATE_IDLE;
+		break;
+	default:
+		break;
 	}
 
 	if ((vfe32_ctrl->share_ctrl->operation_mode ==
@@ -3039,8 +3127,25 @@
 	}
 }
 
+static void vfe32_process_rdi1_reg_update_irq(
+	struct vfe32_ctrl_type *vfe32_ctrl)
+{
+	unsigned long flags;
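+	/* Send the pending START ack, if any, on this RDI1 reg update. */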
+	spin_lock_irqsave(&vfe32_ctrl->start_ack_lock, flags);
+	if (vfe32_ctrl->start_ack_pending == TRUE) {
+		vfe32_ctrl->start_ack_pending = FALSE;
+		spin_unlock_irqrestore(
+				&vfe32_ctrl->start_ack_lock, flags);
+		vfe32_send_isp_msg(&vfe32_ctrl->subdev,
+			vfe32_ctrl->share_ctrl->vfeFrameId, MSG_ID_START_ACK);
+	} else {
+		spin_unlock_irqrestore(
+				&vfe32_ctrl->start_ack_lock, flags);
+	}
+}
+
 static void vfe32_set_default_reg_values(
-			struct vfe32_ctrl_type *vfe32_ctrl)
+	struct vfe32_ctrl_type *vfe32_ctrl)
 {
 	msm_camera_io_w(0x800080,
 		vfe32_ctrl->share_ctrl->vfebase + VFE_DEMUX_GAIN_0);
@@ -3168,6 +3273,8 @@
 		pr_err("vfe32_irq: camif errors\n");
 		reg_value = msm_camera_io_r(
 			axi_ctrl->share_ctrl->vfebase + VFE_CAMIF_STATUS);
+		v4l2_subdev_notify(&axi_ctrl->subdev,
+			NOTIFY_VFE_CAMIF_ERROR, (void *)NULL);
 		pr_err("camifStatus  = 0x%x\n", reg_value);
 		vfe32_send_isp_msg(&axi_ctrl->subdev,
 			axi_ctrl->share_ctrl->vfeFrameId, MSG_ID_CAMIF_ERROR);
@@ -3356,9 +3463,6 @@
 			ch1_paddr, ch2_paddr,
 			axi_ctrl->share_ctrl->outpath.out0.image_mode);
 
-		if (axi_ctrl->share_ctrl->liveshot_state == VFE_STATE_STOPPED)
-			axi_ctrl->share_ctrl->liveshot_state = VFE_STATE_IDLE;
-
 	} else {
 		axi_ctrl->share_ctrl->outpath.out0.frame_drop_cnt++;
 		CDBG("path_irq_0 - no free buffer!\n");
@@ -3451,7 +3555,7 @@
 	/* this must be rdi image output. */
 	struct msm_free_buf *free_buf = NULL;
 	/*RDI0*/
-	if (axi_ctrl->share_ctrl->operation_mode == VFE_OUTPUTS_RDI0) {
+	if (axi_ctrl->share_ctrl->operation_mode & VFE_OUTPUTS_RDI0) {
 		free_buf = vfe32_check_free_buffer(VFE_MSG_OUTPUT_IRQ,
 			VFE_MSG_OUTPUT_TERTIARY1, axi_ctrl);
 		if (free_buf) {
@@ -3485,6 +3589,46 @@
 	}
 }
 
+static void vfe32_process_output_path_irq_rdi1(
+	struct axi_ctrl_t *axi_ctrl)
+{
+	uint32_t ping_pong;
+	uint32_t ch0_paddr = 0;
+	/* this must be rdi image output. */
+	struct msm_free_buf *free_buf = NULL;
+	/*RDI1*/
+	if (axi_ctrl->share_ctrl->operation_mode & VFE_OUTPUTS_RDI1) {
+		free_buf = vfe32_check_free_buffer(VFE_MSG_OUTPUT_IRQ,
+			VFE_MSG_OUTPUT_TERTIARY2, axi_ctrl);
+		if (free_buf) {
+			ping_pong = msm_camera_io_r(axi_ctrl->
+				share_ctrl->vfebase +
+				VFE_BUS_PING_PONG_STATUS);
+
+			/* Y channel */
+			ch0_paddr = vfe32_get_ch_addr(ping_pong,
+				axi_ctrl->share_ctrl->vfebase,
+				axi_ctrl->share_ctrl->outpath.out3.ch0);
+			pr_debug("%s ch0 = 0x%x\n",
+				__func__, ch0_paddr);
+
+			/* Y channel */
+			vfe32_put_ch_addr(ping_pong,
+				axi_ctrl->share_ctrl->vfebase,
+				axi_ctrl->share_ctrl->outpath.out3.ch0,
+				free_buf->ch_paddr[0]);
+
+			vfe_send_outmsg(axi_ctrl,
+				MSG_ID_OUTPUT_TERTIARY2, ch0_paddr,
+				0, 0,
+				axi_ctrl->share_ctrl->outpath.out3.image_mode);
+		} else {
+			axi_ctrl->share_ctrl->outpath.out3.frame_drop_cnt++;
+			pr_err("path_irq - no free buffer for rdi1!\n");
+		}
+	}
+}
+
 static uint32_t  vfe32_process_stats_irq_common(
 	struct vfe32_ctrl_type *vfe32_ctrl,
 	uint32_t statsNum, uint32_t newAddr)
@@ -3864,6 +4008,10 @@
 		CDBG("irq	rdi0 regUpdateIrq\n");
 		vfe32_process_rdi0_reg_update_irq(vfe32_ctrl);
 		break;
+	case VFE_IRQ_STATUS1_RDI1_REG_UPDATE:
+		CDBG("irq	rdi1 regUpdateIrq\n");
+		vfe32_process_rdi1_reg_update_irq(vfe32_ctrl);
+		break;
 	case VFE_IMASK_WHILE_STOPPING_1:
 		CDBG("irq	resetAckIrq\n");
 		vfe32_process_reset_irq(vfe32_ctrl);
@@ -3959,6 +4107,12 @@
 				(void *)VFE_IRQ_STATUS1_RDI0_REG_UPDATE);
 
 		if (qcmd->vfeInterruptStatus1 &
+				VFE_IRQ_STATUS1_RDI1_REG_UPDATE_MASK)
+			v4l2_subdev_notify(&axi_ctrl->subdev,
+				NOTIFY_VFE_IRQ,
+				(void *)VFE_IRQ_STATUS1_RDI1_REG_UPDATE);
+
+		if (qcmd->vfeInterruptStatus1 &
 				VFE_IMASK_WHILE_STOPPING_1)
 			v4l2_subdev_notify(&axi_ctrl->subdev,
 				NOTIFY_VFE_IRQ,
@@ -4337,19 +4491,10 @@
 		goto remap_failed;
 	}
 
-	if (axi_ctrl->fs_vfe == NULL) {
-		axi_ctrl->fs_vfe =
-			regulator_get(&axi_ctrl->pdev->dev, "fs_vfe");
-		if (IS_ERR(axi_ctrl->fs_vfe)) {
-			pr_err("%s: Regulator FS_VFE get failed %ld\n",
-				__func__, PTR_ERR(axi_ctrl->fs_vfe));
-			axi_ctrl->fs_vfe = NULL;
-			goto fs_failed;
-		} else if (regulator_enable(axi_ctrl->fs_vfe)) {
-			pr_err("%s: Regulator FS_VFE enable failed\n",
-							__func__);
-			regulator_put(axi_ctrl->fs_vfe);
-			axi_ctrl->fs_vfe = NULL;
+	if (axi_ctrl->fs_vfe) {
+		rc = regulator_enable(axi_ctrl->fs_vfe);
+		if (rc) {
+			pr_err("%s: Regulator enable failed\n", __func__);
 			goto fs_failed;
 		}
 	}
@@ -4376,8 +4521,6 @@
 	return rc;
 clk_enable_failed:
 	regulator_disable(axi_ctrl->fs_vfe);
-	regulator_put(axi_ctrl->fs_vfe);
-	axi_ctrl->fs_vfe = NULL;
 fs_failed:
 	iounmap(axi_ctrl->share_ctrl->vfebase);
 	axi_ctrl->share_ctrl->vfebase = NULL;
@@ -4432,11 +4575,9 @@
 	tasklet_kill(&axi_ctrl->vfe32_tasklet);
 	msm_cam_clk_enable(&axi_ctrl->pdev->dev, vfe32_clk_info,
 			axi_ctrl->vfe_clk, ARRAY_SIZE(vfe32_clk_info), 0);
-	if (axi_ctrl->fs_vfe) {
+	if (axi_ctrl->fs_vfe)
 		regulator_disable(axi_ctrl->fs_vfe);
-		regulator_put(axi_ctrl->fs_vfe);
-		axi_ctrl->fs_vfe = NULL;
-	}
+
 	iounmap(axi_ctrl->share_ctrl->vfebase);
 	axi_ctrl->share_ctrl->vfebase = NULL;
 
@@ -4457,7 +4598,11 @@
 
 void axi_start(struct axi_ctrl_t *axi_ctrl)
 {
-	switch (axi_ctrl->share_ctrl->operation_mode) {
+	uint16_t operation_mode =
+		(axi_ctrl->share_ctrl->operation_mode &
+		~(VFE_OUTPUTS_RDI0|VFE_OUTPUTS_RDI1));
+
+	switch (operation_mode) {
 	case VFE_OUTPUTS_PREVIEW:
 	case VFE_OUTPUTS_PREVIEW_AND_VIDEO:
 		if (axi_ctrl->share_ctrl->outpath.output_mode &
@@ -4481,11 +4626,6 @@
 				share_ctrl->outpath.out0.ch2]);
 		}
 		break;
-	case VFE_OUTPUTS_RDI0:
-		msm_camera_io_w(1, axi_ctrl->share_ctrl->vfebase +
-			vfe32_AXI_WM_CFG[axi_ctrl->
-			share_ctrl->outpath.out2.ch0]);
-		break;
 	default:
 		if (axi_ctrl->share_ctrl->outpath.output_mode &
 			VFE32_OUTPUT_MODE_SECONDARY) {
@@ -4509,6 +4649,16 @@
 		}
 		break;
 	}
+
+	if (axi_ctrl->share_ctrl->operation_mode & VFE_OUTPUTS_RDI0)
+		msm_camera_io_w(1, axi_ctrl->share_ctrl->vfebase +
+			vfe32_AXI_WM_CFG[axi_ctrl->share_ctrl->
+			outpath.out2.ch0]);
+	if (axi_ctrl->share_ctrl->operation_mode & VFE_OUTPUTS_RDI1)
+		msm_camera_io_w(1, axi_ctrl->share_ctrl->vfebase +
+			vfe32_AXI_WM_CFG[axi_ctrl->share_ctrl->
+			outpath.out3.ch0]);
+
 }
 
 void axi_stop(struct axi_ctrl_t *axi_ctrl)
@@ -4546,8 +4696,8 @@
 {
 	struct msm_vfe_cfg_cmd cfgcmd;
 	struct msm_isp_cmd vfecmd;
-	int rc = 0;
 	struct axi_ctrl_t *axi_ctrl = v4l2_get_subdevdata(sd);
+	int rc = 0, vfe_cmd_type = 0, rdi_mode = 0;
 
 	if (!axi_ctrl->share_ctrl->vfebase) {
 		pr_err("%s: base address unmapped\n", __func__);
@@ -4569,7 +4719,65 @@
 		}
 	}
 
+	vfe_cmd_type = (cfgcmd.cmd_type & ~(CMD_AXI_CFG_TERT1|
+		CMD_AXI_CFG_TERT2));
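+	/* Pure TERT1/TERT2 requests are handled fully in the switch below;
+	 * mixed requests fall to its default case, which folds the tertiary
+	 * bits into rdi_mode for the PRIM/SEC switch that follows. */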
 	switch (cfgcmd.cmd_type) {
+	case CMD_AXI_CFG_TERT1:{
+		uint32_t *axio = NULL;
+		axio = kmalloc(vfe32_cmd[VFE_CMD_AXI_OUT_CFG].length,
+				GFP_ATOMIC);
+		if (!axio)
+			return -ENOMEM;
+
+		if (copy_from_user(axio, (void __user *)(vfecmd.value),
+				vfe32_cmd[VFE_CMD_AXI_OUT_CFG].length)) {
+			kfree(axio);
+			return -EFAULT;
+		}
+		vfe32_config_axi(axi_ctrl, OUTPUT_TERT1, axio);
+		kfree(axio);
+		return rc;
+		}
+	case CMD_AXI_CFG_TERT2:{
+		uint32_t *axio = NULL;
+		axio = kmalloc(vfe32_cmd[VFE_CMD_AXI_OUT_CFG].length,
+				GFP_ATOMIC);
+		if (!axio)
+			return -ENOMEM;
+
+		if (copy_from_user(axio, (void __user *)(vfecmd.value),
+				vfe32_cmd[VFE_CMD_AXI_OUT_CFG].length)) {
+			kfree(axio);
+			return -EFAULT;
+		}
+		vfe32_config_axi(axi_ctrl, OUTPUT_TERT2, axio);
+		kfree(axio);
+		return rc;
+		}
+	case CMD_AXI_CFG_TERT1|CMD_AXI_CFG_TERT2:{
+		uint32_t *axio = NULL;
+		axio = kmalloc(vfe32_cmd[VFE_CMD_AXI_OUT_CFG].length,
+				GFP_ATOMIC);
+		if (!axio)
+			return -ENOMEM;
+
+		if (copy_from_user(axio, (void __user *)(vfecmd.value),
+				vfe32_cmd[VFE_CMD_AXI_OUT_CFG].length)) {
+			kfree(axio);
+			return -EFAULT;
+		}
+		vfe32_config_axi(axi_ctrl, OUTPUT_TERT1|OUTPUT_TERT2, axio);
+		kfree(axio);
+		return rc;
+		}
+	default:
+		if (cfgcmd.cmd_type & CMD_AXI_CFG_TERT1)
+			rdi_mode |= OUTPUT_TERT1;
+		if (cfgcmd.cmd_type & CMD_AXI_CFG_TERT2)
+			rdi_mode |= OUTPUT_TERT2;
+	}
+	switch (vfe_cmd_type) {
 	case CMD_AXI_CFG_PRIM: {
 		uint32_t *axio = NULL;
 		axio = kmalloc(vfe32_cmd[VFE_CMD_AXI_OUT_CFG].length,
@@ -4585,7 +4793,7 @@
 			rc = -EFAULT;
 			break;
 		}
-		vfe32_config_axi(axi_ctrl, OUTPUT_PRIM, axio);
+		vfe32_config_axi(axi_ctrl, rdi_mode|OUTPUT_PRIM, axio);
 		kfree(axio);
 		break;
 		}
@@ -4604,7 +4812,8 @@
 			rc = -EFAULT;
 			break;
 		}
-		vfe32_config_axi(axi_ctrl, OUTPUT_PRIM_ALL_CHNLS, axio);
+		vfe32_config_axi(axi_ctrl, rdi_mode|OUTPUT_PRIM_ALL_CHNLS,
+			axio);
 		kfree(axio);
 		break;
 		}
@@ -4623,7 +4832,8 @@
 			rc = -EFAULT;
 			break;
 		}
-		vfe32_config_axi(axi_ctrl, OUTPUT_PRIM|OUTPUT_SEC, axio);
+		vfe32_config_axi(axi_ctrl,
+			rdi_mode|OUTPUT_PRIM|OUTPUT_SEC, axio);
 		kfree(axio);
 		break;
 		}
@@ -4643,7 +4853,7 @@
 			break;
 		}
 		vfe32_config_axi(axi_ctrl,
-			OUTPUT_PRIM|OUTPUT_SEC_ALL_CHNLS, axio);
+			rdi_mode|OUTPUT_PRIM|OUTPUT_SEC_ALL_CHNLS, axio);
 		kfree(axio);
 		break;
 		}
@@ -4663,30 +4873,11 @@
 			break;
 		}
 		vfe32_config_axi(axi_ctrl,
-			OUTPUT_PRIM_ALL_CHNLS|OUTPUT_SEC, axio);
+			rdi_mode|OUTPUT_PRIM_ALL_CHNLS|OUTPUT_SEC, axio);
 		kfree(axio);
 		break;
 		}
-	case CMD_AXI_CFG_TERT1: {
-		uint32_t *axio = NULL;
-		axio = kmalloc(vfe32_cmd[VFE_CMD_AXI_OUT_CFG].length,
-				GFP_ATOMIC);
-		if (!axio) {
-			rc = -ENOMEM;
-			break;
-		}
 
-		if (copy_from_user(axio, (void __user *)(vfecmd.value),
-				vfe32_cmd[VFE_CMD_AXI_OUT_CFG].length)) {
-			kfree(axio);
-			rc = -EFAULT;
-			break;
-		}
-		vfe32_config_axi(axi_ctrl,
-			OUTPUT_TERT1, axio);
-		kfree(axio);
-		break;
-		}
 	case CMD_AXI_CFG_PRIM_ALL_CHNLS|CMD_AXI_CFG_SEC_ALL_CHNLS:
 		pr_err("%s Invalid/Unsupported AXI configuration %x",
 			__func__, cfgcmd.cmd_type);
@@ -4731,6 +4922,11 @@
 		if (irqstatus & (0x1 << (axi_ctrl->share_ctrl->outpath.out2.ch0
 			+ VFE_WM_OFFSET)))
 			vfe32_process_output_path_irq_rdi0(axi_ctrl);
+	if (axi_ctrl->share_ctrl->outpath.output_mode &
+		VFE32_OUTPUT_MODE_TERTIARY2)
+		if (irqstatus & (0x1 << (axi_ctrl->share_ctrl->outpath.out3.ch0
+			+ VFE_WM_OFFSET)))
+			vfe32_process_output_path_irq_rdi1(axi_ctrl);
 
 	/* in snapshot mode if done then send
 	snapshot done message */
@@ -4932,6 +5128,13 @@
 		goto vfe32_no_resource;
 	}
 
+	axi_ctrl->fs_vfe = regulator_get(&pdev->dev, "vdd");
+	if (IS_ERR(axi_ctrl->fs_vfe)) {
+		pr_err("%s: Regulator get failed %ld\n", __func__,
+			PTR_ERR(axi_ctrl->fs_vfe));
+		axi_ctrl->fs_vfe = NULL;
+	}
+
 	/* Request for this device irq from the camera server. If the
 	 * IRQ Router is present on this target, the interrupt will be
 	 * handled by the camera server and the interrupt service
diff --git a/drivers/media/video/msm/msm_vfe32.h b/drivers/media/video/msm/msm_vfe32.h
index 1746f3f..c41df09 100644
--- a/drivers/media/video/msm/msm_vfe32.h
+++ b/drivers/media/video/msm/msm_vfe32.h
@@ -240,8 +240,8 @@
 #define V32_OPERATION_CFG_LEN     44
 
 #define V32_AXI_OUT_OFF           0x00000038
-#define V32_AXI_OUT_LEN           216
-#define V32_AXI_CH_INF_LEN        24
+#define V32_AXI_OUT_LEN           224
+#define V32_AXI_CH_INF_LEN        32
 #define V32_AXI_CFG_LEN           47
 #define V32_AXI_BUS_FMT_OFF    1
 #define V32_AXI_BUS_FMT_LEN    4
@@ -803,6 +803,7 @@
 	struct vfe32_output_ch out0; /* preview and thumbnail */
 	struct vfe32_output_ch out1; /* snapshot */
 	struct vfe32_output_ch out2; /* rdi0    */
+	struct vfe32_output_ch out3; /* rdi1    */
 };
 
 struct vfe32_frame_extra {
@@ -908,6 +909,7 @@
 #define VFE32_OUTPUT_MODE_SECONDARY		BIT(8)
 #define VFE32_OUTPUT_MODE_SECONDARY_ALL_CHNLS	BIT(9)
 #define VFE32_OUTPUT_MODE_TERTIARY1		BIT(10)
+#define VFE32_OUTPUT_MODE_TERTIARY2		BIT(11)
 
 struct vfe_stats_control {
 	uint8_t  ackPending;
diff --git a/drivers/media/video/msm/msm_vpe.c b/drivers/media/video/msm/msm_vpe.c
index fb22cf9..2b58f44 100644
--- a/drivers/media/video/msm/msm_vpe.c
+++ b/drivers/media/video/msm/msm_vpe.c
@@ -528,16 +528,13 @@
 	vpe_ctrl->state = VPE_STATE_INIT;
 	spin_unlock_irqrestore(&vpe_ctrl->lock, flags);
 	enable_irq(vpe_ctrl->vpeirq->start);
-	vpe_ctrl->fs_vpe = regulator_get(NULL, "fs_vpe");
-	if (IS_ERR(vpe_ctrl->fs_vpe)) {
-		pr_err("%s: Regulator FS_VPE get failed %ld\n", __func__,
-			PTR_ERR(vpe_ctrl->fs_vpe));
-		vpe_ctrl->fs_vpe = NULL;
-		goto vpe_fs_failed;
-	} else if (regulator_enable(vpe_ctrl->fs_vpe)) {
-		pr_err("%s: Regulator FS_VPE enable failed\n", __func__);
-		regulator_put(vpe_ctrl->fs_vpe);
-		goto vpe_fs_failed;
+
+	if (vpe_ctrl->fs_vpe) {
+		rc = regulator_enable(vpe_ctrl->fs_vpe);
+		if (rc) {
+			pr_err("%s: Regulator enable failed\n", __func__);
+			goto vpe_fs_failed;
+		}
 	}
 
 	rc = msm_cam_clk_enable(&vpe_ctrl->pdev->dev, vpe_clk_info,
@@ -549,8 +546,6 @@
 
 vpe_clk_failed:
 	regulator_disable(vpe_ctrl->fs_vpe);
-	regulator_put(vpe_ctrl->fs_vpe);
-	vpe_ctrl->fs_vpe = NULL;
 vpe_fs_failed:
 	disable_irq(vpe_ctrl->vpeirq->start);
 	vpe_ctrl->state = VPE_STATE_IDLE;
@@ -576,8 +571,6 @@
 			vpe_ctrl->vpe_clk, ARRAY_SIZE(vpe_clk_info), 0);
 
 	regulator_disable(vpe_ctrl->fs_vpe);
-	regulator_put(vpe_ctrl->fs_vpe);
-	vpe_ctrl->fs_vpe = NULL;
 	spin_lock_irqsave(&vpe_ctrl->lock, flags);
 	vpe_ctrl->state = VPE_STATE_IDLE;
 	spin_unlock_irqrestore(&vpe_ctrl->lock, flags);
@@ -1026,6 +1019,13 @@
 		goto vpe_no_resource;
 	}
 
+	vpe_ctrl->fs_vpe = regulator_get(&pdev->dev, "vdd");
+	if (IS_ERR(vpe_ctrl->fs_vpe)) {
+		pr_err("%s: Regulator FS_VPE get failed %ld\n", __func__,
+			PTR_ERR(vpe_ctrl->fs_vpe));
+		vpe_ctrl->fs_vpe = NULL;
+	}
+
 	disable_irq(vpe_ctrl->vpeirq->start);
 
 	atomic_set(&vpe_ctrl->active, 0);
@@ -1042,7 +1042,7 @@
 vpe_no_resource:
 	pr_err("%s: VPE Probe failed.\n", __func__);
 	kfree(vpe_ctrl);
-	return 0;
+	return rc;
 }
 
 struct platform_driver msm_vpe_driver = {
diff --git a/drivers/media/video/msm/sensors/imx074_v4l2.c b/drivers/media/video/msm/sensors/imx074_v4l2.c
index 3d23337..ddf0754 100644
--- a/drivers/media/video/msm/sensors/imx074_v4l2.c
+++ b/drivers/media/video/msm/sensors/imx074_v4l2.c
@@ -276,7 +276,7 @@
 	.sensor_config = msm_sensor_config,
 	.sensor_power_up = msm_sensor_power_up,
 	.sensor_power_down = msm_sensor_power_down,
-	.sensor_adjust_frame_lines = msm_sensor_adjust_frame_lines,
+	.sensor_adjust_frame_lines = msm_sensor_adjust_frame_lines1,
 	.sensor_get_csi_params = msm_sensor_get_csi_params,
 };
 
diff --git a/drivers/media/video/msm/sensors/imx091.c b/drivers/media/video/msm/sensors/imx091.c
index 49442e9..7fda037 100644
--- a/drivers/media/video/msm/sensors/imx091.c
+++ b/drivers/media/video/msm/sensors/imx091.c
@@ -303,7 +303,7 @@
 	.sensor_config = msm_sensor_config,
 	.sensor_power_up = msm_sensor_power_up,
 	.sensor_power_down = msm_sensor_power_down,
-	.sensor_adjust_frame_lines = msm_sensor_adjust_frame_lines,
+	.sensor_adjust_frame_lines = msm_sensor_adjust_frame_lines1,
 	.sensor_get_csi_params = msm_sensor_get_csi_params,
 };
 
diff --git a/drivers/media/video/msm/sensors/msm_sensor.c b/drivers/media/video/msm/sensors/msm_sensor.c
index 8ab3963..be1efe0 100644
--- a/drivers/media/video/msm/sensors/msm_sensor.c
+++ b/drivers/media/video/msm/sensors/msm_sensor.c
@@ -16,7 +16,7 @@
 #include "msm_camera_i2c_mux.h"
 
 /*=============================================================*/
-int32_t msm_sensor_adjust_frame_lines(struct msm_sensor_ctrl_t *s_ctrl,
+int32_t msm_sensor_adjust_frame_lines1(struct msm_sensor_ctrl_t *s_ctrl,
 	uint16_t res)
 {
 	uint16_t cur_line = 0;
@@ -50,6 +50,45 @@
 	return 0;
 }
 
+int32_t msm_sensor_adjust_frame_lines2(struct msm_sensor_ctrl_t *s_ctrl,
+	uint16_t res)
+{
+	uint16_t cur_line = 0;
+	uint16_t exp_fl_lines = 0;
+	uint8_t int_time[3];
+	if (s_ctrl->sensor_exp_gain_info) {
+		if (s_ctrl->prev_gain && s_ctrl->prev_line &&
+			s_ctrl->func_tbl->sensor_write_exp_gain)
+			s_ctrl->func_tbl->sensor_write_exp_gain(
+				s_ctrl,
+				s_ctrl->prev_gain,
+				s_ctrl->prev_line);
+
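+		/* The current line count spans bits [19:4] of the three
+		 * bytes starting at coarse_int_time_addr - 1; reassemble
+		 * it from the raw bytes read below. */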
+		msm_camera_i2c_read_seq(s_ctrl->sensor_i2c_client,
+			s_ctrl->sensor_exp_gain_info->coarse_int_time_addr-1,
+			&int_time[0], 3);
+		cur_line |= int_time[0] << 12;
+		cur_line |= int_time[1] << 4;
+		cur_line |= int_time[2] >> 4;
+		exp_fl_lines = cur_line +
+			s_ctrl->sensor_exp_gain_info->vert_offset;
+		if (exp_fl_lines > s_ctrl->msm_sensor_reg->
+			output_settings[res].frame_length_lines)
+			msm_camera_i2c_write(s_ctrl->sensor_i2c_client,
+				s_ctrl->sensor_output_reg_addr->
+				frame_length_lines,
+				exp_fl_lines,
+				MSM_CAMERA_I2C_WORD_DATA);
+		CDBG("%s cur_line %x cur_fl_lines %x, exp_fl_lines %x\n",
+			__func__,
+			cur_line,
+			s_ctrl->msm_sensor_reg->
+			output_settings[res].frame_length_lines,
+			exp_fl_lines);
+	}
+	return 0;
+}
+
 int32_t msm_sensor_write_init_settings(struct msm_sensor_ctrl_t *s_ctrl)
 {
 	int32_t rc;
diff --git a/drivers/media/video/msm/sensors/msm_sensor.h b/drivers/media/video/msm/sensors/msm_sensor.h
index b1e584d..a3ddaa7 100644
--- a/drivers/media/video/msm/sensors/msm_sensor.h
+++ b/drivers/media/video/msm/sensors/msm_sensor.h
@@ -236,7 +236,10 @@
 int32_t msm_sensor_write_output_settings(struct msm_sensor_ctrl_t *s_ctrl,
 	uint16_t res);
 
-int32_t msm_sensor_adjust_frame_lines(struct msm_sensor_ctrl_t *s_ctrl,
+int32_t msm_sensor_adjust_frame_lines1(struct msm_sensor_ctrl_t *s_ctrl,
+	uint16_t res);
+
+int32_t msm_sensor_adjust_frame_lines2(struct msm_sensor_ctrl_t *s_ctrl,
 	uint16_t res);
 
 int32_t msm_sensor_setting(struct msm_sensor_ctrl_t *s_ctrl,
diff --git a/drivers/media/video/msm/sensors/ov2720.c b/drivers/media/video/msm/sensors/ov2720.c
index 03f1af1..e4c5061 100644
--- a/drivers/media/video/msm/sensors/ov2720.c
+++ b/drivers/media/video/msm/sensors/ov2720.c
@@ -783,7 +783,7 @@
 	.sensor_config = msm_sensor_config,
 	.sensor_power_up = msm_sensor_power_up,
 	.sensor_power_down = msm_sensor_power_down,
-	.sensor_adjust_frame_lines = msm_sensor_adjust_frame_lines,
+	.sensor_adjust_frame_lines = msm_sensor_adjust_frame_lines2,
 	.sensor_get_csi_params = msm_sensor_get_csi_params,
 };
 
diff --git a/drivers/media/video/msm/sensors/s5k3l1yx.c b/drivers/media/video/msm/sensors/s5k3l1yx.c
index d7aeb74..c24da00 100644
--- a/drivers/media/video/msm/sensors/s5k3l1yx.c
+++ b/drivers/media/video/msm/sensors/s5k3l1yx.c
@@ -652,7 +652,7 @@
 	.sensor_config = msm_sensor_config,
 	.sensor_power_up = msm_sensor_power_up,
 	.sensor_power_down = msm_sensor_power_down,
-	.sensor_adjust_frame_lines = msm_sensor_adjust_frame_lines,
+	.sensor_adjust_frame_lines = msm_sensor_adjust_frame_lines1,
 	.sensor_get_csi_params = msm_sensor_get_csi_params,
 };
 
diff --git a/drivers/media/video/msm/server/msm_cam_server.c b/drivers/media/video/msm/server/msm_cam_server.c
index dfa7fbe..05f3c4a 100644
--- a/drivers/media/video/msm/server/msm_cam_server.c
+++ b/drivers/media/video/msm/server/msm_cam_server.c
@@ -133,6 +133,15 @@
 	return NULL;
 }
 
+static void msm_cam_server_send_error_evt(int evt_type)
+{
+	struct v4l2_event v4l2_ev;
+	v4l2_ev.id = 0;
+	v4l2_ev.type = evt_type;
+	ktime_get_ts(&v4l2_ev.timestamp);
+	v4l2_event_queue(g_server_dev.pcam_active->pvdev, &v4l2_ev);
+}
+
 static int msm_ctrl_cmd_done(void *arg)
 {
 	void __user *uptr;
@@ -637,6 +646,7 @@
 		__func__, tmp_cmd.type, (uint32_t)tmp_cmd.value,
 		tmp_cmd.length, tmp_cmd.status, rc);
 	kfree(ctrl_data);
+	ctrl_data = NULL;
 	return rc;
 }
 
@@ -899,6 +909,12 @@
 	/*for single VFE msms (8660, 8960v1), just populate the session
 	with our VFE devices that registered*/
 	pmctl = msm_cam_server_get_mctl(pcam->mctl_handle);
+	if (pmctl == NULL) {
+		pr_err("%s: cannot find mctl\n", __func__);
+		msm_mctl_free(pcam);
+		atomic_dec(&ps->number_pcam_active);
+		return -ENODEV;
+	}
 	pmctl->axi_sdev = ps->axi_device[0];
 	pmctl->isp_sdev = ps->isp_subdev[0];
 	return rc;
@@ -1166,7 +1182,6 @@
 	if (g_server_dev.use_count == 0) {
 		mutex_lock(&g_server_dev.server_lock);
 		if (g_server_dev.pcam_active) {
-			struct v4l2_event v4l2_ev;
 			struct msm_cam_media_controller *pmctl = NULL;
 			int rc;
 
@@ -1179,13 +1194,8 @@
 				/*so that it isn't closed again*/
 				pmctl->mctl_release = NULL;
 			}
-
-			v4l2_ev.id = 0;
-			v4l2_ev.type = V4L2_EVENT_PRIVATE_START
-				+ MSM_CAM_APP_NOTIFY_ERROR_EVENT;
-			ktime_get_ts(&v4l2_ev.timestamp);
-			v4l2_event_queue(
-				g_server_dev.pcam_active->pvdev, &v4l2_ev);
+			msm_cam_server_send_error_evt(V4L2_EVENT_PRIVATE_START
+				+ MSM_CAM_APP_NOTIFY_ERROR_EVENT);
 		}
 		sub.type = V4L2_EVENT_ALL;
 		msm_server_v4l2_unsubscribe_event(
@@ -1430,6 +1440,11 @@
 		rc = v4l2_subdev_call(g_server_dev.gesture_device,
 			core, ioctl, VIDIOC_MSM_GESTURE_CAM_EVT, arg);
 		break;
+	case NOTIFY_VFE_CAMIF_ERROR: {
+		msm_cam_server_send_error_evt(V4L2_EVENT_PRIVATE_START
+			+ MSM_CAM_APP_NOTIFY_ERROR_EVENT);
+		break;
+	}
 	default:
 		break;
 	}
@@ -1888,8 +1903,17 @@
 	case GESTURE_DEV:
 		g_server_dev.gesture_device = sd;
 		break;
+
 	case IRQ_ROUTER_DEV:
 		g_server_dev.irqr_device = sd;
+		break;
+	case CPP_DEV:
+		if (index >= MAX_NUM_CPP_DEV) {
+			pr_err("%s Invalid CPP idx %d", __func__, index);
+			err = -EINVAL;
+			break;
+		}
+		g_server_dev.cpp_device[index] = sd;
 		break;
 	default:
 		break;
@@ -2019,7 +2043,7 @@
 	}
 
 	pmctl = msm_cam_server_get_mctl(pcam->mctl_handle);
-	if (!pmctl->mctl_open) {
+	if (!pmctl || !pmctl->mctl_open) {
 		D("%s: media contoller is not inited\n",
 			 __func__);
 		rc = -ENODEV;
@@ -2297,7 +2321,10 @@
 	/* assume there is only one active camera possible*/
 	config_cam->p_mctl =
 		msm_cam_server_get_mctl(g_server_dev.pcam_active->mctl_handle);
-
+	if (!config_cam->p_mctl) {
+		pr_err("%s: cannot find mctl\n", __func__);
+		return -ENODEV;
+	}
 	INIT_HLIST_HEAD(&config_cam->p_mctl->stats_info.pmem_stats_list);
 	spin_lock_init(&config_cam->p_mctl->stats_info.pmem_stats_spinlock);
 
@@ -2387,6 +2414,7 @@
 		/* Next, copy the userspace event ctrl structure */
 		if (copy_from_user((void *)&u_isp_event, user_ptr,
 				   sizeof(struct msm_isp_event_ctrl))) {
+			rc = -EFAULT;
 			break;
 		}
 		/* Save the pointer of the user allocated command buffer*/
@@ -2398,6 +2426,7 @@
 			&ev, fp->f_flags & O_NONBLOCK);
 		if (rc < 0) {
 			pr_err("no pending events?");
+			rc = -EFAULT;
 			break;
 		}
 		/* Use k_isp_event to point to the event_ctrl structure
@@ -2427,6 +2456,7 @@
 						break;
 					}
 					kfree(k_msg_value);
+					k_msg_value = NULL;
 				}
 			}
 		}
@@ -2439,6 +2469,7 @@
 			break;
 		}
 		kfree(k_isp_event);
+		k_isp_event = NULL;
 
 		/* Copy the v4l2_event structure back to the user*/
 		if (copy_to_user((void __user *)arg, &ev,
diff --git a/drivers/media/video/msm/wfd/wfd-ioctl.c b/drivers/media/video/msm/wfd/wfd-ioctl.c
index 68a8a7d..c198815 100644
--- a/drivers/media/video/msm/wfd/wfd-ioctl.c
+++ b/drivers/media/video/msm/wfd/wfd-ioctl.c
@@ -288,7 +288,7 @@
 		mdp_mregion->ion_handle = enc_mregion->ion_handle;
 
 		rc = ion_map_iommu(wfd_dev->ion_client, mdp_mregion->ion_handle,
-				DISPLAY_WRITE_DOMAIN, GEN_POOL, SZ_4K,
+				DISPLAY_DOMAIN, GEN_POOL, SZ_4K,
 				0, (unsigned long *)&mdp_mregion->paddr,
 				(unsigned long *)&mdp_mregion->size, 0, 0);
 		if (rc) {
@@ -363,7 +363,7 @@
 			if (mpair->mdp->paddr)
 				ion_unmap_iommu(wfd_dev->ion_client,
 						mpair->mdp->ion_handle,
-						DISPLAY_WRITE_DOMAIN, GEN_POOL);
+						DISPLAY_DOMAIN, GEN_POOL);
 
 			if (mpair->enc->paddr)
 				ion_unmap_iommu(wfd_dev->ion_client,
diff --git a/drivers/media/video/msm_vidc/Kconfig b/drivers/media/video/msm_vidc/Kconfig
index 0b5a5fe..2957d45 100644
--- a/drivers/media/video/msm_vidc/Kconfig
+++ b/drivers/media/video/msm_vidc/Kconfig
@@ -4,5 +4,5 @@
 
 menuconfig MSM_VIDC
 	bool "Qualcomm MSM Video Core Driver"
-		depends on ARCH_MSMCOPPER && VIDEO_V4L2
+		depends on ARCH_MSM8974 && VIDEO_V4L2
 		default y
diff --git a/drivers/media/video/msm_vidc/msm_smem.c b/drivers/media/video/msm_vidc/msm_smem.c
index bdace3c..2913d74 100644
--- a/drivers/media/video/msm_vidc/msm_smem.c
+++ b/drivers/media/video/msm_vidc/msm_smem.c
@@ -26,7 +26,7 @@
 	unsigned long ionflag;
 	size_t len;
 	int rc = 0;
-	hndl = ion_import_fd(client->clnt, fd);
+	hndl = ion_import_dma_buf(client->clnt, fd);
 	if (IS_ERR_OR_NULL(hndl)) {
 		pr_err("Failed to get handle: %p, %d, %d, %p\n",
 				client, fd, offset, hndl);
diff --git a/drivers/media/video/msm_vidc/msm_vdec.c b/drivers/media/video/msm_vidc/msm_vdec.c
index 0c26027..09c7215 100644
--- a/drivers/media/video/msm_vidc/msm_vdec.c
+++ b/drivers/media/video/msm_vidc/msm_vdec.c
@@ -202,6 +202,14 @@
 		.type = OUTPUT_PORT,
 	},
 	{
+		.name = "VC1",
+		.description = "VC-1 compressed format",
+		.fourcc = V4L2_PIX_FMT_VC1_ANNEX_G,
+		.num_planes = 1,
+		.get_frame_size = get_frame_size_compressed,
+		.type = OUTPUT_PORT,
+	},
+	{
 		.name = "H264",
 		.description = "H264 compressed format",
 		.fourcc = V4L2_PIX_FMT_H264,
diff --git a/drivers/media/video/msm_vidc/msm_venc.c b/drivers/media/video/msm_vidc/msm_venc.c
index ec93628..e835aaa 100644
--- a/drivers/media/video/msm_vidc/msm_venc.c
+++ b/drivers/media/video/msm_vidc/msm_venc.c
@@ -23,7 +23,7 @@
 #define MIN_NUM_OUTPUT_BUFFERS 2
 #define MAX_NUM_OUTPUT_BUFFERS 8
 #define MIN_BIT_RATE 64
-#define MAX_BIT_RATE 8000
+#define MAX_BIT_RATE 20000
 #define DEFAULT_BIT_RATE 64
 #define BIT_RATE_STEP 1
 #define MIN_FRAME_RATE 1
@@ -37,6 +37,7 @@
 #define B_FRAME_QP 30
 #define MAX_INTRA_REFRESH_MBS 300
 #define L_MODE V4L2_MPEG_VIDEO_H264_LOOP_FILTER_MODE_DISABLED_AT_SLICE_BOUNDARY
+#define CODING V4L2_MPEG_VIDEO_MPEG4_PROFILE_ADVANCED_CODING_EFFICIENCY
 
 static const char *const mpeg_video_rate_control[] = {
 	"No Rate Control",
@@ -79,7 +80,7 @@
 		.type = V4L2_CTRL_TYPE_INTEGER,
 		.minimum = 0,
 		.maximum = 10*MAX_FRAME_RATE,
-		.default_value = 0,
+		.default_value = DEFAULT_FRAME_RATE,
 		.step = 1,
 		.menu_skip_mask = 0,
 		.qmenu = NULL,
@@ -174,6 +175,26 @@
 		.qmenu = h264_video_entropy_cabac_model,
 	},
 	{
+		.id = V4L2_CID_MPEG_VIDEO_MPEG4_PROFILE,
+		.name = "MPEG4 Profile",
+		.type = V4L2_CTRL_TYPE_MENU,
+		.minimum = V4L2_MPEG_VIDEO_MPEG4_PROFILE_SIMPLE,
+		.maximum = CODING,
+		.default_value = V4L2_MPEG_VIDEO_MPEG4_PROFILE_SIMPLE,
+		.step = 1,
+		.menu_skip_mask = 0,
+	},
+	{
+		.id = V4L2_CID_MPEG_VIDEO_MPEG4_LEVEL,
+		.name = "MPEG4 Level",
+		.type = V4L2_CTRL_TYPE_MENU,
+		.minimum = V4L2_MPEG_VIDEO_MPEG4_LEVEL_0,
+		.maximum = V4L2_MPEG_VIDEO_MPEG4_LEVEL_5,
+		.default_value = V4L2_MPEG_VIDEO_MPEG4_LEVEL_0,
+		.step = 1,
+		.menu_skip_mask = 0,
+	},
+	{
 		.id = V4L2_CID_MPEG_VIDEO_H264_PROFILE,
 		.name = "H264 Profile",
 		.type = V4L2_CTRL_TYPE_MENU,
@@ -385,14 +406,17 @@
 static struct hal_intra_period
 	venc_intra_period = {2*DEFAULT_FRAME_RATE-1 , 0};
 static struct hal_profile_level
-	venc_profile_level = {V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE,
-				V4L2_MPEG_VIDEO_H264_LEVEL_1_0};
+	venc_h264_profile_level = {HAL_H264_PROFILE_BASELINE,
+		HAL_H264_LEVEL_1};
+static struct hal_profile_level
+	venc_mpeg4_profile_level = {HAL_MPEG4_PROFILE_SIMPLE,
+		HAL_MPEG4_LEVEL_0};
 static struct hal_h264_entropy_control
-	venc_h264_entropy_control = {V4L2_MPEG_VIDEO_H264_ENTROPY_MODE_CAVLC,
-				V4L2_CID_MPEG_VIDC_VIDEO_H264_CABAC_MODEL_0};
+	venc_h264_entropy_control = {HAL_H264_ENTROPY_CAVLC,
+		HAL_H264_CABAC_MODEL_0};
 static struct hal_multi_slice_control
-	venc_multi_slice_control = {V4L2_MPEG_VIDEO_MULTI_SLICE_MODE_SINGLE ,
-				0};
+	venc_multi_slice_control = {HAL_MULTI_SLICE_OFF,
+		0};
 
 static const struct msm_vidc_format venc_formats[] = {
 	{
@@ -477,7 +501,14 @@
 		rc = vidc_hal_session_set_property((void *)inst->session,
 				HAL_PARAM_FRAME_SIZE, &frame_sz);
 		if (rc) {
-			pr_err("Failed to set hal property for framesize\n");
+			pr_err("Failed to set framesize for Output port\n");
+			break;
+		}
+		frame_sz.buffer_type = HAL_BUFFER_OUTPUT;
+		rc = vidc_hal_session_set_property((void *)inst->session,
+				HAL_PARAM_FRAME_SIZE, &frame_sz);
+		if (rc) {
+			pr_err("Failed to set framesize for Capture port\n");
 			break;
 		}
 		rc = msm_comm_try_get_bufreqs(inst);
@@ -710,6 +741,57 @@
 			venc_h264_entropy_control.entropy_mode;
 		pdata = &h264_entropy_control;
 		break;
+	case V4L2_CID_MPEG_VIDEO_MPEG4_PROFILE:
+		property_id =
+			HAL_PARAM_PROFILE_LEVEL_CURRENT;
+		switch (control.value) {
+		case V4L2_MPEG_VIDEO_MPEG4_PROFILE_SIMPLE:
+			control.value = HAL_MPEG4_PROFILE_SIMPLE;
+			break;
+		case V4L2_MPEG_VIDEO_MPEG4_PROFILE_ADVANCED_SIMPLE:
+			control.value = HAL_MPEG4_PROFILE_ADVANCEDSIMPLE;
+			break;
+		default:
+			break;
+		}
+		profile_level.profile = control.value;
+		venc_mpeg4_profile_level.profile = control.value;
+		profile_level.level = venc_mpeg4_profile_level.level;
+		pdata = &profile_level;
+		break;
+	case V4L2_CID_MPEG_VIDEO_MPEG4_LEVEL:
+		property_id =
+			HAL_PARAM_PROFILE_LEVEL_CURRENT;
+		switch (control.value) {
+		case V4L2_MPEG_VIDEO_MPEG4_LEVEL_0:
+			control.value = HAL_MPEG4_LEVEL_0;
+			break;
+		case V4L2_MPEG_VIDEO_MPEG4_LEVEL_0B:
+			control.value = HAL_MPEG4_LEVEL_0b;
+			break;
+		case V4L2_MPEG_VIDEO_MPEG4_LEVEL_1:
+			control.value = HAL_MPEG4_LEVEL_1;
+			break;
+		case V4L2_MPEG_VIDEO_MPEG4_LEVEL_2:
+			control.value = HAL_MPEG4_LEVEL_2;
+			break;
+		case V4L2_MPEG_VIDEO_MPEG4_LEVEL_3:
+			control.value = HAL_MPEG4_LEVEL_3;
+			break;
+		case V4L2_MPEG_VIDEO_MPEG4_LEVEL_4:
+			control.value = HAL_MPEG4_LEVEL_4;
+			break;
+		case V4L2_MPEG_VIDEO_MPEG4_LEVEL_5:
+			control.value = HAL_MPEG4_LEVEL_5;
+			break;
+		default:
+			break;
+		}
+		profile_level.level = control.value;
+		venc_mpeg4_profile_level.level = control.value;
+		profile_level.profile = venc_mpeg4_profile_level.profile;
+		pdata = &profile_level;
+		break;
 	case V4L2_CID_MPEG_VIDEO_H264_PROFILE:
 		property_id =
 			HAL_PARAM_PROFILE_LEVEL_CURRENT;
@@ -740,9 +822,11 @@
 			break;
 			}
 		profile_level.profile = control.value;
-		venc_profile_level.profile = control.value;
-		profile_level.level = venc_profile_level.level;
+		venc_h264_profile_level.profile = control.value;
+		profile_level.level = venc_h264_profile_level.level;
 		pdata = &profile_level;
+		pr_debug("\nprofile: %d\n",
+			   profile_level.profile);
 		break;
 	case V4L2_CID_MPEG_VIDEO_H264_LEVEL:
 		property_id =
@@ -801,9 +885,12 @@
 			break;
 		}
 		profile_level.level = control.value;
-		venc_profile_level.level = control.value;
-		profile_level.profile = venc_profile_level.profile;
+		venc_h264_profile_level.level = control.value;
+		profile_level.profile = venc_h264_profile_level.profile;
 		pdata = &profile_level;
+		pr_debug("\nLevel: %d\n",
+			   profile_level.level);
 		break;
 	case V4L2_CID_MPEG_VIDC_VIDEO_ROTATION:
 		property_id =
@@ -895,7 +982,7 @@
 	case V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_BETA:
 		property_id =
 			HAL_PARAM_VENC_H264_DEBLOCK_CONTROL;
-		h264_db_control.slicebeta_offset = control.value;
+		h264_db_control.slice_beta_offset = control.value;
 		pdata = &h264_db_control;
 	default:
 		break;
@@ -1164,7 +1251,7 @@
 	}
 	rc = vb2_dqbuf(q, b, true);
 	if (rc)
-		pr_err("Failed to qbuf, %d\n", rc);
+		pr_err("Failed to dqbuf, %d\n", rc);
 	return rc;
 }
 
diff --git a/drivers/media/video/msm_vidc/msm_vidc_common.c b/drivers/media/video/msm_vidc/msm_vidc_common.c
index 9b617aa..a5cff9c 100644
--- a/drivers/media/video/msm_vidc/msm_vidc_common.c
+++ b/drivers/media/video/msm_vidc/msm_vidc_common.c
@@ -25,6 +25,11 @@
 	__rc; \
 })
 
+#define V4L2_EVENT_SEQ_CHANGED_SUFFICIENT \
+		V4L2_EVENT_MSM_VIDC_PORT_SETTINGS_CHANGED_SUFFICIENT
+#define V4L2_EVENT_SEQ_CHANGED_INSUFFICIENT \
+		V4L2_EVENT_MSM_VIDC_PORT_SETTINGS_CHANGED_INSUFFICIENT
+
 struct msm_vidc_core *get_vidc_core(int core_id)
 {
 	struct msm_vidc_core *core;
@@ -199,9 +204,20 @@
 	struct msm_vidc_cb_event *event_notify;
 	if (response) {
 		inst = (struct msm_vidc_inst *)response->session_id;
-		dqevent.type = V4L2_EVENT_MSM_VIDC_PORT_SETTINGS_CHANGED;
 		dqevent.id = 0;
 		event_notify = (struct msm_vidc_cb_event *) response->data;
+		switch (event_notify->hal_event_type) {
+		case HAL_EVENT_SEQ_CHANGED_SUFFICIENT_RESOURCES:
+			dqevent.type =
+				V4L2_EVENT_SEQ_CHANGED_SUFFICIENT;
+			break;
+		case HAL_EVENT_SEQ_CHANGED_INSUFFICIENT_RESOURCES:
+			dqevent.type =
+				V4L2_EVENT_SEQ_CHANGED_INSUFFICIENT;
+			break;
+		default:
+			break;
+		}
 		inst->reconfig_height = event_notify->height;
 		inst->reconfig_width = event_notify->width;
 		inst->in_reconfig = true;
@@ -813,7 +829,7 @@
 		if (rc || state == inst->state)
 			break;
 	default:
-		pr_err("State not recognized\n");
+		pr_err("State not recognized: %d\n", flipped_state);
 		rc = -EINVAL;
 		break;
 	}
@@ -855,6 +871,7 @@
 		frame_data.alloc_len = vb->v4l2_planes[0].length;
 		frame_data.filled_len = vb->v4l2_planes[0].bytesused;
 		frame_data.device_addr = vb->v4l2_planes[0].m.userptr;
+		frame_data.timestamp = vb->v4l2_buf.timestamp.tv_usec;
 		frame_data.clnt_data = (u32)vb;
 		if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
 			frame_data.buffer_type = HAL_BUFFER_INPUT;
@@ -871,10 +888,8 @@
 			frame_data.filled_len = 0;
 			frame_data.buffer_type = HAL_BUFFER_OUTPUT;
 			frame_data.extradata_addr = 0;
-			pr_debug("Sending ftb to hal...: Alloc: %d :filled: %d"
-				" extradata_addr: %d\n", frame_data.alloc_len,
-				   frame_data.filled_len,
-				   frame_data.extradata_addr);
+			pr_debug("Sending ftb to hal: Alloc: %d, filled: %d\n",
+				frame_data.alloc_len, frame_data.filled_len);
 			rc = vidc_hal_session_ftb((void *) inst->session,
 					&frame_data);
 		} else {
diff --git a/drivers/media/video/msm_vidc/vidc_hal.c b/drivers/media/video/msm_vidc/vidc_hal.c
index 13a319d9..85e984d 100644
--- a/drivers/media/video/msm_vidc/vidc_hal.c
+++ b/drivers/media/video/msm_vidc/vidc_hal.c
@@ -23,7 +23,7 @@
 #define REG_ADDR_OFFSET_BITMASK	0x000FFFFF
 
 /*Workaround for virtio */
-#define HFI_VIRTIO_FW_BIAS		0x34f00000
+#define HFI_VIRTIO_FW_BIAS		0x14f00000
 
 struct hal_device_data hal_ctxt;
 
@@ -40,7 +40,7 @@
 
 	sys_init = (struct hfi_cmd_sys_session_init_packet *)packet;
 	sess = (struct hal_session *) sys_init->session_id;
-	switch (sys_init->packet) {
+	switch (sys_init->packet_type) {
 	case HFI_CMD_SESSION_EMPTY_BUFFER:
 		if (sess->is_decoder) {
 			struct hfi_cmd_session_empty_buffer_compressed_packet
@@ -73,7 +73,7 @@
 			struct hfi_buffer_info *buff;
 			buff = (struct hfi_buffer_info *) pkt->rg_buffer_info;
 			buff->buffer_addr -= HFI_VIRTIO_FW_BIAS;
-			buff->extradata_addr -= HFI_VIRTIO_FW_BIAS;
+			buff->extra_data_addr -= HFI_VIRTIO_FW_BIAS;
 		} else {
 			for (i = 0; i < pkt->num_buffers; i++)
 				pkt->rg_buffer_info[i] -= HFI_VIRTIO_FW_BIAS;
@@ -89,7 +89,7 @@
 			struct hfi_buffer_info *buff;
 			buff = (struct hfi_buffer_info *) pkt->rg_buffer_info;
 			buff->buffer_addr -= HFI_VIRTIO_FW_BIAS;
-			buff->extradata_addr -= HFI_VIRTIO_FW_BIAS;
+			buff->extra_data_addr -= HFI_VIRTIO_FW_BIAS;
 		} else {
 			for (i = 0; i < pkt->num_buffers; i++)
 				pkt->rg_buffer_info[i] -= HFI_VIRTIO_FW_BIAS;
@@ -640,7 +640,8 @@
 		goto err_no_dev;
 	}
 	pkt.size = sizeof(struct hfi_cmd_sys_init_packet);
-	pkt.packet = HFI_CMD_SYS_INIT;
+	pkt.packet_type = HFI_CMD_SYS_INIT;
+	pkt.arch_type = HFI_ARCH_OX_OFFSET;
 	if (vidc_hal_iface_cmdq_write(dev, &pkt)) {
 		rc = -ENOTEMPTY;
 		goto err_write_fail;
@@ -664,8 +665,6 @@
 	}
 	write_register(dev->hal_data->register_base_addr,
 		VIDC_CPU_CS_SCIACMDARG3, 0, 0);
-	disable_irq_nosync(dev->hal_data->irq);
-	vidc_hal_interface_queues_release(dev);
 	HAL_MSG_INFO("\nHAL exited\n");
 	return 0;
 }
@@ -742,8 +741,8 @@
 	switch (resource_hdr->resource_id) {
 	case VIDC_RESOURCE_OCMEM:
 	{
-		struct hfi_resource_ocmem_type *hfioc_mem =
-			(struct hfi_resource_ocmem_type *)
+		struct hfi_resource_ocmem *hfioc_mem =
+			(struct hfi_resource_ocmem *)
 			&pkt->rg_resource_data[0];
 		struct vidc_mem_addr *vidc_oc_mem =
 			(struct vidc_mem_addr *) resource_value;
@@ -751,7 +750,7 @@
 		pkt->resource_type = HFI_RESOURCE_OCMEM;
 		hfioc_mem->size = (u32) vidc_oc_mem->mem_size;
 		hfioc_mem->mem = (u8 *) vidc_oc_mem->align_device_addr;
-		pkt->size += sizeof(struct hfi_resource_ocmem_type);
+		pkt->size += sizeof(struct hfi_resource_ocmem);
 		if (vidc_hal_iface_cmdq_write(dev, pkt))
 			rc = -ENOTEMPTY;
 		break;
@@ -807,7 +806,41 @@
 		rc = -ENOTEMPTY;
 	return rc;
 }
-
+static u32 get_hfi_buffer(int hal_buffer)
+{
+	u32 buffer;
+	switch (hal_buffer) {
+	case HAL_BUFFER_INPUT:
+		buffer = HFI_BUFFER_INPUT;
+		break;
+	case HAL_BUFFER_OUTPUT:
+		buffer = HFI_BUFFER_OUTPUT;
+		break;
+	case HAL_BUFFER_OUTPUT2:
+		buffer = HFI_BUFFER_OUTPUT;
+		break;
+	case HAL_BUFFER_EXTRADATA_INPUT:
+		buffer = HFI_BUFFER_EXTRADATA_INPUT;
+		break;
+	case HAL_BUFFER_EXTRADATA_OUTPUT:
+		buffer = HFI_BUFFER_EXTRADATA_OUTPUT;
+		break;
+	case HAL_BUFFER_EXTRADATA_OUTPUT2:
+		buffer = HFI_BUFFER_EXTRADATA_OUTPUT2;
+		break;
+	case HAL_BUFFER_INTERNAL_SCRATCH:
+		buffer = HFI_BUFFER_INTERNAL_SCRATCH;
+		break;
+	case HAL_BUFFER_INTERNAL_PERSIST:
+		buffer = HFI_BUFFER_INTERNAL_PERSIST;
+		break;
+	default:
+		HAL_MSG_ERROR("Invalid buffer type : 0x%x\n", hal_buffer);
+		buffer = 0;
+		break;
+	}
+	return buffer;
+}
+
 int vidc_hal_session_set_property(void *sess,
 	enum hal_property ptype, void *pdata)
 {
@@ -832,24 +865,37 @@
 	switch (ptype) {
 	case HAL_CONFIG_FRAME_RATE:
 	{
-		struct hfi_frame_rate *hfi_fps;
+		struct hfi_frame_rate *hfi;
+		u32 buffer;
+		struct hal_frame_rate *prop =
+			(struct hal_frame_rate *) pdata;
 		pkt->rg_property_data[0] = HFI_PROPERTY_CONFIG_FRAME_RATE;
-		hfi_fps = (struct hfi_frame_rate *) &pkt->rg_property_data[1];
-		memcpy(hfi_fps, (struct hfi_frame_rate *)
-			pdata, sizeof(struct hfi_frame_rate));
+		hfi = (struct hfi_frame_rate *) &pkt->rg_property_data[1];
+		buffer = get_hfi_buffer(prop->buffer_type);
+		if (buffer)
+			hfi->buffer_type = buffer;
+		else
+			return -EINVAL;
+		hfi->frame_rate = prop->frame_rate;
 		pkt->size += sizeof(u32) + sizeof(struct hfi_frame_rate);
 		break;
 	}
 	case HAL_PARAM_UNCOMPRESSED_FORMAT_SELECT:
 	{
-		struct hfi_uncompressed_format_select *hfi_buf_fmt;
+		u32 buffer;
+		struct hfi_uncompressed_format_select *hfi;
+		struct hal_uncompressed_format_select *prop =
+			(struct hal_uncompressed_format_select *) pdata;
 		pkt->rg_property_data[0] =
 			HFI_PROPERTY_PARAM_UNCOMPRESSED_FORMAT_SELECT;
-		hfi_buf_fmt =
-		(struct hfi_uncompressed_format_select *)
-		&pkt->rg_property_data[1];
-		memcpy(hfi_buf_fmt, (struct hfi_uncompressed_format_select *)
-			pdata, sizeof(struct hfi_uncompressed_format_select));
+		hfi = (struct hfi_uncompressed_format_select *)
+			&pkt->rg_property_data[1];
+		buffer = get_hfi_buffer(prop->buffer_type);
+		if (buffer)
+			hfi->buffer_type = buffer;
+		else
+			return -EINVAL;
+		hfi->format = prop->format;
 		pkt->size += sizeof(u32) + sizeof(struct
 			hfi_uncompressed_format_select);
 		break;
@@ -862,11 +908,18 @@
 		break;
 	case HAL_PARAM_FRAME_SIZE:
 	{
-		struct hfi_frame_size *hfi_rect;
+		u32 buffer;
+		struct hfi_frame_size *hfi;
+		struct hal_frame_size *prop = (struct hal_frame_size *) pdata;
 		pkt->rg_property_data[0] = HFI_PROPERTY_PARAM_FRAME_SIZE;
-		hfi_rect = (struct hfi_frame_size *) &pkt->rg_property_data[1];
-		memcpy(hfi_rect, (struct hfi_frame_size *) pdata,
-			sizeof(struct hfi_frame_size));
+		hfi = (struct hfi_frame_size *) &pkt->rg_property_data[1];
+		buffer = get_hfi_buffer(prop->buffer_type);
+		if (buffer)
+			hfi->buffer_type = buffer;
+		else
+			return -EINVAL;
+		hfi->height = prop->height;
+		hfi->width = prop->width;
 		pkt->size += sizeof(u32) + sizeof(struct hfi_frame_size);
 		break;
 	}
@@ -875,38 +928,85 @@
 		struct hfi_enable *hfi;
 		pkt->rg_property_data[0] = HFI_PROPERTY_CONFIG_REALTIME;
 		hfi = (struct hfi_enable *) &pkt->rg_property_data[1];
-		memcpy(hfi, (struct hfi_enable *) pdata,
-				sizeof(struct hfi_enable));
-		pkt->size += sizeof(u32) + sizeof(struct hfi_enable);
+		hfi->enable = ((struct hfi_enable *) pdata)->enable;
+		pkt->size += sizeof(u32) * 2;
 		break;
 	}
 	case HAL_PARAM_BUFFER_COUNT_ACTUAL:
 	{
+		u32 buffer;
 		struct hfi_buffer_count_actual *hfi;
+		struct hal_buffer_count_actual *prop =
+			(struct hal_buffer_count_actual *) pdata;
 		pkt->rg_property_data[0] =
 			HFI_PROPERTY_PARAM_BUFFER_COUNT_ACTUAL;
 		hfi = (struct hfi_buffer_count_actual *)
 				&pkt->rg_property_data[1];
-		memcpy(hfi, (struct hfi_buffer_count_actual *) pdata,
-			sizeof(struct hfi_buffer_count_actual));
+		hfi->buffer_count_actual = prop->buffer_count_actual;
+		buffer = get_hfi_buffer(prop->buffer_type);
+		if (buffer)
+			hfi->buffer_type = buffer;
+		else
+			return -EINVAL;
 		pkt->size += sizeof(u32) + sizeof(struct
 					hfi_buffer_count_actual);
 		break;
 	}
 	case HAL_PARAM_NAL_STREAM_FORMAT_SELECT:
 	{
+		struct hal_nal_stream_format_supported *prop =
+			(struct hal_nal_stream_format_supported *)pdata;
 		pkt->rg_property_data[0] =
 			HFI_PROPERTY_PARAM_NAL_STREAM_FORMAT_SELECT;
-		pkt->rg_property_data[1] = (enum HFI_NAL_STREAM_FORMAT)pdata;
-		pkt->size += sizeof(u32) + sizeof(enum HFI_NAL_STREAM_FORMAT);
+		HAL_MSG_INFO("\nnal stream format: %d",
+				prop->nal_stream_format_supported);
+		switch (prop->nal_stream_format_supported) {
+		case HAL_NAL_FORMAT_STARTCODES:
+			pkt->rg_property_data[1] =
+				HFI_NAL_FORMAT_STARTCODES;
+			break;
+		case HAL_NAL_FORMAT_ONE_NAL_PER_BUFFER:
+			pkt->rg_property_data[1] =
+				HFI_NAL_FORMAT_ONE_NAL_PER_BUFFER;
+			break;
+		case HAL_NAL_FORMAT_ONE_BYTE_LENGTH:
+			pkt->rg_property_data[1] =
+				HFI_NAL_FORMAT_ONE_BYTE_LENGTH;
+			break;
+		case HAL_NAL_FORMAT_TWO_BYTE_LENGTH:
+			pkt->rg_property_data[1] =
+				HFI_NAL_FORMAT_TWO_BYTE_LENGTH;
+			break;
+		case HAL_NAL_FORMAT_FOUR_BYTE_LENGTH:
+			pkt->rg_property_data[1] =
+				HFI_NAL_FORMAT_FOUR_BYTE_LENGTH;
+			break;
+		default:
+			HAL_MSG_ERROR("Invalid nal format: 0x%x",
+				  prop->nal_stream_format_supported);
+			break;
+		}
+		pkt->size += sizeof(u32) * 2;
 		break;
 	}
 	case HAL_PARAM_VDEC_OUTPUT_ORDER:
 	{
+		int *data = (int *) pdata;
 		pkt->rg_property_data[0] =
 			HFI_PROPERTY_PARAM_VDEC_OUTPUT_ORDER;
-		pkt->rg_property_data[1] = (enum HFI_OUTPUT_ORDER)pdata;
-		pkt->size += sizeof(u32) + sizeof(enum HFI_OUTPUT_ORDER);
+		switch (*data) {
+		case HAL_OUTPUT_ORDER_DECODE:
+			pkt->rg_property_data[1] = HFI_OUTPUT_ORDER_DECODE;
+			break;
+		case HAL_OUTPUT_ORDER_DISPLAY:
+			pkt->rg_property_data[1] = HFI_OUTPUT_ORDER_DISPLAY;
+			break;
+		default:
+			HAL_MSG_ERROR("invalid output order: 0x%x",
+						  *data);
+			break;
+		}
+		pkt->size += sizeof(u32) * 2;
 		break;
 	}
 	case HAL_PARAM_VDEC_PICTURE_TYPE_DECODE:
@@ -916,7 +1016,7 @@
 			HFI_PROPERTY_PARAM_VDEC_PICTURE_TYPE_DECODE;
 		hfi = (struct hfi_enable_picture *) &pkt->rg_property_data[1];
 		hfi->picture_type = (u32) pdata;
-		pkt->size += sizeof(u32) + sizeof(struct hfi_enable_picture);
+		pkt->size += sizeof(u32) * 2;
 		break;
 	}
 	case HAL_PARAM_VDEC_OUTPUT2_KEEP_ASPECT_RATIO:
@@ -925,9 +1025,8 @@
 		pkt->rg_property_data[0] =
 			HFI_PROPERTY_PARAM_VDEC_OUTPUT2_KEEP_ASPECT_RATIO;
 		hfi = (struct hfi_enable *) &pkt->rg_property_data[1];
-		memcpy(hfi, (struct hfi_enable *) pdata,
-				sizeof(struct hfi_enable));
-		pkt->size += sizeof(u32) + sizeof(struct hfi_enable);
+		hfi->enable = ((struct hfi_enable *) pdata)->enable;
+		pkt->size += sizeof(u32) * 2;
 		break;
 	}
 	case HAL_CONFIG_VDEC_POST_LOOP_DEBLOCKER:
@@ -936,41 +1035,64 @@
 		pkt->rg_property_data[0] =
 			HFI_PROPERTY_CONFIG_VDEC_POST_LOOP_DEBLOCKER;
 		hfi = (struct hfi_enable *) &pkt->rg_property_data[1];
-		memcpy(hfi, (struct hfi_enable *) pdata,
-				sizeof(struct hfi_enable));
-		pkt->size += sizeof(u32) + sizeof(struct hfi_enable);
+		hfi->enable = ((struct hfi_enable *) pdata)->enable;
+		pkt->size += sizeof(u32) * 2;
 		break;
 	}
 	case HAL_PARAM_VDEC_MULTI_STREAM:
 	{
+		u32 buffer;
 		struct hfi_multi_stream *hfi;
+		struct hal_multi_stream *prop =
+			(struct hal_multi_stream *) pdata;
 		pkt->rg_property_data[0] =
 			HFI_PROPERTY_PARAM_VDEC_MULTI_STREAM;
 		hfi = (struct hfi_multi_stream *) &pkt->rg_property_data[1];
-		memcpy(hfi, (struct hfi_multi_stream *)pdata,
-				sizeof(struct hfi_multi_stream));
+		buffer = get_hfi_buffer(prop->buffer_type);
+		if (buffer)
+			hfi->buffer_type = buffer;
+		else
+			return -EINVAL;
+		hfi->enable = prop->enable;
+		hfi->width = prop->width;
+		hfi->height = prop->height;
 		pkt->size += sizeof(u32) + sizeof(struct hfi_multi_stream);
 		break;
 	}
 	case HAL_PARAM_VDEC_DISPLAY_PICTURE_BUFFER_COUNT:
 	{
-		struct hfi_display_picture_buffer_count *hfi_disp_buf;
+		struct hfi_display_picture_buffer_count *hfi;
+		struct hal_display_picture_buffer_count *prop =
+			(struct hal_display_picture_buffer_count *) pdata;
 		pkt->rg_property_data[0] =
 			HFI_PROPERTY_PARAM_VDEC_DISPLAY_PICTURE_BUFFER_COUNT;
-		hfi_disp_buf = (struct hfi_display_picture_buffer_count *)
+		hfi = (struct hfi_display_picture_buffer_count *)
 			&pkt->rg_property_data[1];
-		memcpy(hfi_disp_buf,
-			(struct hfi_display_picture_buffer_count *)pdata,
-			sizeof(struct hfi_display_picture_buffer_count));
+		hfi->count = prop->count;
+		hfi->enable = prop->enable;
 		pkt->size += sizeof(u32) +
 			sizeof(struct hfi_display_picture_buffer_count);
 		break;
 	}
 	case HAL_PARAM_DIVX_FORMAT:
 	{
+		int *data = pdata;
 		pkt->rg_property_data[0] = HFI_PROPERTY_PARAM_DIVX_FORMAT;
-		pkt->rg_property_data[1] = (enum HFI_DIVX_FORMAT)pdata;
-		pkt->size += sizeof(u32) + sizeof(enum HFI_DIVX_FORMAT);
+		switch (*data) {
+		case HAL_DIVX_FORMAT_4:
+			pkt->rg_property_data[1] = HFI_DIVX_FORMAT_4;
+			break;
+		case HAL_DIVX_FORMAT_5:
+			pkt->rg_property_data[1] = HFI_DIVX_FORMAT_5;
+			break;
+		case HAL_DIVX_FORMAT_6:
+			pkt->rg_property_data[1] = HFI_DIVX_FORMAT_6;
+			break;
+		default:
+			HAL_MSG_ERROR("Invalid divx format: 0x%x", *data);
+			break;
+		}
+		pkt->size += sizeof(u32) * 2;
 		break;
 	}
 	case HAL_CONFIG_VDEC_MB_ERROR_MAP_REPORTING:
@@ -979,25 +1101,23 @@
 		pkt->rg_property_data[0] =
 			HFI_PROPERTY_CONFIG_VDEC_MB_ERROR_MAP_REPORTING;
 		hfi = (struct hfi_enable *) &pkt->rg_property_data[1];
-		memcpy(hfi, (struct hfi_enable *) pdata,
-				sizeof(struct hfi_enable));
-		pkt->size += sizeof(u32) + sizeof(struct hfi_enable);
+		hfi->enable = ((struct hfi_enable *) pdata)->enable;
+		pkt->size += sizeof(u32) * 2;
 		break;
 	}
 	case HAL_PARAM_VDEC_CONTINUE_DATA_TRANSFER:
 	{
-		struct hfi_enable *enable;
+		struct hfi_enable *hfi;
 		pkt->rg_property_data[0] =
 			HFI_PROPERTY_PARAM_VDEC_CONTINUE_DATA_TRANSFER;
-		enable = (struct hfi_enable *) &pkt->rg_property_data[1];
-		memcpy(enable, (struct hfi_enable *) pdata,
-				sizeof(struct hfi_enable));
-		pkt->size += sizeof(u32) + sizeof(struct hfi_enable);
+		hfi = (struct hfi_enable *) &pkt->rg_property_data[1];
+		hfi->enable = ((struct hfi_enable *) pdata)->enable;
+		pkt->size += sizeof(u32) * 2;
 		break;
 	}
 	case HAL_CONFIG_VENC_REQUEST_IFRAME:
 		pkt->rg_property_data[0] =
-			HFI_PROPERTY_CONFIG_VENC_REQUEST_IFRAME;
+			HFI_PROPERTY_CONFIG_VENC_REQUEST_SYNC_FRAME;
 		break;
 	case HAL_PARAM_VENC_MPEG4_SHORT_HEADER:
 		break;
@@ -1009,31 +1129,64 @@
 		pkt->rg_property_data[0] =
 			HFI_PROPERTY_CONFIG_VENC_TARGET_BITRATE;
 		hfi = (struct hfi_bitrate *) &pkt->rg_property_data[1];
-		hfi->bit_rate = ((struct hfi_bitrate *)pdata)->bit_rate;
-		pkt->size += sizeof(u32) + sizeof(struct hfi_bitrate);
+		hfi->bit_rate = ((struct hal_bitrate *)pdata)->bit_rate;
+		pkt->size += sizeof(u32) * 2;
 		break;
 	}
 	case HAL_PARAM_PROFILE_LEVEL_CURRENT:
 	{
-		struct hfi_profile_level *hfi_profile_level;
+		struct hfi_profile_level *hfi;
+		struct hal_profile_level *prop =
+			(struct hal_profile_level *) pdata;
 		pkt->rg_property_data[0] =
 			HFI_PROPERTY_PARAM_PROFILE_LEVEL_CURRENT;
-		hfi_profile_level = (struct hfi_profile_level *)
-		&pkt->rg_property_data[1];
-		memcpy(hfi_profile_level, (struct hfi_profile_level *) pdata,
-			sizeof(struct hfi_profile_level));
+		hfi = (struct hfi_profile_level *)
+			&pkt->rg_property_data[1];
+		hfi->level = (u32) prop->level;
+		hfi->profile = prop->profile;
+		if (!hfi->profile)
+			hfi->profile = HFI_H264_PROFILE_HIGH;
+		if (!hfi->level)
+			hfi->level = 1;
 		pkt->size += sizeof(u32) + sizeof(struct hfi_profile_level);
 		break;
 	}
 	case HAL_PARAM_VENC_H264_ENTROPY_CONTROL:
 	{
 		struct hfi_h264_entropy_control *hfi;
+		struct hal_h264_entropy_control *prop =
+			(struct hal_h264_entropy_control *) pdata;
 		pkt->rg_property_data[0] =
 			HFI_PROPERTY_PARAM_VENC_H264_ENTROPY_CONTROL;
 		hfi = (struct hfi_h264_entropy_control *)
 			&pkt->rg_property_data[1];
-		memcpy(hfi, (struct hfi_h264_entropy_control *) pdata,
-				sizeof(struct hfi_h264_entropy_control));
+		switch (prop->entropy_mode) {
+		case HAL_H264_ENTROPY_CAVLC:
+			hfi->cabac_model = HFI_H264_ENTROPY_CAVLC;
+			break;
+		case HAL_H264_ENTROPY_CABAC:
+			hfi->cabac_model = HFI_H264_ENTROPY_CABAC;
+			switch (prop->cabac_model) {
+			case HAL_H264_CABAC_MODEL_0:
+				hfi->cabac_model = HFI_H264_CABAC_MODEL_0;
+				break;
+			case HAL_H264_CABAC_MODEL_1:
+				hfi->cabac_model = HFI_H264_CABAC_MODEL_1;
+				break;
+			case HAL_H264_CABAC_MODEL_2:
+				hfi->cabac_model = HFI_H264_CABAC_MODEL_2;
+				break;
+			default:
+				HAL_MSG_ERROR("Invalid cabac model 0x%x",
+							  prop->cabac_model);
+				break;
+			}
+			break;
+		default:
+			HAL_MSG_ERROR("Invalid entropy selected: 0x%x",
+				prop->entropy_mode);
+			break;
+		}
 		pkt->size += sizeof(u32) + sizeof(
 			struct hfi_h264_entropy_control);
 		break;
@@ -1042,8 +1195,28 @@
 	{
 		pkt->rg_property_data[0] =
 			HFI_PROPERTY_PARAM_VENC_RATE_CONTROL;
-		pkt->rg_property_data[1] = (enum HFI_RATE_CONTROL)pdata;
-		pkt->size += sizeof(u32) + sizeof(enum HFI_RATE_CONTROL);
+		switch ((enum hal_rate_control)pdata) {
+		case HAL_RATE_CONTROL_OFF:
+			pkt->rg_property_data[1] = HFI_RATE_CONTROL_OFF;
+			break;
+		case HAL_RATE_CONTROL_CBR_CFR:
+			pkt->rg_property_data[1] = HFI_RATE_CONTROL_CBR_CFR;
+			break;
+		case HAL_RATE_CONTROL_CBR_VFR:
+			pkt->rg_property_data[1] = HFI_RATE_CONTROL_CBR_VFR;
+			break;
+		case HAL_RATE_CONTROL_VBR_CFR:
+			pkt->rg_property_data[1] = HFI_RATE_CONTROL_VBR_CFR;
+			break;
+		case HAL_RATE_CONTROL_VBR_VFR:
+			pkt->rg_property_data[1] = HFI_RATE_CONTROL_VBR_VFR;
+			break;
+		default:
+			HAL_MSG_ERROR("Invalid Rate control setting: 0x%x",
+						  (int) pdata);
+			break;
+		}
+		pkt->size += sizeof(u32) * 2;
 		break;
 	}
 	case HAL_PARAM_VENC_MPEG4_TIME_RESOLUTION:
@@ -1056,8 +1229,7 @@
 		hfi->time_increment_resolution =
 			((struct hal_mpeg4_time_resolution *)pdata)->
 					time_increment_resolution;
-		pkt->size += sizeof(u32) + sizeof(
-			struct hfi_mpeg4_time_resolution);
+		pkt->size += sizeof(u32) * 2;
 		break;
 	}
 	case HAL_PARAM_VENC_MPEG4_HEADER_EXTENSION:
@@ -1066,20 +1238,36 @@
 		pkt->rg_property_data[0] =
 			HFI_PROPERTY_PARAM_VENC_MPEG4_HEADER_EXTENSION;
 		hfi = (struct hfi_mpeg4_header_extension *)
-		&pkt->rg_property_data[1];
+			&pkt->rg_property_data[1];
 		hfi->header_extension = (u32) pdata;
-		pkt->size += sizeof(u32) +
-			sizeof(struct hfi_mpeg4_header_extension);
+		pkt->size += sizeof(u32) * 2;
 		break;
 	}
 	case HAL_PARAM_VENC_H264_DEBLOCK_CONTROL:
 	{
 		struct hfi_h264_db_control *hfi;
+		struct hal_h264_db_control *prop =
+			(struct hal_h264_db_control *) pdata;
 		pkt->rg_property_data[0] =
 			HFI_PROPERTY_PARAM_VENC_H264_DEBLOCK_CONTROL;
 		hfi = (struct hfi_h264_db_control *) &pkt->rg_property_data[1];
-		memcpy(hfi, (struct hfi_h264_db_control *) pdata,
-				sizeof(struct hfi_h264_db_control));
+		switch (prop->mode) {
+		case HAL_H264_DB_MODE_DISABLE:
+			hfi->mode = HFI_H264_DB_MODE_DISABLE;
+			break;
+		case HAL_H264_DB_MODE_SKIP_SLICE_BOUNDARY:
+			hfi->mode = HFI_H264_DB_MODE_SKIP_SLICE_BOUNDARY;
+			break;
+		case HAL_H264_DB_MODE_ALL_BOUNDARY:
+			hfi->mode = HFI_H264_DB_MODE_ALL_BOUNDARY;
+			break;
+		default:
+			HAL_MSG_ERROR("Invalid deblocking mode: 0x%x",
+						  prop->mode);
+			break;
+		}
+		hfi->slice_alpha_offset = prop->slice_alpha_offset;
+		hfi->slice_beta_offset = prop->slice_beta_offset;
 		pkt->size += sizeof(u32) +
 			sizeof(struct hfi_h264_db_control);
 		break;
@@ -1090,11 +1278,10 @@
 		pkt->rg_property_data[0] =
 			HFI_PROPERTY_PARAM_VENC_TEMPORAL_SPATIAL_TRADEOFF;
 		hfi = (struct hfi_temporal_spatial_tradeoff *)
-		&pkt->rg_property_data[1];
+			&pkt->rg_property_data[1];
 		hfi->ts_factor = ((struct hfi_temporal_spatial_tradeoff *)
 					pdata)->ts_factor;
-		pkt->size += sizeof(u32) +
-			sizeof(struct hfi_temporal_spatial_tradeoff);
+		pkt->size += sizeof(u32) * 2;
 		break;
 	}
 	case HAL_PARAM_VENC_SESSION_QP:
@@ -1125,7 +1312,7 @@
 		pkt->rg_property_data[0] = HFI_PROPERTY_CONFIG_VENC_IDR_PERIOD;
 		hfi = (struct hfi_idr_period *) &pkt->rg_property_data[1];
 		hfi->idr_period = ((struct hfi_idr_period *) pdata)->idr_period;
-		pkt->size += sizeof(u32) + sizeof(struct hfi_idr_period);
+		pkt->size += sizeof(u32) * 2;
 		break;
 	}
 	case HAL_CONFIG_VPE_OPERATIONS:
@@ -1133,25 +1320,67 @@
 	case HAL_PARAM_VENC_INTRA_REFRESH:
 	{
 		struct hfi_intra_refresh *hfi;
+		struct hal_intra_refresh *prop =
+			(struct hal_intra_refresh *) pdata;
 		pkt->rg_property_data[0] =
 			HFI_PROPERTY_PARAM_VENC_INTRA_REFRESH;
 		hfi = (struct hfi_intra_refresh *) &pkt->rg_property_data[1];
-		memcpy(hfi, (struct hfi_intra_refresh *) pdata,
-				sizeof(struct hfi_intra_refresh));
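+		/* Map the HAL intra-refresh mode to HFI and carry over the MB counts */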
+		switch (prop->mode) {
+		case HAL_INTRA_REFRESH_NONE:
+			hfi->mode = HFI_INTRA_REFRESH_NONE;
+			break;
+		case HAL_INTRA_REFRESH_ADAPTIVE:
+			hfi->mode = HFI_INTRA_REFRESH_ADAPTIVE;
+			break;
+		case HAL_INTRA_REFRESH_CYCLIC:
+			hfi->mode = HFI_INTRA_REFRESH_CYCLIC;
+			break;
+		case HAL_INTRA_REFRESH_CYCLIC_ADAPTIVE:
+			hfi->mode = HFI_INTRA_REFRESH_CYCLIC_ADAPTIVE;
+			break;
+		case HAL_INTRA_REFRESH_RANDOM:
+			hfi->mode = HFI_INTRA_REFRESH_RANDOM;
+			break;
+		default:
+			HAL_MSG_ERROR("Invalid intra refresh setting: 0x%x",
+				prop->mode);
+			break;
+		}
+		hfi->air_mbs = prop->air_mbs;
+		hfi->air_ref = prop->air_ref;
+		hfi->cir_mbs = prop->cir_mbs;
 		pkt->size += sizeof(u32) + sizeof(struct hfi_intra_refresh);
 		break;
 	}
 	case HAL_PARAM_VENC_MULTI_SLICE_CONTROL:
 	{
 		struct hfi_multi_slice_control *hfi;
+		struct hal_multi_slice_control *prop =
+			(struct hal_multi_slice_control *) pdata;
 		pkt->rg_property_data[0] =
 			HFI_PROPERTY_PARAM_VENC_MULTI_SLICE_CONTROL;
 		hfi = (struct hfi_multi_slice_control *)
-				&pkt->rg_property_data[1];
-		memcpy(hfi, (struct hfi_multi_slice_control *) pdata,
-				sizeof(struct hfi_multi_slice_control));
+			&pkt->rg_property_data[1];
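+		/* Pick the HFI multi-slice mode that matches the HAL request */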
+		switch (prop->multi_slice) {
+		case HAL_MULTI_SLICE_OFF:
+			hfi->multi_slice = HFI_MULTI_SLICE_OFF;
+			break;
+		case HAL_MULTI_SLICE_GOB:
+			hfi->multi_slice = HFI_MULTI_SLICE_GOB;
+			break;
+		case HAL_MULTI_SLICE_BY_MB_COUNT:
+			hfi->multi_slice = HFI_MULTI_SLICE_BY_MB_COUNT;
+			break;
+		case HAL_MULTI_SLICE_BY_BYTE_COUNT:
+			hfi->multi_slice = HFI_MULTI_SLICE_BY_BYTE_COUNT;
+			break;
+		default:
+			HAL_MSG_ERROR("Invalid slice settings: 0x%x",
+				prop->multi_slice);
+			break;
+		}
 		pkt->size += sizeof(u32) + sizeof(struct
-						hfi_multi_slice_control);
+					hfi_multi_slice_control);
 		break;
 	}
 	case HAL_CONFIG_VPE_DEINTERLACE:
@@ -1161,8 +1390,8 @@
 		struct hfi_debug_config *hfi;
 		pkt->rg_property_data[0] = HFI_PROPERTY_SYS_DEBUG_CONFIG;
 		hfi = (struct hfi_debug_config *) &pkt->rg_property_data[1];
-		memcpy(hfi, (struct hfi_debug_config *) pdata,
-				sizeof(struct hfi_debug_config));
+		hfi->debug_config = ((struct hal_debug_config *)
+					pdata)->debug_config;
 		pkt->size = sizeof(struct hfi_cmd_sys_set_property_packet) +
 			sizeof(struct hfi_debug_config);
 		break;
@@ -1353,7 +1582,7 @@
 	new_session->device = dev;
 	list_add_tail(&new_session->list, &dev->sess_head);
 	pkt.size = sizeof(struct hfi_cmd_sys_session_init_packet);
-	pkt.packet = HFI_CMD_SYS_SESSION_INIT;
+	pkt.packet_type = HFI_CMD_SYS_SESSION_INIT;
 	pkt.session_id = (u32) new_session;
 	pkt.session_domain = session_type;
 	pkt.session_codec = codec_type;
@@ -1363,7 +1592,7 @@
 }
 
 static int vidc_hal_send_session_cmd(void *session_id,
-	 enum HFI_COMMAND pkt_type)
+	 int pkt_type)
 {
 	struct vidc_hal_session_cmd_pkt pkt;
 	int rc = 0;
@@ -1400,6 +1629,7 @@
 int vidc_hal_session_set_buffers(void *sess,
 	struct vidc_buffer_addr_info *buffer_info)
 {
+	u32 buffer;
 	struct hfi_cmd_session_set_buffers_packet *pkt;
 	u8 packet[VIDC_IFACEQ_VAR_LARGE_PKT_SIZE];
 	int rc = 0;
@@ -1430,7 +1660,7 @@
 	if ((buffer_info->buffer_type == HAL_BUFFER_OUTPUT) ||
 		(buffer_info->buffer_type == HAL_BUFFER_OUTPUT2)) {
 		struct hfi_buffer_info *buff;
-		pkt->extradata_size = buffer_info->extradata_size;
+		pkt->extra_data_size = buffer_info->extradata_size;
 		pkt->size = sizeof(struct hfi_cmd_session_set_buffers_packet) -
 			sizeof(u32) + ((buffer_info->num_buffers) *
 			sizeof(struct hfi_buffer_info));
@@ -1438,25 +1668,23 @@
 		for (i = 0; i < pkt->num_buffers; i++) {
 			buff->buffer_addr =
 				buffer_info->align_device_addr;
-			buff->extradata_addr =
+			buff->extra_data_addr =
 				buffer_info->extradata_addr;
 		}
 	} else {
-		pkt->extradata_size = 0;
+		pkt->extra_data_size = 0;
 		pkt->size = sizeof(struct hfi_cmd_session_set_buffers_packet) +
 			((buffer_info->num_buffers - 1) * sizeof(u32));
 		for (i = 0; i < pkt->num_buffers; i++)
 			pkt->rg_buffer_info[i] =
 			buffer_info->align_device_addr;
 	}
-
-	if (buffer_info->buffer_type == HAL_BUFFER_INTERNAL_SCRATCH)
-		pkt->buffer_type = HFI_BUFFER_INTERNAL_SCRATCH;
-	else if (buffer_info->buffer_type == HAL_BUFFER_INTERNAL_PERSIST)
-		pkt->buffer_type = HFI_BUFFER_INTERNAL_PERSIST;
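+	/* Reject buffer types that have no HFI equivalent */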
+	buffer = get_hfi_buffer(buffer_info->buffer_type);
+	if (buffer)
+		pkt->buffer_type = buffer;
 	else
-		pkt->buffer_type = (enum HFI_BUFFER) buffer_info->buffer_type;
-
+		return -EINVAL;
+	HAL_MSG_INFO("set buffers: 0x%x", buffer_info->buffer_type);
 	if (vidc_hal_iface_cmdq_write(session->device, pkt))
 		rc = -ENOTEMPTY;
 	return rc;
@@ -1465,6 +1693,7 @@
 int vidc_hal_session_release_buffers(void *sess,
 	struct vidc_buffer_addr_info *buffer_info)
 {
+	u32 buffer;
 	struct hfi_cmd_session_release_buffer_packet *pkt;
 	u8 packet[VIDC_IFACEQ_VAR_LARGE_PKT_SIZE];
 	int rc = 0;
@@ -1486,7 +1715,6 @@
 		((buffer_info->num_buffers - 1) * sizeof(u32));
 	pkt->packet_type = HFI_CMD_SESSION_RELEASE_BUFFERS;
 	pkt->session_id = (u32) session;
-	pkt->buffer_type = (enum HFI_BUFFER) buffer_info->buffer_type;
 	pkt->buffer_size = buffer_info->buffer_size;
 	pkt->num_buffers = buffer_info->num_buffers;
 
@@ -1497,10 +1725,10 @@
 		for (i = 0; i < pkt->num_buffers; i++) {
 			buff->buffer_addr =
 				buffer_info->align_device_addr;
-			buff->extradata_addr =
+			buff->extra_data_addr =
 				buffer_info->extradata_addr;
 		}
-		pkt->extradata_size = buffer_info->extradata_size;
+		pkt->extra_data_size = buffer_info->extradata_size;
 		pkt->size = sizeof(struct hfi_cmd_session_set_buffers_packet) -
 			sizeof(u32) + ((buffer_info->num_buffers) *
 			sizeof(struct hfi_buffer_info));
@@ -1508,11 +1736,16 @@
 		for (i = 0; i < pkt->num_buffers; i++)
 			pkt->rg_buffer_info[i] =
 			buffer_info->align_device_addr;
-		pkt->extradata_size = 0;
+		pkt->extra_data_size = 0;
 		pkt->size = sizeof(struct hfi_cmd_session_set_buffers_packet) +
 			((buffer_info->num_buffers - 1) * sizeof(u32));
 	}
-
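+	/* As with set_buffers, bail out on buffer types with no HFI mapping */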
+	buffer = get_hfi_buffer(buffer_info->buffer_type);
+	if (buffer)
+		pkt->buffer_type = buffer;
+	else
+		return -EINVAL;
+	HAL_MSG_INFO("Release buffers: 0x%x", buffer_info->buffer_type);
 	if (vidc_hal_iface_cmdq_write(session->device, pkt))
 		rc = -ENOTEMPTY;
 	return rc;
@@ -1572,8 +1805,8 @@
 			struct hfi_cmd_session_empty_buffer_compressed_packet);
 		pkt.packet_type = HFI_CMD_SESSION_EMPTY_BUFFER;
 		pkt.session_id = (u32) session;
-		pkt.timestamp_hi = (int) (((u64)input_frame->timestamp) >> 32);
-		pkt.timestamp_lo = (int) input_frame->timestamp;
+		pkt.time_stamp_hi = (int) (((u64)input_frame->timestamp) >> 32);
+		pkt.time_stamp_lo = (int) input_frame->timestamp;
 		pkt.flags = input_frame->flags;
 		pkt.mark_target = input_frame->mark_target;
 		pkt.mark_data = input_frame->mark_data;
@@ -1590,11 +1823,11 @@
 			pkt;
 		pkt.size = sizeof(struct
 		hfi_cmd_session_empty_buffer_uncompressed_plane0_packet);
-		pkt.packet = HFI_CMD_SESSION_EMPTY_BUFFER;
+		pkt.packet_type = HFI_CMD_SESSION_EMPTY_BUFFER;
 		pkt.session_id = (u32) session;
 		pkt.view_id = 0;
-		pkt.timestamp_hi = (u32) (((u64)input_frame->timestamp) >> 32);
-		pkt.timestamp_lo = (u32) input_frame->timestamp;
+		pkt.time_stamp_hi = (u32) (((u64)input_frame->timestamp) >> 32);
+		pkt.time_stamp_lo = (u32) input_frame->timestamp;
 		pkt.flags = input_frame->flags;
 		pkt.mark_target = input_frame->mark_target;
 		pkt.mark_data = input_frame->mark_data;
@@ -1734,8 +1967,23 @@
 	pkt.size = sizeof(struct hfi_cmd_session_flush_packet);
 	pkt.packet_type = HFI_CMD_SESSION_FLUSH;
 	pkt.session_id = (u32) session;
-	pkt.flush_type = flush_mode;
-
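+	/* Translate the HAL flush mode into its HFI counterpart */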
+	switch (flush_mode) {
+	case HAL_FLUSH_INPUT:
+		pkt.flush_type = HFI_FLUSH_INPUT;
+		break;
+	case HAL_FLUSH_OUTPUT:
+		pkt.flush_type = HFI_FLUSH_OUTPUT;
+		break;
+	case HAL_FLUSH_OUTPUT2:
+		pkt.flush_type = HFI_FLUSH_OUTPUT2;
+		break;
+	case HAL_FLUSH_ALL:
+		pkt.flush_type = HFI_FLUSH_ALL;
+		break;
+	default:
+		HAL_MSG_ERROR("Invalid flush mode: 0x%x\n", flush_mode);
+		break;
+	}
 	if (vidc_hal_iface_cmdq_write(session->device, &pkt))
 		rc = -ENOTEMPTY;
 	return rc;
diff --git a/drivers/media/video/msm_vidc/vidc_hal.h b/drivers/media/video/msm_vidc/vidc_hal.h
index 15441f4..a36d7f3 100644
--- a/drivers/media/video/msm_vidc/vidc_hal.h
+++ b/drivers/media/video/msm_vidc/vidc_hal.h
@@ -11,27 +11,28 @@
  *
  */
 
-#ifndef __VIDC_HAL_H__
-#define __VIDC_HAL_H__
+#ifndef __H_VIDC_HAL_H__
+#define __H_VIDC_HAL_H__
 
 #include <linux/spinlock.h>
 #include <linux/mutex.h>
 #include "vidc_hal_api.h"
 #include "msm_smem.h"
+#include "vidc_hal_helper.h"
 
 #ifdef HAL_MSG_LOG
-#define HAL_MSG_LOW(x...) pr_debug(KERN_INFO x)
-#define HAL_MSG_MEDIUM(x...) pr_debug(KERN_INFO x)
-#define HAL_MSG_HIGH(x...) pr_debug(KERN_INFO x)
+#define HAL_MSG_LOW(x...)		pr_info(x)
+#define HAL_MSG_MEDIUM(x...)	pr_info(x)
+#define HAL_MSG_HIGH(x...)		pr_info(x)
 #else
 #define HAL_MSG_LOW(x...)
 #define HAL_MSG_MEDIUM(x...)
 #define HAL_MSG_HIGH(x...)
 #endif
 
-#define HAL_MSG_ERROR(x...) pr_err(KERN_INFO x)
-#define HAL_MSG_FATAL(x...) pr_err(KERN_INFO x)
-#define HAL_MSG_INFO(x...) pr_info(KERN_INFO x)
+#define HAL_MSG_ERROR(x...)		pr_err(x)
+#define HAL_MSG_FATAL(x...)		pr_err(x)
+#define HAL_MSG_INFO(x...)		pr_info(x)
 
 #define HFI_MASK_QHDR_TX_TYPE			0xFF000000
 #define HFI_MASK_QHDR_RX_TYPE			0x00FF0000
@@ -87,7 +88,7 @@
 #define VIDC_IFACEQ_TABLE_SIZE (sizeof(struct hfi_queue_table_header) \
 	+ sizeof(struct hfi_queue_header) * VIDC_IFACEQ_NUMQ)
 
-#define VIDC_IFACEQ_QUEUE_SIZE		(VIDC_IFACEQ_MAX_PKT_SIZE *  \
+#define VIDC_IFACEQ_QUEUE_SIZE	(VIDC_IFACEQ_MAX_PKT_SIZE *  \
 	VIDC_IFACEQ_MAX_BUF_COUNT * VIDC_IFACE_MAX_PARALLEL_CLNTS)
 
 #define VIDC_IFACEQ_GET_QHDR_START_ADDR(ptr, i)     \
@@ -107,409 +108,162 @@
 	VIDC_HWREG_HVI_SOFTINTEN =  0xA,
 };
 
-enum HFI_EVENT {
-	HFI_EVENT_SYS_ERROR,
-	HFI_EVENT_SESSION_ERROR,
-	HFI_EVENT_SESSION_SEQUENCE_CHANGED,
-	HFI_EVENT_SESSION_PROPERTY_CHANGED,
-	HFI_UNUSED_EVENT = 0x10000000,
-};
+#define HFI_EVENT_SESSION_SEQUENCE_CHANGED (HFI_OX_BASE + 0x3)
+#define HFI_EVENT_SESSION_PROPERTY_CHANGED (HFI_OX_BASE + 0x4)
 
-enum HFI_EVENT_DATA_SEQUENCE_CHANGED {
-	HFI_EVENT_DATA_SEQUENCE_CHANGED_SUFFICIENT_BUFFER_RESOURCES,
-	HFI_EVENT_DATA_SEQUENCE_CHANGED_INSUFFICIENT_BUFFER_RESOURCES,
-	HFI_UNUSED_SEQCHG = 0x10000000,
-};
+#define HFI_EVENT_DATA_SEQUENCE_CHANGED_SUFFICIENT_BUFFER_RESOURCES	\
+	(HFI_OX_BASE + 0x1)
+#define HFI_EVENT_DATA_SEQUENCE_CHANGED_INSUFFICIENT_BUFFER_RESOURCES	\
+	(HFI_OX_BASE + 0x2)
 
-#define HFI_BUFFERFLAG_EOS              0x00000001
-#define HFI_BUFFERFLAG_STARTTIME        0x00000002
-#define HFI_BUFFERFLAG_DECODEONLY       0x00000004
-#define HFI_BUFFERFLAG_DATACORRUPT      0x00000008
-#define HFI_BUFFERFLAG_ENDOFFRAME       0x00000010
-#define HFI_BUFFERFLAG_SYNCFRAME        0x00000020
-#define HFI_BUFFERFLAG_EXTRADATA        0x00000040
-#define HFI_BUFFERFLAG_CODECCONFIG      0x00000080
-#define HFI_BUFFERFLAG_TIMESTAMPINVALID 0x00000100
-#define HFI_BUFFERFLAG_READONLY         0x00000200
-#define HFI_BUFFERFLAG_ENDOFSUBFRAME    0x00000400
+#define HFI_BUFFERFLAG_EOS				0x00000001
+#define HFI_BUFFERFLAG_STARTTIME		0x00000002
+#define HFI_BUFFERFLAG_DECODEONLY		0x00000004
+#define HFI_BUFFERFLAG_DATACORRUPT		0x00000008
+#define HFI_BUFFERFLAG_ENDOFFRAME		0x00000010
+#define HFI_BUFFERFLAG_SYNCFRAME		0x00000020
+#define HFI_BUFFERFLAG_EXTRADATA		0x00000040
+#define HFI_BUFFERFLAG_CODECCONFIG		0x00000080
+#define HFI_BUFFERFLAG_TIMESTAMPINVALID	0x00000100
+#define HFI_BUFFERFLAG_READONLY			0x00000200
+#define HFI_BUFFERFLAG_ENDOFSUBFRAME	0x00000400
 
-enum HFI_ERROR {
-	HFI_ERR_NONE                              = 0,
-	HFI_ERR_SYS_UNKNOWN                       = 0x80000001,
-	HFI_ERR_SYS_FATAL                         = 0x80000002,
-	HFI_ERR_SYS_INVALID_PARAMETER             = 0x80000003,
-	HFI_ERR_SYS_VERSION_MISMATCH              = 0x80000004,
-	HFI_ERR_SYS_INSUFFICIENT_RESOURCES        = 0x80000005,
-	HFI_ERR_SYS_MAX_SESSIONS_REACHED          = 0x80000006,
-	HFI_ERR_SYS_UNSUPPORTED_CODEC             = 0x80000007,
-	HFI_ERR_SYS_SESSION_IN_USE                = 0x80000008,
-	HFI_ERR_SYS_SESSION_ID_OUT_OF_RANGE       = 0x80000009,
-	HFI_ERR_SYS_UNSUPPORTED_DOMAIN            = 0x8000000A,
-	HFI_ERR_SESSION_START_UNUSED              = 0x80001000,
-	HFI_ERR_SESSION_UNKNOWN                   = 0x80001001,
-	HFI_ERR_SESSION_FATAL                     = 0x80001002,
-	HFI_ERR_SESSION_INVALID_PARAMETER         = 0x80001003,
-	HFI_ERR_SESSION_BAD_POINTER               = 0x80001004,
-	HFI_ERR_SESSION_INVALID_SESSION_ID        = 0x80001005,
-	HFI_ERR_SESSION_INVALID_STREAM_ID         = 0x80001006,
-	HFI_ERR_SESSION_INCORRECT_STATE_OPERATION = 0x80001007,
-	HFI_ERR_SESSION_UNSUPPORTED_PROPERTY      = 0x80001008,
-	HFI_ERR_SESSION_UNSUPPORTED_SETTING       = 0x80001009,
-	HFI_ERR_SESSION_INSUFFICIENT_RESOURCES    = 0x8000100A,
-	HFI_ERR_SESSION_STREAM_CORRUPT            = 0x8000100B,
-	HFI_ERR_SESSION_STREAM_CORRUPT_OUTPUT_STALLED    =  0x8000100C,
-	HFI_ERR_SESSION_SYNC_FRAME_NOT_DETECTED          =  0x8000100D,
-	HFI_ERR_SESSION_EMPTY_BUFFER_DONE_OUTPUT_PENDING =  0x8000100E,
-	HFI_ERR_SESSION_SAME_STATE_OPERATION		= 0x8000100F,
-	HFI_UNUSED_ERR = 0x10000000,
-};
+#define HFI_ERR_SESSION_EMPTY_BUFFER_DONE_OUTPUT_PENDING	\
+	(HFI_OX_BASE + 0x1001)
+#define HFI_ERR_SESSION_SAME_STATE_OPERATION		\
+	(HFI_OX_BASE + 0x1002)
+#define HFI_ERR_SESSION_SYNC_FRAME_NOT_DETECTED		\
+	(HFI_OX_BASE + 0x1003)
 
-enum HFI_DOMAIN {
-	HFI_VIDEO_DOMAIN_VPE,
-	HFI_VIDEO_DOMAIN_ENCODER,
-	HFI_VIDEO_DOMAIN_DECODER,
-	HFI_UNUSED_DOMAIN = 0x10000000,
-};
+#define HFI_BUFFER_INTERNAL_SCRATCH (HFI_OX_BASE + 0x1)
+#define HFI_BUFFER_EXTRADATA_INPUT (HFI_OX_BASE + 0x2)
+#define HFI_BUFFER_EXTRADATA_OUTPUT (HFI_OX_BASE + 0x3)
+#define HFI_BUFFER_EXTRADATA_OUTPUT2 (HFI_OX_BASE + 0x4)
 
-enum HFI_VIDEO_CODEC {
-	HFI_VIDEO_CODEC_UNKNOWN  = 0x00000000,
-	HFI_VIDEO_CODEC_H264     = 0x00000002,
-	HFI_VIDEO_CODEC_H263     = 0x00000004,
-	HFI_VIDEO_CODEC_MPEG1    = 0x00000008,
-	HFI_VIDEO_CODEC_MPEG2    = 0x00000010,
-	HFI_VIDEO_CODEC_MPEG4    = 0x00000020,
-	HFI_VIDEO_CODEC_DIVX_311 = 0x00000040,
-	HFI_VIDEO_CODEC_DIVX     = 0x00000080,
-	HFI_VIDEO_CODEC_VC1      = 0x00000100,
-	HFI_VIDEO_CODEC_SPARK    = 0x00000200,
-	HFI_VIDEO_CODEC_VP6      = 0x00000400,
-	HFI_VIDEO_CODEC_VP7		 = 0x00000800,
-	HFI_VIDEO_CODEC_VP8		 = 0x00001000,
-	HFI_UNUSED_CODEC		 = 0x10000000,
-};
+#define HFI_BUFFER_MODE_STATIC (HFI_OX_BASE + 0x1)
+#define HFI_BUFFER_MODE_RING (HFI_OX_BASE + 0x2)
 
-enum HFI_H263_PROFILE {
-	HFI_H263_PROFILE_BASELINE           = 0x00000001,
-	HFI_H263_PROFILE_H320CODING         = 0x00000002,
-	HFI_H263_PROFILE_BACKWARDCOMPATIBLE = 0x00000004,
-	HFI_H263_PROFILE_ISWV2              = 0x00000008,
-	HFI_H263_PROFILE_ISWV3              = 0x00000010,
-	HFI_H263_PROFILE_HIGHCOMPRESSION    = 0x00000020,
-	HFI_H263_PROFILE_INTERNET           = 0x00000040,
-	HFI_H263_PROFILE_INTERLACE          = 0x00000080,
-	HFI_H263_PROFILE_HIGHLATENCY        = 0x00000100,
-	HFI_UNUSED_H263_PROFILE = 0x10000000,
-};
+#define HFI_FLUSH_INPUT (HFI_OX_BASE + 0x1)
+#define HFI_FLUSH_OUTPUT (HFI_OX_BASE + 0x2)
+#define HFI_FLUSH_OUTPUT2 (HFI_OX_BASE + 0x3)
+#define HFI_FLUSH_ALL (HFI_OX_BASE + 0x4)
 
-enum HFI_H263_LEVEL {
-	HFI_H263_LEVEL_10 = 0x00000001,
-	HFI_H263_LEVEL_20 = 0x00000002,
-	HFI_H263_LEVEL_30 = 0x00000004,
-	HFI_H263_LEVEL_40 = 0x00000008,
-	HFI_H263_LEVEL_45 = 0x00000010,
-	HFI_H263_LEVEL_50 = 0x00000020,
-	HFI_H263_LEVEL_60 = 0x00000040,
-	HFI_H263_LEVEL_70 = 0x00000080,
-	HFI_UNUSED_H263_LEVEL = 0x10000000,
-};
+#define HFI_EXTRADATA_NONE					0x00000000
+#define HFI_EXTRADATA_MB_QUANTIZATION		0x00000001
+#define HFI_EXTRADATA_INTERLACE_VIDEO		0x00000002
+#define HFI_EXTRADATA_VC1_FRAMEDISP			0x00000003
+#define HFI_EXTRADATA_VC1_SEQDISP			0x00000004
+#define HFI_EXTRADATA_TIMESTAMP				0x00000005
+#define HFI_EXTRADATA_S3D_FRAME_PACKING		0x00000006
+#define HFI_EXTRADATA_MULTISLICE_INFO		0x7F100000
+#define HFI_EXTRADATA_NUM_CONCEALED_MB		0x7F100001
+#define HFI_EXTRADATA_INDEX					0x7F100002
+#define HFI_EXTRADATA_METADATA_FILLER		0x7FE00002
 
-enum HFI_MPEG2_PROFILE {
-	HFI_MPEG2_PROFILE_SIMPLE  = 0x00000001,
-	HFI_MPEG2_PROFILE_MAIN    = 0x00000002,
-	HFI_MPEG2_PROFILE_422     = 0x00000004,
-	HFI_MPEG2_PROFILE_SNR     = 0x00000008,
-	HFI_MPEG2_PROFILE_SPATIAL = 0x00000010,
-	HFI_MPEG2_PROFILE_HIGH    = 0x00000020,
-	HFI_UNUSED_MPEG2_PROFILE = 0x10000000,
-};
-
-enum HFI_MPEG2_LEVEL {
-	HFI_MPEG2_LEVEL_LL  = 0x00000001,
-	HFI_MPEG2_LEVEL_ML  = 0x00000002,
-	HFI_MPEG2_LEVEL_H14 = 0x00000004,
-	HFI_MPEG2_LEVEL_HL  = 0x00000008,
-	HFI_UNUSED_MEPG2_LEVEL = 0x10000000,
-};
-
-enum HFI_MPEG4_PROFILE {
-	HFI_MPEG4_PROFILE_SIMPLE           = 0x00000001,
-	HFI_MPEG4_PROFILE_SIMPLESCALABLE   = 0x00000002,
-	HFI_MPEG4_PROFILE_CORE             = 0x00000004,
-	HFI_MPEG4_PROFILE_MAIN             = 0x00000008,
-	HFI_MPEG4_PROFILE_NBIT             = 0x00000010,
-	HFI_MPEG4_PROFILE_SCALABLETEXTURE  = 0x00000020,
-	HFI_MPEG4_PROFILE_SIMPLEFACE       = 0x00000040,
-	HFI_MPEG4_PROFILE_SIMPLEFBA        = 0x00000080,
-	HFI_MPEG4_PROFILE_BASICANIMATED    = 0x00000100,
-	HFI_MPEG4_PROFILE_HYBRID           = 0x00000200,
-	HFI_MPEG4_PROFILE_ADVANCEDREALTIME = 0x00000400,
-	HFI_MPEG4_PROFILE_CORESCALABLE     = 0x00000800,
-	HFI_MPEG4_PROFILE_ADVANCEDCODING   = 0x00001000,
-	HFI_MPEG4_PROFILE_ADVANCEDCORE     = 0x00002000,
-	HFI_MPEG4_PROFILE_ADVANCEDSCALABLE = 0x00004000,
-	HFI_MPEG4_PROFILE_ADVANCEDSIMPLE   = 0x00008000,
-	HFI_UNUSED_MPEG4_PROFILE = 0x10000000,
-};
-
-enum HFI_MPEG4_LEVEL {
-	HFI_MPEG4_LEVEL_0  = 0x00000001,
-	HFI_MPEG4_LEVEL_0b = 0x00000002,
-	HFI_MPEG4_LEVEL_1  = 0x00000004,
-	HFI_MPEG4_LEVEL_2  = 0x00000008,
-	HFI_MPEG4_LEVEL_3  = 0x00000010,
-	HFI_MPEG4_LEVEL_4  = 0x00000020,
-	HFI_MPEG4_LEVEL_4a = 0x00000040,
-	HFI_MPEG4_LEVEL_5  = 0x00000080,
-	HFI_MPEG4_LEVEL_VENDOR_START_UNUSED = 0x7F000000,
-	HFI_MPEG4_LEVEL_6  = 0x7F000001,
-	HFI_MPEG4_LEVEL_7  = 0x7F000002,
-	HFI_MPEG4_LEVEL_8  = 0x7F000003,
-	HFI_MPEG4_LEVEL_9  = 0x7F000004,
-	HFI_MPEG4_LEVEL_3b = 0x7F000005,
-	HFI_UNUSED_MPEG4_LEVEL = 0x10000000,
-};
-
-enum HFI_H264_PROFILE {
-	HFI_H264_PROFILE_BASELINE = 0x00000001,
-	HFI_H264_PROFILE_MAIN     = 0x00000002,
-	HFI_H264_PROFILE_EXTENDED = 0x00000004,
-	HFI_H264_PROFILE_HIGH     = 0x00000008,
-	HFI_H264_PROFILE_HIGH10   = 0x00000010,
-	HFI_H264_PROFILE_HIGH422  = 0x00000020,
-	HFI_H264_PROFILE_HIGH444  = 0x00000040,
-	HFI_H264_PROFILE_STEREO_HIGH = 0x00000080,
-	HFI_H264_PROFILE_MV_HIGH  = 0x00000100,
-	HFI_UNUSED_H264_PROFILE   = 0x10000000,
-};
-
-enum HFI_H264_LEVEL {
-	HFI_H264_LEVEL_1  = 0x00000001,
-	HFI_H264_LEVEL_1b = 0x00000002,
-	HFI_H264_LEVEL_11 = 0x00000004,
-	HFI_H264_LEVEL_12 = 0x00000008,
-	HFI_H264_LEVEL_13 = 0x00000010,
-	HFI_H264_LEVEL_2  = 0x00000020,
-	HFI_H264_LEVEL_21 = 0x00000040,
-	HFI_H264_LEVEL_22 = 0x00000080,
-	HFI_H264_LEVEL_3  = 0x00000100,
-	HFI_H264_LEVEL_31 = 0x00000200,
-	HFI_H264_LEVEL_32 = 0x00000400,
-	HFI_H264_LEVEL_4  = 0x00000800,
-	HFI_H264_LEVEL_41 = 0x00001000,
-	HFI_H264_LEVEL_42 = 0x00002000,
-	HFI_H264_LEVEL_5  = 0x00004000,
-	HFI_H264_LEVEL_51 = 0x00008000,
-	HFI_UNUSED_H264_LEVEL = 0x10000000,
-};
-
-enum HFI_VPX_PROFILE {
-	HFI_VPX_PROFILE_SIMPLE    = 0x00000001,
-	HFI_VPX_PROFILE_ADVANCED  = 0x00000002,
-	HFI_VPX_PROFILE_VERSION_0 = 0x00000004,
-	HFI_VPX_PROFILE_VERSION_1 = 0x00000008,
-	HFI_VPX_PROFILE_VERSION_2 = 0x00000010,
-	HFI_VPX_PROFILE_VERSION_3 = 0x00000020,
-	HFI_VPX_PROFILE_UNUSED = 0x10000000,
-};
-
-enum HFI_VC1_PROFILE {
-	HFI_VC1_PROFILE_SIMPLE   = 0x00000001,
-	HFI_VC1_PROFILE_MAIN     = 0x00000002,
-	HFI_VC1_PROFILE_ADVANCED = 0x00000004,
-	HFI_UNUSED_VC1_PROFILE = 0x10000000,
-};
-
-enum HFI_VC1_LEVEL {
-	HFI_VC1_LEVEL_LOW    = 0x00000001,
-	HFI_VC1_LEVEL_MEDIUM = 0x00000002,
-	HFI_VC1_LEVEL_HIGH   = 0x00000004,
-	HFI_VC1_LEVEL_0      = 0x00000008,
-	HFI_VC1_LEVEL_1      = 0x00000010,
-	HFI_VC1_LEVEL_2      = 0x00000020,
-	HFI_VC1_LEVEL_3      = 0x00000040,
-	HFI_VC1_LEVEL_4      = 0x00000080,
-	HFI_UNUSED_VC1_LEVEL = 0x10000000,
-};
-
-enum HFI_DIVX_FORMAT {
-	HFI_DIVX_FORMAT_4,
-	HFI_DIVX_FORMAT_5,
-	HFI_DIVX_FORMAT_6,
-	HFI_UNUSED_DIVX_FORMAT = 0x10000000,
-};
-
-enum HFI_DIVX_PROFILE {
-	HFI_DIVX_PROFILE_QMOBILE  = 0x00000001,
-	HFI_DIVX_PROFILE_MOBILE   = 0x00000002,
-	HFI_DIVX_PROFILE_MT       = 0x00000004,
-	HFI_DIVX_PROFILE_HT       = 0x00000008,
-	HFI_DIVX_PROFILE_HD       = 0x00000010,
-	HFI_UNUSED_DIVX_PROFILE = 0x10000000,
-};
-
-enum HFI_BUFFER {
-	HFI_BUFFER_INPUT,
-	HFI_BUFFER_OUTPUT,
-	HFI_BUFFER_OUTPUT2,
-	HFI_BUFFER_EXTRADATA_INPUT,
-	HFI_BUFFER_EXTRADATA_OUTPUT,
-	HFI_BUFFER_EXTRADATA_OUTPUT2,
-	HFI_BUFFER_INTERNAL_SCRATCH = 0x7F000001,
-	HFI_BUFFER_INTERNAL_PERSIST = 0x7F000002,
-	HFI_UNUSED_BUFFER = 0x10000000,
-};
-
-enum HFI_BUFFER_MODE {
-	HFI_BUFFER_MODE_STATIC,
-	HFI_BUFFER_MODE_RING,
-	HFI_UNUSED_BUFFER_MODE = 0x10000000,
-};
-
-enum HFI_FLUSH {
-	HFI_FLUSH_INPUT,
-	HFI_FLUSH_OUTPUT,
-	HFI_FLUSH_OUTPUT2,
-	HFI_FLUSH_ALL,
-	HFI_UNUSED_FLUSH = 0x10000000,
-};
-
-enum HFI_EXTRADATA {
-	HFI_EXTRADATA_NONE                 = 0x00000000,
-	HFI_EXTRADATA_MB_QUANTIZATION      = 0x00000001,
-	HFI_EXTRADATA_INTERLACE_VIDEO      = 0x00000002,
-	HFI_EXTRADATA_VC1_FRAMEDISP        = 0x00000003,
-	HFI_EXTRADATA_VC1_SEQDISP          = 0x00000004,
-	HFI_EXTRADATA_TIMESTAMP            = 0x00000005,
-	HFI_EXTRADATA_MULTISLICE_INFO      = 0x7F100000,
-	HFI_EXTRADATA_NUM_CONCEALED_MB     = 0x7F100001,
-	HFI_EXTRADATA_INDEX                = 0x7F100002,
-	HFI_EXTRADATA_METADATA_FILLER      = 0x7FE00002,
-	HFI_UNUSED_EXTRADATA = 0x10000000,
-};
-
-enum HFI_EXTRADATA_INDEX_TYPE {
-	HFI_INDEX_EXTRADATA_INPUT_CROP    = 0x0700000E,
-	HFI_INDEX_EXTRADATA_DIGITAL_ZOOM  = 0x07000010,
-	HFI_INDEX_EXTRADATA_ASPECT_RATIO  = 0x7F100003,
-};
+#define HFI_INDEX_EXTRADATA_INPUT_CROP		0x0700000E
+#define HFI_INDEX_EXTRADATA_DIGITAL_ZOOM	0x07000010
+#define HFI_INDEX_EXTRADATA_ASPECT_RATIO	0x7F100003
 
 struct hfi_extradata_header {
 	u32 size;
 	u32 version;
-	u32 port_tndex;
-	enum HFI_EXTRADATA type;
+	u32 port_index;
+	u32 type;
 	u32 data_size;
 	u8 rg_data[1];
 };
 
-enum HFI_INTERLACE_FORMAT {
-	HFI_INTERLACE_FRAME_PROGRESSIVE                 = 0x01,
-	HFI_INTERLACE_INTERLEAVE_FRAME_TOPFIELDFIRST    = 0x02,
-	HFI_INTERLACE_INTERLEAVE_FRAME_BOTTOMFIELDFIRST = 0x04,
-	HFI_INTERLACE_FRAME_TOPFIELDFIRST               = 0x08,
-	HFI_INTERLACE_FRAME_BOTTOMFIELDFIRST            = 0x10,
-	HFI_UNUSED_INTERLACE = 0x10000000,
-};
+#define HFI_INTERLACE_FRAME_PROGRESSIVE					0x01
+#define HFI_INTERLACE_INTERLEAVE_FRAME_TOPFIELDFIRST	0x02
+#define HFI_INTERLACE_INTERLEAVE_FRAME_BOTTOMFIELDFIRST	0x04
+#define HFI_INTERLACE_FRAME_TOPFIELDFIRST				0x08
+#define HFI_INTERLACE_FRAME_BOTTOMFIELDFIRST			0x10
 
-enum HFI_PROPERTY {
-	HFI_PROPERTY_SYS_UNUSED = 0x08000000,
-	HFI_PROPERTY_SYS_IDLE_INDICATOR,
-	HFI_PROPERTY_SYS_DEBUG_CONFIG,
-	HFI_PROPERTY_SYS_RESOURCE_OCMEM_REQUIREMENT_INFO,
-	HFI_PROPERTY_PARAM_UNUSED = 0x04000000,
-	HFI_PROPERTY_PARAM_BUFFER_COUNT_ACTUAL,
-	HFI_PROPERTY_PARAM_FRAME_SIZE,
-	HFI_PROPERTY_PARAM_UNCOMPRESSED_FORMAT_SELECT,
-	HFI_PROPERTY_PARAM_UNCOMPRESSED_FORMAT_SUPPORTED,
-	HFI_PROPERTY_PARAM_UNCOMPRESSED_PLANE_ACTUAL_INFO,
-	HFI_PROPERTY_PARAM_UNCOMPRESSED_PLANE_ACTUAL_CONSTRAINTS_INFO,
-	HFI_PROPERTY_PARAM_INTERLACE_FORMAT_SUPPORTED,
-	HFI_PROPERTY_PARAM_CHROMA_SITE,
-	HFI_PROPERTY_PARAM_EXTRA_DATA_HEADER_CONFIG,
-	HFI_PROPERTY_PARAM_PROFILE_LEVEL_CURRENT,
-	HFI_PROPERTY_PARAM_PROFILE_LEVEL_SUPPORTED,
-	HFI_PROPERTY_PARAM_CAPABILITY_SUPPORTED,
-	HFI_PROPERTY_PARAM_NAL_STREAM_FORMAT_SUPPORTED,
-	HFI_PROPERTY_PARAM_NAL_STREAM_FORMAT_SELECT,
-	HFI_PROPERTY_PARAM_MULTI_VIEW_FORMAT,
-	HFI_PROPERTY_PARAM_PROPERTIES_SUPPORTED,
-	HFI_PROPERTY_PARAM_MAX_SEQUENCE_HEADER_SIZE,
-	HFI_PROPERTY_PARAM_CODEC_SUPPORTED,
-	HFI_PROPERTY_PARAM_DIVX_FORMAT,
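+/*
+ * Property IDs are now composed from a domain base plus the OX
+ * architecture offset rather than enumerated values.
+ */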
+#define HFI_PROPERTY_SYS_OX_START			\
+	(HFI_DOMAIN_BASE_COMMON + HFI_ARCH_OX_OFFSET + 0x0000)
+#define HFI_PROPERTY_SYS_IDLE_INDICATOR		\
+	(HFI_PROPERTY_SYS_OX_START + 0x001)
 
-	HFI_PROPERTY_CONFIG_UNUSED = 0x02000000,
-	HFI_PROPERTY_CONFIG_BUFFER_REQUIREMENTS,
-	HFI_PROPERTY_CONFIG_REALTIME,
-	HFI_PROPERTY_CONFIG_PRIORITY,
-	HFI_PROPERTY_CONFIG_BATCH_INFO,
-	HFI_PROPERTY_CONFIG_FRAME_RATE,
+#define HFI_PROPERTY_PARAM_OX_START				\
+	(HFI_DOMAIN_BASE_COMMON + HFI_ARCH_OX_OFFSET + 0x1000)
+#define HFI_PROPERTY_PARAM_BUFFER_COUNT_ACTUAL			\
+	(HFI_PROPERTY_PARAM_OX_START + 0x001)
+#define HFI_PROPERTY_PARAM_UNCOMPRESSED_PLANE_ACTUAL_CONSTRAINTS_INFO	\
+	(HFI_PROPERTY_PARAM_OX_START + 0x002)
+#define HFI_PROPERTY_PARAM_INTERLACE_FORMAT_SUPPORTED	\
+	(HFI_PROPERTY_PARAM_OX_START + 0x003)
+#define HFI_PROPERTY_PARAM_CHROMA_SITE					\
+	(HFI_PROPERTY_PARAM_OX_START + 0x004)
+#define HFI_PROPERTY_PARAM_EXTRA_DATA_HEADER_CONFIG		\
+	(HFI_PROPERTY_PARAM_OX_START + 0x005)
+#define HFI_PROPERTY_PARAM_MAX_SEQUENCE_HEADER_SIZE		\
+	(HFI_PROPERTY_PARAM_OX_START + 0x006)
+#define HFI_PROPERTY_PARAM_DIVX_FORMAT					\
+	(HFI_PROPERTY_PARAM_OX_START + 0x007)
 
-	HFI_PROPERTY_PARAM_VDEC_UNUSED = 0x01000000,
-	HFI_PROPERTY_PARAM_VDEC_CONTINUE_DATA_TRANSFER,
-	HFI_PROPERTY_PARAM_VDEC_DISPLAY_PICTURE_BUFFER_COUNT,
-	HFI_PROPERTY_PARAM_VDEC_MULTI_VIEW_SELECT,
-	HFI_PROPERTY_PARAM_VDEC_PICTURE_TYPE_DECODE,
-	HFI_PROPERTY_PARAM_VDEC_MULTI_STREAM,
-	HFI_PROPERTY_PARAM_VDEC_OUTPUT_ORDER,
-	HFI_PROPERTY_PARAM_VDEC_MB_QUANTIZATION,
-	HFI_PROPERTY_PARAM_VDEC_NUM_CONCEALED_MB,
-	HFI_PROPERTY_PARAM_VDEC_H264_ENTROPY_SWITCHING,
-	HFI_PROPERTY_PARAM_VDEC_OUTPUT2_KEEP_ASPECT_RATIO,
+#define HFI_PROPERTY_CONFIG_OX_START					\
+	(HFI_DOMAIN_BASE_COMMON + HFI_ARCH_OX_OFFSET + 0x02000)
+#define HFI_PROPERTY_CONFIG_BUFFER_REQUIREMENTS			\
+	(HFI_PROPERTY_CONFIG_OX_START + 0x001)
+#define HFI_PROPERTY_CONFIG_REALTIME					\
+	(HFI_PROPERTY_CONFIG_OX_START + 0x002)
+#define HFI_PROPERTY_CONFIG_PRIORITY					\
+	(HFI_PROPERTY_CONFIG_OX_START + 0x003)
+#define HFI_PROPERTY_CONFIG_BATCH_INFO					\
+	(HFI_PROPERTY_CONFIG_OX_START + 0x004)
 
-	HFI_PROPERTY_CONFIG_VDEC_UNUSED = 0x00800000,
-	HFI_PROPERTY_CONFIG_VDEC_POST_LOOP_DEBLOCKER,
-	HFI_PROPERTY_CONFIG_VDEC_MB_ERROR_MAP_REPORTING,
-	HFI_PROPERTY_CONFIG_VDEC_MB_ERROR_MAP,
+#define HFI_PROPERTY_PARAM_VDEC_OX_START				\
+	(HFI_DOMAIN_BASE_VDEC + HFI_ARCH_OX_OFFSET + 0x3000)
+#define HFI_PROPERTY_PARAM_VDEC_CONTINUE_DATA_TRANSFER	\
+	(HFI_PROPERTY_PARAM_VDEC_OX_START + 0x001)
+#define HFI_PROPERTY_PARAM_VDEC_DISPLAY_PICTURE_BUFFER_COUNT\
+	(HFI_PROPERTY_PARAM_VDEC_OX_START + 0x002)
+#define HFI_PROPERTY_PARAM_VDEC_MULTI_VIEW_SELECT		\
+	(HFI_PROPERTY_PARAM_VDEC_OX_START + 0x003)
+#define HFI_PROPERTY_PARAM_VDEC_PICTURE_TYPE_DECODE		\
+	(HFI_PROPERTY_PARAM_VDEC_OX_START + 0x004)
+#define HFI_PROPERTY_PARAM_VDEC_OUTPUT_ORDER			\
+	(HFI_PROPERTY_PARAM_VDEC_OX_START + 0x005)
+#define HFI_PROPERTY_PARAM_VDEC_MB_QUANTIZATION			\
+	(HFI_PROPERTY_PARAM_VDEC_OX_START + 0x006)
+#define HFI_PROPERTY_PARAM_VDEC_NUM_CONCEALED_MB		\
+	(HFI_PROPERTY_PARAM_VDEC_OX_START + 0x007)
+#define HFI_PROPERTY_PARAM_VDEC_H264_ENTROPY_SWITCHING	\
+	(HFI_PROPERTY_PARAM_VDEC_OX_START + 0x008)
+#define HFI_PROPERTY_PARAM_VDEC_OUTPUT2_KEEP_ASPECT_RATIO\
+	(HFI_PROPERTY_PARAM_VDEC_OX_START + 0x009)
 
-	HFI_PROPERTY_PARAM_VENC_UNUSED = 0x00400000,
-	HFI_PROPERTY_PARAM_VENC_SLICE_DELIVERY_MODE,
-	HFI_PROPERTY_PARAM_VENC_H264_ENTROPY_CONTROL,
-	HFI_PROPERTY_PARAM_VENC_H264_DEBLOCK_CONTROL,
-	HFI_PROPERTY_PARAM_VENC_RATE_CONTROL,
-	HFI_PROPERTY_PARAM_VENC_TEMPORAL_SPATIAL_TRADEOFF,
-	HFI_PROPERTY_PARAM_VENC_SESSION_QP,
-	HFI_PROPERTY_PARAM_VENC_MPEG4_AC_PREDICTION,
-	HFI_PROPERTY_PARAM_VENC_MPEG4_DATA_PARTITIONING,
-	HFI_PROPERTY_PARAM_VENC_MPEG4_TIME_RESOLUTION,
-	HFI_PROPERTY_PARAM_VENC_MPEG4_SHORT_HEADER,
-	HFI_PROPERTY_PARAM_VENC_MPEG4_HEADER_EXTENSION,
-	HFI_PROPERTY_PARAM_VENC_MULTI_SLICE_INFO,
-	HFI_PROPERTY_PARAM_VENC_INTRA_REFRESH,
-	HFI_PROPERTY_PARAM_VENC_MULTI_SLICE_CONTROL,
+#define HFI_PROPERTY_CONFIG_VDEC_OX_START				\
+	(HFI_DOMAIN_BASE_VDEC + HFI_ARCH_OX_OFFSET + 0x0000)
+#define HFI_PROPERTY_CONFIG_VDEC_POST_LOOP_DEBLOCKER	\
+	(HFI_PROPERTY_CONFIG_VDEC_OX_START + 0x001)
+#define HFI_PROPERTY_CONFIG_VDEC_MB_ERROR_MAP_REPORTING	\
+	(HFI_PROPERTY_CONFIG_VDEC_OX_START + 0x002)
+#define HFI_PROPERTY_CONFIG_VDEC_MB_ERROR_MAP			\
+	(HFI_PROPERTY_CONFIG_VDEC_OX_START + 0x003)
 
-	HFI_PROPERTY_CONFIG_VENC_UNUSED = 0x00200000,
-	HFI_PROPERTY_CONFIG_VENC_TARGET_BITRATE,
-	HFI_PROPERTY_CONFIG_VENC_IDR_PERIOD,
-	HFI_PROPERTY_CONFIG_VENC_INTRA_PERIOD,
-	HFI_PROPERTY_CONFIG_VENC_REQUEST_IFRAME,
-	HFI_PROPERTY_CONFIG_VENC_TIMESTAMP_SCALE,
-	HFI_PROPERTY_PARAM_VENC_MPEG4_QPEL,
-	HFI_PROPERTY_PARAM_VENC_ADVANCED,
+#define HFI_PROPERTY_PARAM_VENC_OX_START				\
+	(HFI_DOMAIN_BASE_VENC + HFI_ARCH_OX_OFFSET + 0x5000)
+#define HFI_PROPERTY_CONFIG_VENC_OX_START				\
+	(HFI_DOMAIN_BASE_VENC + HFI_ARCH_OX_OFFSET + 0x6000)
 
-	HFI_PROPERTY_PARAM_VPE_UNUSED = 0x00100000,
-
-	HFI_PROPERTY_CONFIG_VPE_UNUSED = 0x00080000,
-	HFI_PROPERTY_CONFIG_VPE_DEINTERLACE,
-	HFI_PROPERTY_CONFIG_VPE_OPERATIONS,
-	HFI_PROPERTY_UNUSED = 0x10000000,
-};
+#define HFI_PROPERTY_PARAM_VPE_OX_START					\
+	(HFI_DOMAIN_BASE_VPE + HFI_ARCH_OX_OFFSET + 0x7000)
+#define HFI_PROPERTY_CONFIG_VPE_OX_START				\
+	(HFI_DOMAIN_BASE_VPE + HFI_ARCH_OX_OFFSET + 0x8000)
 
 struct hfi_batch_info {
 	u32 input_batch_count;
 	u32 output_batch_count;
 };
 
-struct hfi_bitrate {
-	u32 bit_rate;
-};
-
 struct hfi_buffer_count_actual {
-	enum HFI_BUFFER buffer;
+	u32 buffer_type;
 	u32 buffer_count_actual;
 };
 
 struct hfi_buffer_requirements {
-	enum HFI_BUFFER buffer;
+	u32 buffer_type;
 	u32 buffer_size;
 	u32 buffer_region_size;
 	u32 buffer_hold_count;
@@ -519,35 +273,12 @@
 	u32 buffer_alignment;
 };
 
-enum HFI_CAPABILITY {
-	HFI_CAPABILITY_FRAME_WIDTH,
-	HFI_CAPABILITY_FRAME_HEIGHT,
-	HFI_CAPABILITY_MBS_PER_FRAME,
-	HFI_CAPABILITY_MBS_PER_SECOND,
-	HFI_CAPABILITY_FRAMERATE,
-	HFI_CAPABILITY_SCALE_X,
-	HFI_CAPABILITY_SCALE_Y,
-	HFI_CAPABILITY_BITRATE,
-	HFI_UNUSED_CAPABILITY = 0x10000000,
-};
-
-struct hfi_capability_supported {
-	enum HFI_CAPABILITY eCapabilityType;
-	u32 min;
-	u32 max;
-	u32 step_size;
-};
-
-struct hfi_capability_supported_INFO {
-	u32 num_capabilities;
-	struct hfi_capability_supported rg_data[1];
-};
-
-enum HFI_CHROMA_SITE {
-	HFI_CHROMA_SITE_0,
-	HFI_CHROMA_SITE_1,
-	HFI_UNUSED_CHROMA = 0x10000000,
-};
+#define HFI_CHROMA_SITE_0			(HFI_OX_BASE + 0x1)
+#define HFI_CHROMA_SITE_1			(HFI_OX_BASE + 0x2)
+#define HFI_CHROMA_SITE_2			(HFI_OX_BASE + 0x3)
+#define HFI_CHROMA_SITE_3			(HFI_OX_BASE + 0x4)
+#define HFI_CHROMA_SITE_4			(HFI_OX_BASE + 0x5)
+#define HFI_CHROMA_SITE_5			(HFI_OX_BASE + 0x6)
 
 struct hfi_data_payload {
 	u32 size;
@@ -567,86 +298,17 @@
 	u32 count;
 };
 
-struct hfi_enable {
-	int enable;
-};
-
-enum HFI_H264_DB_MODE {
-	HFI_H264_DB_MODE_DISABLE,
-	HFI_H264_DB_MODE_SKIP_SLICE_BOUNDARY,
-	HFI_H264_DB_MODE_ALL_BOUNDARY,
-	HFI_UNUSED_H264_DB = 0x10000000,
-};
-
-struct hfi_h264_db_control {
-	enum HFI_H264_DB_MODE mode;
-	int slice_alpha_offset;
-	int slice_beta_offset;
-};
-
-enum HFI_H264_ENTROPY {
-	HFI_H264_ENTROPY_CAVLC,
-	HFI_H264_ENTROPY_CABAC,
-	HFI_UNUSED_ENTROPY = 0x10000000,
-};
-
-enum HFI_H264_CABAC_MODEL {
-	HFI_H264_CABAC_MODEL_0,
-	HFI_H264_CABAC_MODEL_1,
-	HFI_H264_CABAC_MODEL_2,
-	HFI_UNUSED_CABAC = 0x10000000,
-};
-
-struct hfi_h264_entropy_control {
-	enum HFI_H264_ENTROPY entropy_mode;
-	enum HFI_H264_CABAC_MODEL cabac_model;
-};
-
 struct hfi_extra_data_header_config {
 	u32 type;
-	enum HFI_BUFFER buffer_type;
+	u32 buffer_type;
 	u32 version;
 	u32 port_index;
-	u32 client_extradata_id;
-};
-
-struct hfi_frame_rate {
-	enum HFI_BUFFER buffer_type;
-	u32 frame_rate;
+	u32 client_extra_data_id;
 };
 
 struct hfi_interlace_format_supported {
-	enum HFI_BUFFER buffer;
-	enum HFI_INTERLACE_FORMAT format;
-};
-
-enum hfi_intra_refresh_mode {
-	HFI_INTRA_REFRESH_NONE,
-	HFI_INTRA_REFRESH_CYCLIC,
-	HFI_INTRA_REFRESH_ADAPTIVE,
-	HFI_INTRA_REFRESH_CYCLIC_ADAPTIVE,
-	HFI_INTRA_REFRESH_RANDOM,
-	HFI_UNUSED_INTRA = 0x10000000,
-};
-
-struct hfi_intra_refresh {
-	enum hfi_intra_refresh_mode mode;
-	u32 air_mbs;
-	u32 air_ref;
-	u32 cir_mbs;
-};
-
-struct hfi_idr_period {
-	u32 idr_period;
-};
-
-struct hfi_intra_period {
-	u32 pframes;
-	u32 bframes;
-};
-
-struct hfi_timestamp_scale {
-	u32 time_stamp_scale;
+	u32 buffer_type;
+	u32 format;
 };
 
 struct hfi_mb_error_map {
@@ -659,424 +321,110 @@
 	u32 size;
 };
 
-struct hfi_mpeg4_header_extension {
-	u32 header_extension;
-};
-
-struct hfi_mpeg4_time_resolution {
-	u32 time_increment_resolution;
-};
-
-enum HFI_MULTI_SLICE {
-	HFI_MULTI_SLICE_OFF,
-	HFI_MULTI_SLICE_BY_MB_COUNT,
-	HFI_MULTI_SLICE_BY_BYTE_COUNT,
-	HFI_MULTI_SLICE_GOB,
-	HFI_UNUSED_SLICE = 0x10000000,
-};
-
-struct hfi_multi_slice_control {
-	enum HFI_MULTI_SLICE multi_slice;
-	u32 slice_size;
-};
-
-struct hfi_multi_stream {
-	enum HFI_BUFFER buffer;
-	u32 enable;
-	u32 width;
-	u32 height;
-};
-
-struct hfi_multi_view_format {
-	u32 views;
-	u32 rg_view_order[1];
-};
-
 struct hfi_multi_view_select {
 	u32 view_index;
 };
 
-enum HFI_NAL_STREAM_FORMAT {
-	HFI_NAL_FORMAT_STARTCODES         = 0x00000001,
-	HFI_NAL_FORMAT_ONE_NAL_PER_BUFFER = 0x00000002,
-	HFI_NAL_FORMAT_ONE_BYTE_LENGTH    = 0x00000004,
-	HFI_NAL_FORMAT_TWO_BYTE_LENGTH    = 0x00000008,
-	HFI_NAL_FORMAT_FOUR_BYTE_LENGTH   = 0x00000010,
-	HFI_UNUSED_NAL = 0x10000000,
-};
+#define HFI_PRIORITY_LOW		10
+#define HFI_PRIOIRTY_MEDIUM		20
+#define HFI_PRIORITY_HIGH		30
 
-struct hfi_nal_stream_format_supported {
-	u32 nal_stream_format_supported;
-};
+#define HFI_OUTPUT_ORDER_DISPLAY	(HFI_OX_BASE + 0x1)
+#define HFI_OUTPUT_ORDER_DECODE		(HFI_OX_BASE + 0x2)
 
-enum HFI_PICTURE {
-	HFI_PICTURE_I   = 0x01,
-	HFI_PICTURE_P   = 0x02,
-	HFI_PICTURE_B   = 0x04,
-	HFI_PICTURE_IDR = 0x7F001000,
-	HFI_UNUSED_PICT = 0x10000000,
-};
-
-enum HFI_PRIORITY {
-	HFI_PRIORITY_LOW = 10,
-	HFI_PRIOIRTY_MEDIUM = 20,
-	HFI_PRIORITY_HIGH = 30,
-	HFI_UNUSED_PRIORITY = 0x10000000,
-};
-
-struct hfi_profile_level {
-	u32 profile;
-	u32 level;
-};
-
-struct hfi_profile_level_supported {
-	u32 profile_count;
-	struct hfi_profile_level rg_profile_level[1];
-};
-
-enum HFI_ROTATE {
-	HFI_ROTATE_NONE,
-	HFI_ROTATE_90,
-	HFI_ROTATE_180,
-	HFI_ROTATE_270,
-	HFI_UNUSED_ROTATE = 0x10000000,
-};
-
-enum HFI_FLIP {
-	HFI_FLIP_NONE,
-	HFI_FLIP_HORIZONTAL,
-	HFI_FLIP_VERTICAL,
-	HFI_UNUSED_FLIP = 0x10000000,
-};
-
-struct hfi_operations {
-	enum HFI_ROTATE rotate;
-	enum HFI_FLIP flip;
-};
-
-enum HFI_OUTPUT_ORDER {
-	HFI_OUTPUT_ORDER_DISPLAY,
-	HFI_OUTPUT_ORDER_DECODE,
-	HFI_UNUSED_OUTPUT = 0x10000000,
-};
-
-struct hfi_quantization {
-	u32 qp_i;
-	u32 qp_p;
-	u32 qp_b;
-};
-
-enum HFI_RATE_CONTROL {
-	HFI_RATE_CONTROL_OFF,
-	HFI_RATE_CONTROL_VBR_VFR,
-	HFI_RATE_CONTROL_VBR_CFR,
-	HFI_RATE_CONTROL_CBR_VFR,
-	HFI_RATE_CONTROL_CBR_CFR,
-	HFI_UNUSED_RC = 0x10000000,
-};
-
-struct hfi_slice_delivery_mode {
-	int enable;
-};
-
-struct hfi_temporal_spatial_tradeoff {
-	u32 ts_factor;
-};
-
-struct hfi_frame_size {
-	enum HFI_BUFFER buffer;
-	u32 width;
-	u32 height;
-};
-
-enum HFI_UNCOMPRESSED_FORMAT {
-	HFI_COLOR_FORMAT_MONOCHROME,
-	HFI_COLOR_FORMAT_NV12,
-	HFI_COLOR_FORMAT_NV21,
-	HFI_COLOR_FORMAT_NV12_4x4TILE,
-	HFI_COLOR_FORMAT_NV21_4x4TILE,
-	HFI_COLOR_FORMAT_YUYV,
-	HFI_COLOR_FORMAT_YVYU,
-	HFI_COLOR_FORMAT_UYVY,
-	HFI_COLOR_FORMAT_VYUY,
-	HFI_COLOR_FORMAT_RGB565,
-	HFI_COLOR_FORMAT_BGR565,
-	HFI_COLOR_FORMAT_RGB888,
-	HFI_COLOR_FORMAT_BGR888,
-	HFI_UNUSED_COLOR = 0x10000000,
-};
-
-struct hfi_uncompressed_format_select {
-	enum HFI_BUFFER buffer;
-	enum HFI_UNCOMPRESSED_FORMAT format;
-};
-
-struct hfi_uncompressed_format_supported {
-	enum HFI_BUFFER buffer;
-	u32 format_entries;
-	u32 rg_format_info[1];
-};
-
-struct hfi_uncompressed_plane_actual {
-	int actual_stride;
-	u32 actual_plane_buffer_height;
-};
-
-struct hfi_uncompressed_plane_actual_info {
-	enum HFI_BUFFER buffer;
-	u32 num_planes;
-	struct hfi_uncompressed_plane_actual rg_plane_format[1];
-};
-
-struct hfi_uncompressed_plane_constraints {
-	u32 stride_multiples;
-	u32 max_stride;
-	u32 min_plane_buffer_height_multiple;
-	u32 buffer_alignment;
-};
-
-struct hfi_uncompressed_plane_info {
-	enum HFI_UNCOMPRESSED_FORMAT format;
-	u32 num_planes;
-	struct hfi_uncompressed_plane_constraints rg_plane_format[1];
-};
+#define HFI_RATE_CONTROL_OFF		(HFI_OX_BASE + 0x1)
+#define HFI_RATE_CONTROL_VBR_VFR	(HFI_OX_BASE + 0x2)
+#define HFI_RATE_CONTROL_VBR_CFR	(HFI_OX_BASE + 0x3)
+#define HFI_RATE_CONTROL_CBR_VFR	(HFI_OX_BASE + 0x4)
+#define HFI_RATE_CONTROL_CBR_CFR	(HFI_OX_BASE + 0x5)
 
 struct hfi_uncompressed_plane_actual_constraints_info {
-	enum HFI_BUFFER buffer;
+	u32 buffer_type;
 	u32 num_planes;
 	struct hfi_uncompressed_plane_constraints rg_plane_format[1];
 };
 
-struct hfi_codec_supported {
-	u32 decoder_codec_supported;
-	u32 encoder_codec_supported;
-};
+#define HFI_CMD_SYS_OX_START		\
+	(HFI_DOMAIN_BASE_COMMON + HFI_ARCH_OX_OFFSET + 0x0000)
+#define HFI_CMD_SYS_SESSION_ABORT	(HFI_CMD_SYS_OX_START + 0x001)
+#define HFI_CMD_SYS_PING		(HFI_CMD_SYS_OX_START + 0x002)
 
-enum HFI_DEBUG_MSG {
-	HFI_DEBUG_MSG_LOW     = 0x00000001,
-	HFI_DEBUG_MSG_MEDIUM  = 0x00000002,
-	HFI_DEBUG_MSG_HIGH    = 0x00000004,
-	HFI_DEBUG_MSG_ERROR   = 0x00000008,
-	HFI_DEBUG_MSG_FATAL   = 0x00000010,
-	HFI_UNUSED_DEBUG_MSG = 0x10000000,
-};
+#define HFI_CMD_SESSION_OX_START	\
+	(HFI_DOMAIN_BASE_COMMON + HFI_ARCH_OX_OFFSET + 0x1000)
+#define HFI_CMD_SESSION_LOAD_RESOURCES	(HFI_CMD_SESSION_OX_START + 0x001)
+#define HFI_CMD_SESSION_START		(HFI_CMD_SESSION_OX_START + 0x002)
+#define HFI_CMD_SESSION_STOP		(HFI_CMD_SESSION_OX_START + 0x003)
+#define HFI_CMD_SESSION_EMPTY_BUFFER	(HFI_CMD_SESSION_OX_START + 0x004)
+#define HFI_CMD_SESSION_FILL_BUFFER	(HFI_CMD_SESSION_OX_START + 0x005)
+#define HFI_CMD_SESSION_SUSPEND		(HFI_CMD_SESSION_OX_START + 0x006)
+#define HFI_CMD_SESSION_RESUME		(HFI_CMD_SESSION_OX_START + 0x007)
+#define HFI_CMD_SESSION_FLUSH		(HFI_CMD_SESSION_OX_START + 0x008)
+#define HFI_CMD_SESSION_GET_PROPERTY	(HFI_CMD_SESSION_OX_START + 0x009)
+#define HFI_CMD_SESSION_PARSE_SEQUENCE_HEADER	\
+	(HFI_CMD_SESSION_OX_START + 0x00A)
+#define HFI_CMD_SESSION_RELEASE_BUFFERS		\
+	(HFI_CMD_SESSION_OX_START + 0x00B)
+#define HFI_CMD_SESSION_RELEASE_RESOURCES	\
+	(HFI_CMD_SESSION_OX_START + 0x00C)
 
-struct hfi_debug_config {
-	u32 debug_config;
-};
+#define HFI_MSG_SYS_OX_START			\
+	(HFI_DOMAIN_BASE_COMMON + HFI_ARCH_OX_OFFSET + 0x0000)
+#define HFI_MSG_SYS_IDLE		(HFI_MSG_SYS_OX_START + 0x1)
+#define HFI_MSG_SYS_PING_ACK	(HFI_MSG_SYS_OX_START + 0x2)
+#define HFI_MSG_SYS_PROPERTY_INFO	(HFI_MSG_SYS_OX_START + 0x3)
+#define HFI_MSG_SYS_SESSION_ABORT_DONE	(HFI_MSG_SYS_OX_START + 0x4)
 
-struct hfi_properties_supported {
-	u32 num_properties;
-	u32 rg_properties[1];
-};
-
-enum HFI_RESOURCE {
-	HFI_RESOURCE_OCMEM    = 0x00000001,
-	HFI_UNUSED_RESOURCE = 0x10000000,
-};
-
-struct hfi_resource_ocmem_type {
-	u32 size;
-	u8 *mem;
-};
-
-struct hfi_resource_ocmem_requirement {
-	enum HFI_DOMAIN session_domain;
-	u32 width;
-	u32 height;
-	u32 size;
-};
-
-struct hfi_resource_ocmem_requirement_info {
-	u32 num_entries;
-	struct hfi_resource_ocmem_requirement rg_requirements[1];
-};
-
-struct hfi_venc_config_advanced {
-	u8 pipe2d;
-	u8 hw_mode;
-	u8 low_delay_enforce;
-	int h264_constrain_intra_pred;
-	int h264_transform_8x8_flag;
-	int mpeg4_qpel_enable;
-	int multi_refP_en;
-	int qmatrix_en;
-	u8 vpp_info_packet_mode;
-	u8 ref_tile_mode;
-	u8 bitstream_flush_mode;
-	u32 ds_display_frame_width;
-	u32 ds_display_frame_height;
-	u32 perf_tune_param_ptr;
-};
-
-enum HFI_COMMAND {
-	HFI_CMD_SYS_UNUSED = 0x01000000,
-	HFI_CMD_SYS_INIT,
-	HFI_CMD_SYS_SESSION_INIT,
-	HFI_CMD_SYS_SESSION_END,
-	HFI_CMD_SYS_SESSION_ABORT,
-	HFI_CMD_SYS_SET_RESOURCE,
-	HFI_CMD_SYS_RELEASE_RESOURCE,
-	HFI_CMD_SYS_PING,
-	HFI_CMD_SYS_PC_PREP,
-	HFI_CMD_SYS_SET_PROPERTY,
-	HFI_CMD_SYS_GET_PROPERTY,
-
-	HFI_CMD_SESSION_UNUSED = 0x02000000,
-	HFI_CMD_SESSION_LOAD_RESOURCES,
-	HFI_CMD_SESSION_START,
-	HFI_CMD_SESSION_STOP,
-	HFI_CMD_SESSION_EMPTY_BUFFER,
-	HFI_CMD_SESSION_FILL_BUFFER,
-	HFI_CMD_SESSION_FLUSH,
-	HFI_CMD_SESSION_SUSPEND,
-	HFI_CMD_SESSION_RESUME,
-	HFI_CMD_SESSION_SET_PROPERTY,
-	HFI_CMD_SESSION_GET_PROPERTY,
-	HFI_CMD_SESSION_PARSE_SEQUENCE_HEADER,
-	HFI_CMD_SESSION_GET_SEQUENCE_HEADER,
-	HFI_CMD_SESSION_SET_BUFFERS,
-	HFI_CMD_SESSION_RELEASE_BUFFERS,
-	HFI_CMD_SESSION_RELEASE_RESOURCES,
-
-	HFI_CMD_UNUSED = 0x10000000,
-};
-
-enum HFI_MESSAGE {
-	HFI_MSG_SYS_UNUSED = 0x01000000,
-	HFI_MSG_SYS_IDLE,
-	HFI_MSG_SYS_PC_PREP_DONE,
-	HFI_MSG_SYS_RELEASE_RESOURCE,
-	HFI_MSG_SYS_PING_ACK,
-	HFI_MSG_SYS_DEBUG,
-	HFI_MSG_SYS_INIT_DONE,
-	HFI_MSG_SYS_PROPERTY_INFO,
-	HFI_MSG_SESSION_UNUSED = 0x02000000,
-	HFI_MSG_EVENT_NOTIFY,
-	HFI_MSG_SYS_SESSION_INIT_DONE,
-	HFI_MSG_SYS_SESSION_END_DONE,
-	HFI_MSG_SYS_SESSION_ABORT_DONE,
-	HFI_MSG_SESSION_LOAD_RESOURCES_DONE,
-	HFI_MSG_SESSION_START_DONE,
-	HFI_MSG_SESSION_STOP_DONE,
-	HFI_MSG_SESSION_SUSPEND_DONE,
-	HFI_MSG_SESSION_RESUME_DONE,
-	HFI_MSG_SESSION_EMPTY_BUFFER_DONE,
-	HFI_MSG_SESSION_FILL_BUFFER_DONE,
-	HFI_MSG_SESSION_FLUSH_DONE,
-	HFI_MSG_SESSION_PROPERTY_INFO,
-	HFI_MSG_SESSION_RELEASE_RESOURCES_DONE,
-	HFI_MSG_SESSION_PARSE_SEQUENCE_HEADER_DONE,
-	HFI_MSG_SESSION_GET_SEQUENCE_HEADER_DONE,
-	HFI_MSG_UNUSED = 0x10000000,
-};
-
-struct vidc_hal_msg_pkt_hdr {
-	u32 size;
-	enum HFI_MESSAGE packet;
-};
-
-struct vidc_hal_session_cmd_pkt {
-	u32 size;
-	enum HFI_COMMAND packet_type;
-	u32 session_id;
-};
-
-enum HFI_STATUS {
-	HFI_FAIL = 0,
-	HFI_SUCCESS,
-	HFI_UNUSED_STATUS = 0x10000000,
-};
-
-struct hfi_cmd_sys_init_packet {
-	u32 size;
-	enum HFI_COMMAND packet;
-};
-
-struct hfi_cmd_sys_session_init_packet {
-	u32 size;
-	enum HFI_COMMAND packet;
-	u32 session_id;
-	enum HFI_DOMAIN session_domain;
-	enum HFI_VIDEO_CODEC session_codec;
-};
-
-struct hfi_cmd_sys_session_end_packet {
-	u32 size;
-	enum HFI_COMMAND packet_type;
-	u32 session_id;
-};
+#define HFI_MSG_SESSION_OX_START		\
+	(HFI_DOMAIN_BASE_COMMON + HFI_ARCH_OX_OFFSET + 0x1000)
+#define HFI_MSG_SESSION_LOAD_RESOURCES_DONE	(HFI_MSG_SESSION_OX_START + 0x1)
+#define HFI_MSG_SESSION_START_DONE		(HFI_MSG_SESSION_OX_START + 0x2)
+#define HFI_MSG_SESSION_STOP_DONE		(HFI_MSG_SESSION_OX_START + 0x3)
+#define HFI_MSG_SESSION_SUSPEND_DONE	(HFI_MSG_SESSION_OX_START + 0x4)
+#define HFI_MSG_SESSION_RESUME_DONE		(HFI_MSG_SESSION_OX_START + 0x5)
+#define HFI_MSG_SESSION_FLUSH_DONE		(HFI_MSG_SESSION_OX_START + 0x6)
+#define HFI_MSG_SESSION_EMPTY_BUFFER_DONE	(HFI_MSG_SESSION_OX_START + 0x7)
+#define HFI_MSG_SESSION_FILL_BUFFER_DONE	(HFI_MSG_SESSION_OX_START + 0x8)
+#define HFI_MSG_SESSION_PROPERTY_INFO		(HFI_MSG_SESSION_OX_START + 0x9)
+#define HFI_MSG_SESSION_RELEASE_RESOURCES_DONE	(HFI_MSG_SESSION_OX_START + 0xA)
+#define HFI_MSG_SESSION_PARSE_SEQUENCE_HEADER_DONE		\
+	(HFI_MSG_SESSION_OX_START + 0xB)
 
 struct hfi_cmd_sys_session_abort_packet {
 	u32 size;
-	enum HFI_COMMAND packet_type;
+	u32 packet_type;
 	u32 session_id;
 };
 
-struct hfi_cmd_sys_pc_prep_packet {
-	u32 size;
-	enum HFI_COMMAND packet_type;
-};
-
-struct hfi_cmd_sys_set_resource_packet {
-	u32 size;
-	enum HFI_COMMAND packet_type;
-	u32 resource_handle;
-	enum HFI_RESOURCE resource_type;
-	u32 rg_resource_data[1];
-};
-
-struct hfi_cmd_sys_release_resource_packet {
-	u32 size;
-	enum HFI_COMMAND packet_type;
-	enum HFI_RESOURCE resource_type;
-	u32 resource_handle;
-};
-
 struct hfi_cmd_sys_ping_packet {
 	u32 size;
-	enum HFI_COMMAND packet_type;
+	u32 packet_type;
 	u32 client_data;
 };
 
-struct hfi_cmd_sys_set_property_packet {
-	u32 size;
-	enum HFI_COMMAND packet_type;
-	u32 num_properties;
-	u32 rg_property_data[1];
-};
-
-struct hfi_cmd_sys_get_property_packet {
-	u32 size;
-	enum HFI_COMMAND packet_type;
-	u32 num_properties;
-	enum HFI_PROPERTY rg_property_data[1];
-};
-
 struct hfi_cmd_session_load_resources_packet {
 	u32 size;
-	enum HFI_COMMAND packet_type;
+	u32 packet_type;
 	u32 session_id;
 };
 
 struct hfi_cmd_session_start_packet {
 	u32 size;
-	enum HFI_COMMAND packet_type;
+	u32 packet_type;
 	u32 session_id;
 };
 
 struct hfi_cmd_session_stop_packet {
 	u32 size;
-	enum HFI_COMMAND packet_type;
+	u32 packet_type;
 	u32 session_id;
 };
 
 struct hfi_cmd_session_empty_buffer_compressed_packet {
 	u32 size;
-	enum HFI_COMMAND packet_type;
+	u32 packet_type;
 	u32 session_id;
-	u32 timestamp_hi;
-	u32 timestamp_lo;
+	u32 time_stamp_hi;
+	u32 time_stamp_lo;
 	u32 flags;
 	u32 mark_target;
 	u32 mark_data;
@@ -1085,15 +433,16 @@
 	u32 filled_len;
 	u32 input_tag;
 	u8 *packet_buffer;
+	u8 *extra_data_buffer;
 };
 
 struct hfi_cmd_session_empty_buffer_uncompressed_plane0_packet {
 	u32 size;
-	enum HFI_COMMAND packet;
+	u32 packet_type;
 	u32 session_id;
 	u32 view_id;
-	u32 timestamp_hi;
-	u32 timestamp_lo;
+	u32 time_stamp_hi;
+	u32 time_stamp_lo;
 	u32 flags;
 	u32 mark_target;
 	u32 mark_data;
@@ -1102,6 +451,7 @@
 	u32 offset;
 	u32 input_tag;
 	u8 *packet_buffer;
+	u8 *extra_data_buffer;
 };
 
 struct hfi_cmd_session_empty_buffer_uncompressed_plane1_packet {
@@ -1122,234 +472,153 @@
 
 struct hfi_cmd_session_fill_buffer_packet {
 	u32 size;
-	enum HFI_COMMAND packet_type;
+	u32 packet_type;
 	u32 session_id;
 	u32 stream_id;
+	u32 output_tag;
 	u8 *packet_buffer;
 	u8 *extra_data_buffer;
 };
 
 struct hfi_cmd_session_flush_packet {
 	u32 size;
-	enum HFI_COMMAND packet_type;
+	u32 packet_type;
 	u32 session_id;
-	enum HFI_FLUSH flush_type;
+	u32 flush_type;
 };
 
 struct hfi_cmd_session_suspend_packet {
 	u32 size;
-	enum HFI_COMMAND packet;
+	u32 packet_type;
 	u32 session_id;
 };
 
 struct hfi_cmd_session_resume_packet {
 	u32 size;
-	enum HFI_COMMAND packet_type;
+	u32 packet_type;
 	u32 session_id;
 };
 
-struct hfi_cmd_session_set_property_packet {
-	u32 size;
-	enum HFI_COMMAND packet_type;
-	u32 session_id;
-	u32 num_properties;
-	u32 rg_property_data[0];
-};
-
 struct hfi_cmd_session_get_property_packet {
 	u32 size;
-	enum HFI_COMMAND packet_type;
+	u32 packet_type;
 	u32 session_id;
 	u32 num_properties;
-	enum HFI_PROPERTY rg_property_data[1];
-};
-
-struct hfi_buffer_info {
-	u32 buffer_addr;
-	u32 extradata_addr;
-};
-
-struct hfi_cmd_session_set_buffers_packet {
-	u32 size;
-	enum HFI_COMMAND packet_type;
-	u32 session_id;
-	enum HFI_BUFFER buffer_type;
-	enum HFI_BUFFER_MODE buffer_mode;
-	u32 buffer_size;
-	u32 extradata_size;
-	u32 min_buffer_size;
-	u32 num_buffers;
-	u32 rg_buffer_info[1];
+	u32 rg_property_data[1];
 };
 
 struct hfi_cmd_session_release_buffer_packet {
 	u32 size;
-	enum HFI_COMMAND packet_type;
+	u32 packet_type;
 	u32 session_id;
-	enum HFI_BUFFER buffer_type;
+	u32 buffer_type;
 	u32 buffer_size;
-	u32 extradata_size;
+	u32 extra_data_size;
 	u32 num_buffers;
 	u32 rg_buffer_info[1];
 };
 
 struct hfi_cmd_session_release_resources_packet {
 	u32 size;
-	enum HFI_COMMAND packet_type;
+	u32 packet_type;
 	u32 session_id;
 };
 
 struct hfi_cmd_session_parse_sequence_header_packet {
 	u32 size;
-	enum HFI_COMMAND packet_type;
+	u32 packet_type;
 	u32 session_id;
 	u32 header_len;
 	u8 *packet_buffer;
 };
 
-struct hfi_cmd_session_get_sequence_header_packet {
-	u32 size;
-	enum HFI_COMMAND packet_type;
-	u32 session_id;
-	u32 buffer_len;
-	u8 *packet_buffer;
-};
-
-struct hfi_msg_event_notify_packet {
-	u32 size;
-	enum HFI_MESSAGE packet_type;
-	u32 session_id;
-	enum HFI_EVENT event_id;
-	u32 event_data1;
-	u32 event_data2;
-	u32 rg_ext_event_data[1];
-};
-
-struct hfi_msg_sys_init_done_packet {
-	u32 size;
-	enum HFI_MESSAGE packet_type;
-	enum HFI_ERROR error_type;
-	u32 num_properties;
-	u32 rg_property_data[1];
-};
-
-struct hfi_msg_sys_session_init_done_packet {
-	u32 size;
-	enum HFI_MESSAGE packet_type;
-	u32 session_id;
-	enum HFI_ERROR error_type;
-	u32 num_properties;
-	u32 rg_property_data[1];
-};
-
-struct hfi_msg_sys_session_end_done_packet {
-	u32 size;
-	enum HFI_MESSAGE packet_type;
-	u32 session_id;
-	enum HFI_ERROR error_type;
-};
-
 struct hfi_msg_sys_session_abort_done_packet {
 	u32 size;
-	enum HFI_MESSAGE packet_type;
+	u32 packet_type;
 	u32 session_id;
-	enum HFI_ERROR error_type;
+	u32 error_type;
 };
 
 struct hfi_msg_sys_idle_packet {
 	u32 size;
-	enum HFI_MESSAGE packet_type;
-};
-
-struct hfi_msg_sys_pc_prep_done_packet {
-	u32 size;
-	enum HFI_MESSAGE packet_type;
-	enum HFI_ERROR error_type;
-};
-
-struct hfi_msg_sys_release_resource_done_packet {
-	u32 size;
-	enum HFI_MESSAGE packet_type;
-	u32 resource_handle;
-	enum HFI_ERROR error_type;
+	u32 packet_type;
 };
 
 struct hfi_msg_sys_ping_ack_packet {
 	u32 size;
-	enum HFI_MESSAGE packet_type;
+	u32 packet_type;
 	u32 client_data;
 };
 
-struct hfi_msg_sys_debug_packet {
-	u32 size;
-	enum HFI_MESSAGE packet_type;
-	enum HFI_DEBUG_MSG msg_type;
-	u32 msg_size;
-	u32 timestamp_hi;
-	u32 timestamp_lo;
-	u8 rg_msg_data[1];
-};
-
 struct hfi_msg_sys_property_info_packet {
-	u32 nsize;
-	enum HFI_MESSAGE packet_type;
+	u32 size;
+	u32 packet_type;
 	u32 num_properties;
 	u32 rg_property_data[1];
 };
 
 struct hfi_msg_session_load_resources_done_packet {
 	u32 size;
-	enum HFI_MESSAGE packet_type;
+	u32 packet_type;
 	u32 session_id;
-	enum HFI_ERROR error_type;
+	u32 error_type;
 };
 
 struct hfi_msg_session_start_done_packet {
 	u32 size;
-	enum HFI_MESSAGE packet_type;
+	u32 packet_type;
 	u32 session_id;
-	enum HFI_ERROR error_type;
+	u32 error_type;
 };
 
 struct hfi_msg_session_stop_done_packet {
 	u32 size;
-	enum HFI_MESSAGE packet_type;
+	u32 packet_type;
 	u32 session_id;
-	enum HFI_ERROR error_type;
+	u32 error_type;
 };
 
 struct hfi_msg_session_suspend_done_packet {
 	u32 size;
-	enum HFI_MESSAGE packet_type;
+	u32 packet_type;
 	u32 session_id;
-	enum HFI_ERROR error_type;
+	u32 error_type;
 };
 
 struct hfi_msg_session_resume_done_packet {
 	u32 size;
-	enum HFI_MESSAGE packet_type;
+	u32 packet_type;
 	u32 session_id;
-	enum HFI_ERROR error_type;
+	u32 error_type;
+};
+
+struct hfi_msg_session_flush_done_packet {
+	u32 size;
+	u32 packet_type;
+	u32 session_id;
+	u32 error_type;
+	u32 flush_type;
 };
 
 struct hfi_msg_session_empty_buffer_done_packet {
 	u32 size;
-	enum HFI_MESSAGE packet_type;
+	u32 packet_type;
 	u32 session_id;
-	enum HFI_ERROR error_type;
+	u32 error_type;
 	u32 offset;
 	u32 filled_len;
 	u32 input_tag;
 	u8 *packet_buffer;
+	u8 *extra_data_buffer;
 };
 
 struct hfi_msg_session_fill_buffer_done_compressed_packet {
 	u32 size;
-	enum HFI_MESSAGE packet_type;
+	u32 packet_type;
 	u32 session_id;
-	u32 timestamp_hi;
-	u32 timestamp_lo;
-	enum HFI_ERROR error_type;
+	u32 time_stamp_hi;
+	u32 time_stamp_lo;
+	u32 error_type;
 	u32 flags;
 	u32 mark_target;
 	u32 mark_data;
@@ -1358,34 +627,36 @@
 	u32 alloc_len;
 	u32 filled_len;
 	u32 input_tag;
-	enum HFI_PICTURE picture_type;
+	u32 output_tag;
+	u32 picture_type;
 	u8 *packet_buffer;
 	u8 *extra_data_buffer;
 };
 
 struct hfi_msg_session_fbd_uncompressed_plane0_packet {
 	u32 size;
-	enum HFI_MESSAGE packet_type;
+	u32 packet_type;
 	u32 session_id;
 	u32 stream_id;
 	u32 view_id;
-	enum HFI_ERROR error_type;
-	u32 timestamp_hi;
-	u32 timestamp_lo;
+	u32 error_type;
+	u32 time_stamp_hi;
+	u32 time_stamp_lo;
 	u32 flags;
 	u32 mark_target;
 	u32 mark_data;
 	u32 stats;
 	u32 alloc_len;
 	u32 filled_len;
-	u32 oofset;
+	u32 offset;
 	u32 frame_width;
 	u32 frame_height;
-	u32 start_xCoord;
-	u32 start_yCoord;
+	u32 start_x_coord;
+	u32 start_y_coord;
 	u32 input_tag;
-	u32 input_tag1;
-	enum HFI_PICTURE picture_type;
+	u32 input_tag2;
+	u32 output_tag;
+	u32 picture_type;
 	u8 *packet_buffer;
 	u8 *extra_data_buffer;
 };
@@ -1395,7 +666,7 @@
 	u32 alloc_len;
 	u32 filled_len;
 	u32 offset;
-	u8 *packet_buffer;
+	u8 *packet_buffer2;
 };
 
 struct hfi_msg_session_fill_buffer_done_uncompressed_plane2_packet {
@@ -1403,38 +674,21 @@
 	u32 alloc_len;
 	u32 filled_len;
 	u32 offset;
-	u8 *packet_buffer;
-};
-
-struct hfi_msg_session_flush_done_packet {
-	u32 size;
-	enum HFI_MESSAGE packet_type;
-	u32 session_id;
-	enum HFI_ERROR error_type;
-	enum HFI_FLUSH flush_type;
+	u8 *packet_buffer3;
 };
 
 struct hfi_msg_session_parse_sequence_header_done_packet {
 	u32 size;
-	enum HFI_MESSAGE packet_type;
+	u32 packet_type;
 	u32 session_id;
-	enum HFI_ERROR error_type;
+	u32 error_type;
 	u32 num_properties;
 	u32 rg_property_data[1];
 };
 
-struct hfi_msg_session_get_sequence_header_done_packet {
-	u32 size;
-	enum HFI_MESSAGE packet_type;
-	u32 session_id;
-	enum HFI_ERROR error_type;
-	u32 header_len;
-	u8 *sequence_header;
-};
-
 struct hfi_msg_session_property_info_packet {
 	u32 size;
-	enum HFI_MESSAGE packet_type;
+	u32 packet_type;
 	u32 session_id;
 	u32 num_properties;
 	u32 rg_property_data[1];
@@ -1442,9 +696,9 @@
 
 struct hfi_msg_session_release_resources_done_packet {
 	u32 size;
-	enum HFI_MESSAGE packet_type;
+	u32 packet_type;
 	u32 session_id;
-	enum HFI_ERROR error_type;
+	u32 error_type;
 };
 
 struct hfi_extradata_mb_quantization_payload {
@@ -1453,7 +707,7 @@
 
 struct hfi_extradata_vc1_pswnd {
 	u32 ps_wnd_h_offset;
-	u32 ps_wndv_offset;
+	u32 ps_wnd_v_offset;
 	u32 ps_wnd_width;
 	u32 ps_wnd_height;
 };
@@ -1481,12 +735,8 @@
 };
 
 struct hfi_extradata_timestamp_payload {
-	u32 timestamp_low;
-	u32 timestamp_high;
-};
-
-struct hfi_extradata_interlace_video_payload {
-	enum HFI_INTERLACE_FORMAT format;
+	u32 time_stamp_low;
+	u32 time_stamp_high;
 };
 
 enum HFI_S3D_FP_LAYOUT {
@@ -1496,14 +746,14 @@
 	HFI_S3D_FP_LAYOUT_INTRLV_ROW,
 	HFI_S3D_FP_LAYOUT_SIDEBYSIDE,
 	HFI_S3D_FP_LAYOUT_TOPBOTTOM,
-	HFI_S3D_FP_LAYOUT_UNUSED = 0x10000000,
+	HFI_S3D_FP_LAYOUT_UNUSED = 0x10000000
 };
 
 enum HFI_S3D_FP_VIEW_ORDER {
 	HFI_S3D_FP_LEFTVIEW_FIRST,
 	HFI_S3D_FP_RIGHTVIEW_FIRST,
 	HFI_S3D_FP_UNKNOWN,
-	HFI_S3D_FP_VIEWORDER_UNUSED = 0x10000000,
+	HFI_S3D_FP_VIEWORDER_UNUSED = 0x10000000
 };
 
 enum HFI_S3D_FP_FLIP {
@@ -1512,18 +762,22 @@
 	HFI_S3D_FP_FLIP_LEFT_VERT,
 	HFI_S3D_FP_FLIP_RIGHT_HORIZ,
 	HFI_S3D_FP_FLIP_RIGHT_VERT,
-	HFI_S3D_FP_FLIP_UNUSED = 0x10000000,
+	HFI_S3D_FP_FLIP_UNUSED = 0x10000000
 };
 
 struct hfi_extradata_s3d_frame_packing_payload {
-	enum HFI_S3D_FP_LAYOUT eLayout;
-	enum HFI_S3D_FP_VIEW_ORDER eOrder;
-	enum HFI_S3D_FP_FLIP eFlip;
-	int bQuinCunx;
-	u32 nLeftViewLumaSiteX;
-	u32 nLeftViewLumaSiteY;
-	u32 nRightViewLumaSiteX;
-	u32 nRightViewLumaSiteY;
+	enum HFI_S3D_FP_LAYOUT layout;
+	enum HFI_S3D_FP_VIEW_ORDER order;
+	enum HFI_S3D_FP_FLIP flip;
+	int quin_cunx;
+	u32 left_view_luma_site_x;
+	u32 left_view_luma_site_y;
+	u32 right_view_luma_site_x;
+	u32 right_view_luma_site_y;
+};
+
+struct hfi_extradata_interlace_video_payload {
+	u32 format;
 };
 
 struct hfi_extradata_num_concealed_mb_payload {
@@ -1615,4 +869,4 @@
 /* Interrupt Processing:*/
 void vidc_hal_response_handler(struct hal_device *device);
 
-#endif /*__VIDC_HAL_H__ */
+#endif
diff --git a/drivers/media/video/msm_vidc/vidc_hal_api.h b/drivers/media/video/msm_vidc/vidc_hal_api.h
index 036091b..332bbac 100644
--- a/drivers/media/video/msm_vidc/vidc_hal_api.h
+++ b/drivers/media/video/msm_vidc/vidc_hal_api.h
@@ -40,6 +40,12 @@
 #define HAL_BUFFERFLAG_READONLY         0x00000200
 #define HAL_BUFFERFLAG_ENDOFSUBFRAME    0x00000400
 
+#define HAL_DEBUG_MSG_LOW				0x00000001
+#define HAL_DEBUG_MSG_MEDIUM			0x00000002
+#define HAL_DEBUG_MSG_HIGH				0x00000004
+#define HAL_DEBUG_MSG_ERROR				0x00000008
+#define HAL_DEBUG_MSG_FATAL				0x00000010
+
 enum vidc_status {
 	VIDC_ERR_NONE = 0x0,
 	VIDC_ERR_FAIL = 0x80000000,
@@ -242,11 +248,12 @@
 enum hal_h264_profile {
 	HAL_H264_PROFILE_BASELINE = 0x00000001,
 	HAL_H264_PROFILE_MAIN     = 0x00000002,
-	HAL_H264_PROFILE_EXTENDED = 0x00000004,
-	HAL_H264_PROFILE_HIGH     = 0x00000008,
+	HAL_H264_PROFILE_HIGH     = 0x00000004,
+	HAL_H264_PROFILE_EXTENDED = 0x00000008,
 	HAL_H264_PROFILE_HIGH10   = 0x00000010,
 	HAL_H264_PROFILE_HIGH422  = 0x00000020,
 	HAL_H264_PROFILE_HIGH444  = 0x00000040,
+	HAL_H264_PROFILE_CONSTRAINED_HIGH  = 0x00000080,
 	HAL_UNUSED_H264_PROFILE = 0x10000000,
 };
 
@@ -541,7 +548,7 @@
 struct hal_h264_db_control {
 	enum hal_h264_db_mode mode;
 	int slice_alpha_offset;
-	int slicebeta_offset;
+	int slice_beta_offset;
 };
 
 struct hal_temporal_spatial_tradeoff {
@@ -786,6 +793,12 @@
 	HAL_UNUSED_FLUSH = 0x10000000,
 };
 
+enum hal_event_type {
+	HAL_EVENT_SEQ_CHANGED_SUFFICIENT_RESOURCES,
+	HAL_EVENT_SEQ_CHANGED_INSUFFICIENT_RESOURCES,
+	HAL_UNUSED_SEQCHG = 0x10000000,
+};
+
 /* HAL Response */
 
 enum command_response {
@@ -836,6 +849,7 @@
 	u32 status;
 	u32 height;
 	u32 width;
+	u32 hal_event_type;
 };
 
 /* Data callback structure */
diff --git a/drivers/media/video/msm_vidc/vidc_hal_helper.h b/drivers/media/video/msm_vidc/vidc_hal_helper.h
new file mode 100644
index 0000000..d4e2619
--- /dev/null
+++ b/drivers/media/video/msm_vidc/vidc_hal_helper.h
@@ -0,0 +1,832 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __H_VIDC_HAL_HELPER_H__
+#define __H_VIDC_HAL_HELPER_H__
+
+#define HFI_NV12_IL_CALC_Y_STRIDE(stride, frame_width, stride_multiple) \
+	{ stride = (frame_width + stride_multiple - 1) & \
+	(0xffffffff - (stride_multiple - 1))}
+
+#define HFI_NV12_IL_CALC_Y_BUFHEIGHT(buf_height, frame_height,\
+	min_buf_height_multiple) \
+	{ buf_height = (frame_height + min_buf_height_multiple - 1) & \
+	(0xffffffff - (min_buf_height_multiple - 1)) }
+
+#define HFI_NV12_IL_CALC_UV_STRIDE(stride, frame_width, stride_multiple) \
+	{ stride = ((((frame_width + 1) >> 1) + stride_multiple - 1) & \
+	(0xffffffff - (stride_multiple - 1))) << 1 }
+
+#define HFI_NV12_IL_CALC_UV_BUFHEIGHT(buf_height, frame_height,\
+	min_buf_height_multiple) \
+	{ buf_height = ((((frame_height + 1) >> 1) + \
+	min_buf_height_multiple - 1) & (0xffffffff - \
+	(min_buf_height_multiple - 1))) }
+
+#define HFI_NV12_IL_CALC_BUF_SIZE(buf_size, y_buf_size, y_stride, \
+	y_buf_height, uv_buf_size, uv_stride, uv_buf_height, uv_alignment) \
+	{ y_buf_size = (y_stride * y_buf_height); \
+	uv_buf_size = (uv_stride * uv_buf_height) + uv_alignment; \
+	buf_size = y_buf_size + uv_buf_size }
+
+#define HFI_YUYV_CALC_STRIDE(stride, frame_width, stride_multiple) \
+	{ stride = ((frame_width << 1) + stride_multiple - 1) & \
+	(0xffffffff - (stride_multiple - 1)) }
+
+#define HFI_YUYV_CALC_BUFHEIGHT(buf_height, frame_height,\
+	min_buf_height_multiple) \
+	{ buf_height = ((frame_height + min_buf_height_multiple - 1) & \
+	(0xffffffff - (min_buf_height_multiple - 1))) }
+
+#define HFI_YUYV_CALC_BUF_SIZE(buf_size, stride, buf_height) \
+	{ buf_size = stride * buf_height }
+
+#define HFI_RGB888_CALC_STRIDE(stride, frame_width, stride_multiple) \
+	{ stride = ((frame_width * 3) + stride_multiple - 1) & \
+	(0xffffffff - (stride_multiple - 1)) }
+
+#define HFI_RGB888_CALC_BUFHEIGHT(buf_height, frame_height,\
+	min_buf_height_multiple) \
+	{ buf_height = ((frame_height + min_buf_height_multiple - 1) & \
+	(0xffffffff - (min_buf_height_multiple - 1))) }
+
+#define HFI_RGB888_CALC_BUF_SIZE(buf_size, stride, buf_height) \
+	{ buf_size = (stride * buf_height) }
+
+#define HFI_COMMON_BASE				(0)
+#define HFI_OX_BASE					(0x01000000)
+
+#define HFI_VIDEO_DOMAIN_ENCODER	(HFI_COMMON_BASE + 0x1)
+#define HFI_VIDEO_DOMAIN_DECODER	(HFI_COMMON_BASE + 0x2)
+#define HFI_VIDEO_DOMAIN_VPE		(HFI_COMMON_BASE + 0x3)
+#define HFI_VIDEO_DOMAIN_MBI		(HFI_COMMON_BASE + 0x4)
+
+#define HFI_DOMAIN_BASE_COMMON		(HFI_COMMON_BASE + 0)
+#define HFI_DOMAIN_BASE_VDEC		(HFI_COMMON_BASE + 0x01000000)
+#define HFI_DOMAIN_BASE_VENC		(HFI_COMMON_BASE + 0x02000000)
+#define HFI_DOMAIN_BASE_VPE			(HFI_COMMON_BASE + 0x03000000)
+
+#define HFI_VIDEO_ARCH_OX			(HFI_COMMON_BASE + 0x1)
+
+#define HFI_ARCH_COMMON_OFFSET		(0)
+#define HFI_ARCH_OX_OFFSET			(0x00200000)
+
+#define HFI_ERR_NONE						HFI_COMMON_BASE
+#define HFI_ERR_SYS_FATAL				(HFI_COMMON_BASE + 0x1)
+#define HFI_ERR_SYS_INVALID_PARAMETER		(HFI_COMMON_BASE + 0x2)
+#define HFI_ERR_SYS_VERSION_MISMATCH		(HFI_COMMON_BASE + 0x3)
+#define HFI_ERR_SYS_INSUFFICIENT_RESOURCES	(HFI_COMMON_BASE + 0x4)
+#define HFI_ERR_SYS_MAX_SESSIONS_REACHED	(HFI_COMMON_BASE + 0x5)
+#define HFI_ERR_SYS_UNSUPPORTED_CODEC		(HFI_COMMON_BASE + 0x6)
+#define HFI_ERR_SYS_SESSION_IN_USE			(HFI_COMMON_BASE + 0x7)
+#define HFI_ERR_SYS_SESSION_ID_OUT_OF_RANGE	(HFI_COMMON_BASE + 0x8)
+#define HFI_ERR_SYS_UNSUPPORTED_DOMAIN		(HFI_COMMON_BASE + 0x9)
+
+#define HFI_ERR_SESSION_FATAL			(HFI_COMMON_BASE + 0x1001)
+#define HFI_ERR_SESSION_INVALID_PARAMETER	(HFI_COMMON_BASE + 0x1002)
+#define HFI_ERR_SESSION_BAD_POINTER		(HFI_COMMON_BASE + 0x1003)
+#define HFI_ERR_SESSION_INVALID_SESSION_ID	(HFI_COMMON_BASE + 0x1004)
+#define HFI_ERR_SESSION_INVALID_STREAM_ID	(HFI_COMMON_BASE + 0x1005)
+#define HFI_ERR_SESSION_INCORRECT_STATE_OPERATION		\
+	(HFI_COMMON_BASE + 0x1006)
+#define HFI_ERR_SESSION_UNSUPPORTED_PROPERTY	(HFI_COMMON_BASE + 0x1007)
+
+#define HFI_ERR_SESSION_UNSUPPORTED_SETTING	(HFI_COMMON_BASE + 0x1008)
+
+#define HFI_ERR_SESSION_INSUFFICIENT_RESOURCES	(HFI_COMMON_BASE + 0x1009)
+
+#define HFI_ERR_SESSION_STREAM_CORRUPT_OUTPUT_STALLED	\
+	(HFI_COMMON_BASE + 0x100A)
+
+#define HFI_ERR_SESSION_STREAM_CORRUPT		(HFI_COMMON_BASE + 0x100B)
+#define HFI_ERR_SESSION_ENC_OVERFLOW		(HFI_COMMON_BASE + 0x100C)
+
+#define HFI_EVENT_SYS_ERROR				(HFI_COMMON_BASE + 0x1)
+#define HFI_EVENT_SESSION_ERROR			(HFI_COMMON_BASE + 0x2)
+
+#define HFI_VIDEO_CODEC_H264				0x00000002
+#define HFI_VIDEO_CODEC_H263				0x00000004
+#define HFI_VIDEO_CODEC_MPEG1				0x00000008
+#define HFI_VIDEO_CODEC_MPEG2				0x00000010
+#define HFI_VIDEO_CODEC_MPEG4				0x00000020
+#define HFI_VIDEO_CODEC_DIVX_311			0x00000040
+#define HFI_VIDEO_CODEC_DIVX				0x00000080
+#define HFI_VIDEO_CODEC_VC1					0x00000100
+#define HFI_VIDEO_CODEC_SPARK				0x00000200
+#define HFI_VIDEO_CODEC_VP8					0x00001000
+
+#define HFI_H264_PROFILE_BASELINE			0x00000001
+#define HFI_H264_PROFILE_MAIN				0x00000002
+#define HFI_H264_PROFILE_HIGH				0x00000004
+#define HFI_H264_PROFILE_STEREO_HIGH		0x00000008
+#define HFI_H264_PROFILE_MULTIVIEW_HIGH		0x00000010
+#define HFI_H264_PROFILE_CONSTRAINED_HIGH	0x00000020
+
+#define HFI_H264_LEVEL_1					0x00000001
+#define HFI_H264_LEVEL_1b					0x00000002
+#define HFI_H264_LEVEL_11					0x00000004
+#define HFI_H264_LEVEL_12					0x00000008
+#define HFI_H264_LEVEL_13					0x00000010
+#define HFI_H264_LEVEL_2					0x00000020
+#define HFI_H264_LEVEL_21					0x00000040
+#define HFI_H264_LEVEL_22					0x00000080
+#define HFI_H264_LEVEL_3					0x00000100
+#define HFI_H264_LEVEL_31					0x00000200
+#define HFI_H264_LEVEL_32					0x00000400
+#define HFI_H264_LEVEL_4					0x00000800
+#define HFI_H264_LEVEL_41					0x00001000
+#define HFI_H264_LEVEL_42					0x00002000
+#define HFI_H264_LEVEL_5					0x00004000
+#define HFI_H264_LEVEL_51					0x00008000
+
+#define HFI_H263_PROFILE_BASELINE			0x00000001
+
+#define HFI_H263_LEVEL_10					0x00000001
+#define HFI_H263_LEVEL_20					0x00000002
+#define HFI_H263_LEVEL_30					0x00000004
+#define HFI_H263_LEVEL_40					0x00000008
+#define HFI_H263_LEVEL_45					0x00000010
+#define HFI_H263_LEVEL_50					0x00000020
+#define HFI_H263_LEVEL_60					0x00000040
+#define HFI_H263_LEVEL_70					0x00000080
+
+#define HFI_MPEG2_PROFILE_SIMPLE			0x00000001
+#define HFI_MPEG2_PROFILE_MAIN				0x00000002
+#define HFI_MPEG2_PROFILE_422				0x00000004
+#define HFI_MPEG2_PROFILE_SNR				0x00000008
+#define HFI_MPEG2_PROFILE_SPATIAL			0x00000010
+#define HFI_MPEG2_PROFILE_HIGH				0x00000020
+
+#define HFI_MPEG2_LEVEL_LL					0x00000001
+#define HFI_MPEG2_LEVEL_ML					0x00000002
+#define HFI_MPEG2_LEVEL_H14					0x00000004
+#define HFI_MPEG2_LEVEL_HL					0x00000008
+
+#define HFI_MPEG4_PROFILE_SIMPLE			0x00000001
+#define HFI_MPEG4_PROFILE_ADVANCEDSIMPLE	0x00000002
+
+#define HFI_MPEG4_LEVEL_0					0x00000001
+#define HFI_MPEG4_LEVEL_0b					0x00000002
+#define HFI_MPEG4_LEVEL_1					0x00000004
+#define HFI_MPEG4_LEVEL_2					0x00000008
+#define HFI_MPEG4_LEVEL_3					0x00000010
+#define HFI_MPEG4_LEVEL_4					0x00000020
+#define HFI_MPEG4_LEVEL_4a					0x00000040
+#define HFI_MPEG4_LEVEL_5					0x00000080
+#define HFI_MPEG4_LEVEL_6					0x00000100
+#define HFI_MPEG4_LEVEL_7					0x00000200
+#define HFI_MPEG4_LEVEL_8					0x00000400
+#define HFI_MPEG4_LEVEL_9					0x00000800
+#define HFI_MPEG4_LEVEL_3b					0x00001000
+
+#define HFI_VC1_PROFILE_SIMPLE				0x00000001
+#define HFI_VC1_PROFILE_MAIN				0x00000002
+#define HFI_VC1_PROFILE_ADVANCED			0x00000004
+
+#define HFI_VC1_LEVEL_LOW					0x00000001
+#define HFI_VC1_LEVEL_MEDIUM				0x00000002
+#define HFI_VC1_LEVEL_HIGH					0x00000004
+#define HFI_VC1_LEVEL_0						0x00000008
+#define HFI_VC1_LEVEL_1						0x00000010
+#define HFI_VC1_LEVEL_2						0x00000020
+#define HFI_VC1_LEVEL_3						0x00000040
+#define HFI_VC1_LEVEL_4						0x00000080
+
+#define HFI_VPX_PROFILE_SIMPLE				0x00000001
+#define HFI_VPX_PROFILE_ADVANCED			0x00000002
+#define HFI_VPX_PROFILE_VERSION_0			0x00000004
+#define HFI_VPX_PROFILE_VERSION_1			0x00000008
+#define HFI_VPX_PROFILE_VERSION_2			0x00000010
+#define HFI_VPX_PROFILE_VERSION_3			0x00000020
+
+#define HFI_DIVX_FORMAT_4				(HFI_COMMON_BASE + 0x1)
+#define HFI_DIVX_FORMAT_5				(HFI_COMMON_BASE + 0x2)
+#define HFI_DIVX_FORMAT_6				(HFI_COMMON_BASE + 0x3)
+
+#define HFI_DIVX_PROFILE_QMOBILE		0x00000001
+#define HFI_DIVX_PROFILE_MOBILE			0x00000002
+#define HFI_DIVX_PROFILE_MT				0x00000004
+#define HFI_DIVX_PROFILE_HT				0x00000008
+#define HFI_DIVX_PROFILE_HD				0x00000010
+
+#define HFI_BUFFER_INPUT				(HFI_COMMON_BASE + 0x1)
+#define HFI_BUFFER_OUTPUT				(HFI_COMMON_BASE + 0x2)
+#define HFI_BUFFER_OUTPUT2				(HFI_COMMON_BASE + 0x3)
+#define HFI_BUFFER_INTERNAL_PERSIST		(HFI_COMMON_BASE + 0x4)
+
+struct hfi_buffer_info {
+	u32 buffer_addr;
+	u32 extra_data_addr;
+};
+
+#define HFI_PROPERTY_SYS_COMMON_START		\
+	(HFI_DOMAIN_BASE_COMMON + HFI_ARCH_COMMON_OFFSET + 0x0000)
+#define HFI_PROPERTY_SYS_DEBUG_CONFIG		\
+	(HFI_PROPERTY_SYS_COMMON_START + 0x001)
+#define HFI_PROPERTY_SYS_RESOURCE_OCMEM_REQUIREMENT_INFO	\
+(HFI_PROPERTY_SYS_COMMON_START + 0x002)
+#define HFI_PROPERTY_PARAM_COMMON_START		\
+	(HFI_DOMAIN_BASE_COMMON + HFI_ARCH_COMMON_OFFSET + 0x1000)
+#define HFI_PROPERTY_PARAM_FRAME_SIZE		\
+	(HFI_PROPERTY_PARAM_COMMON_START + 0x001)
+#define HFI_PROPERTY_PARAM_UNCOMPRESSED_PLANE_ACTUAL_INFO	\
+	(HFI_PROPERTY_PARAM_COMMON_START + 0x002)
+#define HFI_PROPERTY_PARAM_UNCOMPRESSED_FORMAT_SELECT		\
+	(HFI_PROPERTY_PARAM_COMMON_START + 0x003)
+#define HFI_PROPERTY_PARAM_UNCOMPRESSED_FORMAT_SUPPORTED	\
+	(HFI_PROPERTY_PARAM_COMMON_START + 0x004)
+#define HFI_PROPERTY_PARAM_PROFILE_LEVEL_CURRENT			\
+	(HFI_PROPERTY_PARAM_COMMON_START + 0x005)
+#define HFI_PROPERTY_PARAM_PROFILE_LEVEL_SUPPORTED			\
+	(HFI_PROPERTY_PARAM_COMMON_START + 0x006)
+#define HFI_PROPERTY_PARAM_CAPABILITY_SUPPORTED				\
+	(HFI_PROPERTY_PARAM_COMMON_START + 0x007)
+#define HFI_PROPERTY_PARAM_PROPERTIES_SUPPORTED				\
+	(HFI_PROPERTY_PARAM_COMMON_START + 0x008)
+#define HFI_PROPERTY_PARAM_CODEC_SUPPORTED			\
+	(HFI_PROPERTY_PARAM_COMMON_START + 0x009)
+#define HFI_PROPERTY_PARAM_NAL_STREAM_FORMAT_SUPPORTED		\
+	(HFI_PROPERTY_PARAM_COMMON_START + 0x00A)
+#define HFI_PROPERTY_PARAM_NAL_STREAM_FORMAT_SELECT			\
+	(HFI_PROPERTY_PARAM_COMMON_START + 0x00B)
+#define HFI_PROPERTY_PARAM_MULTI_VIEW_FORMAT				\
+	(HFI_PROPERTY_PARAM_COMMON_START + 0x00C)
+
+#define HFI_PROPERTY_CONFIG_COMMON_START				\
+	(HFI_DOMAIN_BASE_COMMON + HFI_ARCH_COMMON_OFFSET + 0x2000)
+#define HFI_PROPERTY_CONFIG_FRAME_RATE					\
+	(HFI_PROPERTY_CONFIG_COMMON_START + 0x001)
+
+#define HFI_PROPERTY_PARAM_VDEC_COMMON_START				\
+	(HFI_DOMAIN_BASE_VDEC + HFI_ARCH_COMMON_OFFSET + 0x3000)
+#define HFI_PROPERTY_PARAM_VDEC_MULTI_STREAM				\
+	(HFI_PROPERTY_PARAM_VDEC_COMMON_START + 0x001)
+
+#define HFI_PROPERTY_CONFIG_VDEC_COMMON_START				\
+	(HFI_DOMAIN_BASE_VDEC + HFI_ARCH_COMMON_OFFSET + 0x4000)
+
+#define HFI_PROPERTY_PARAM_VENC_COMMON_START				\
+	(HFI_DOMAIN_BASE_VENC + HFI_ARCH_COMMON_OFFSET + 0x5000)
+#define HFI_PROPERTY_PARAM_VENC_SLICE_DELIVERY_MODE			\
+	(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x001)
+#define HFI_PROPERTY_PARAM_VENC_H264_ENTROPY_CONTROL		\
+	(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x002)
+#define HFI_PROPERTY_PARAM_VENC_H264_DEBLOCK_CONTROL		\
+	(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x003)
+#define HFI_PROPERTY_PARAM_VENC_RATE_CONTROL				\
+	(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x004)
+#define HFI_PROPERTY_PARAM_VENC_TEMPORAL_SPATIAL_TRADEOFF	\
+	(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x005)
+#define HFI_PROPERTY_PARAM_VENC_QUALITY_VS_SPEED			\
+	(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x010)
+#define HFI_PROPERTY_PARAM_VENC_SESSION_QP				\
+	(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x006)
+#define HFI_PROPERTY_PARAM_VENC_MPEG4_AC_PREDICTION			\
+	(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x007)
+#define HFI_PROPERTY_PARAM_VENC_MPEG4_DATA_PARTITIONING		\
+	(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x008)
+#define HFI_PROPERTY_PARAM_VENC_MPEG4_TIME_RESOLUTION		\
+	(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x009)
+#define HFI_PROPERTY_PARAM_VENC_MPEG4_SHORT_HEADER			\
+	(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x00A)
+#define HFI_PROPERTY_PARAM_VENC_MPEG4_HEADER_EXTENSION		\
+	(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x00B)
+#define HFI_PROPERTY_PARAM_VENC_MULTI_SLICE_INFO			\
+	(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x00C)
+#define HFI_PROPERTY_PARAM_VENC_INTRA_REFRESH				\
+	(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x00D)
+#define HFI_PROPERTY_PARAM_VENC_MULTI_SLICE_CONTROL			\
+	(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x00E)
+#define HFI_PROPERTY_PARAM_VENC_VBVBUFFER_SIZE				\
+	(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x00F)
+#define HFI_PROPERTY_PARAM_VENC_MPEG4_QPEL				\
+	(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x011)
+#define HFI_PROPERTY_PARAM_VENC_ADVANCED				\
+	(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x012)
+#define HFI_PROPERTY_PARAM_VENC_SYNC_FRAME_SEQUENCE_HEADER	\
+	(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x013)
+#define HFI_PROPERTY_PARAM_VENC_H264_IDR_S3D_FRAME_PACKING_NAL	\
+	(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x014)
+
+#define HFI_PROPERTY_CONFIG_VENC_COMMON_START				\
+	(HFI_DOMAIN_BASE_VENC + HFI_ARCH_COMMON_OFFSET + 0x6000)
+#define HFI_PROPERTY_CONFIG_VENC_TARGET_BITRATE				\
+	(HFI_PROPERTY_CONFIG_VENC_COMMON_START + 0x001)
+#define HFI_PROPERTY_CONFIG_VENC_IDR_PERIOD				\
+	(HFI_PROPERTY_CONFIG_VENC_COMMON_START + 0x002)
+#define HFI_PROPERTY_CONFIG_VENC_INTRA_PERIOD				\
+	(HFI_PROPERTY_CONFIG_VENC_COMMON_START + 0x003)
+#define HFI_PROPERTY_CONFIG_VENC_REQUEST_SYNC_FRAME			\
+	(HFI_PROPERTY_CONFIG_VENC_COMMON_START + 0x004)
+#define HFI_PROPERTY_CONFIG_VENC_TIMESTAMP_SCALE			\
+	(HFI_PROPERTY_CONFIG_VENC_COMMON_START + 0x005)
+#define HFI_PROPERTY_CONFIG_VENC_FRAME_QP				\
+	(HFI_PROPERTY_CONFIG_VENC_COMMON_START + 0x006)
+#define HFI_PROPERTY_CONFIG_VENC_MAX_BITRATE				\
+	(HFI_PROPERTY_CONFIG_VENC_COMMON_START + 0x007)
+
+#define HFI_PROPERTY_PARAM_VPE_COMMON_START				\
+	(HFI_DOMAIN_BASE_VPE + HFI_ARCH_COMMON_OFFSET + 0x7000)
+
+#define HFI_PROPERTY_CONFIG_VPE_COMMON_START				\
+	(HFI_DOMAIN_BASE_VPE + HFI_ARCH_COMMON_OFFSET + 0x8000)
+#define HFI_PROPERTY_CONFIG_VPE_DEINTERLACE				\
+	(HFI_PROPERTY_CONFIG_VPE_COMMON_START + 0x001)
+#define HFI_PROPERTY_CONFIG_VPE_OPERATIONS				\
+	(HFI_PROPERTY_CONFIG_VPE_COMMON_START + 0x002)
+
+struct hfi_bitrate {
+	u32 bit_rate;
+};
+
+#define HFI_CAPABILITY_FRAME_WIDTH			(HFI_COMMON_BASE + 0x1)
+#define HFI_CAPABILITY_FRAME_HEIGHT			(HFI_COMMON_BASE + 0x2)
+#define HFI_CAPABILITY_MBS_PER_FRAME		(HFI_COMMON_BASE + 0x3)
+#define HFI_CAPABILITY_MBS_PER_SECOND		(HFI_COMMON_BASE + 0x4)
+#define HFI_CAPABILITY_FRAMERATE			(HFI_COMMON_BASE + 0x5)
+#define HFI_CAPABILITY_SCALE_X				(HFI_COMMON_BASE + 0x6)
+#define HFI_CAPABILITY_SCALE_Y				(HFI_COMMON_BASE + 0x7)
+#define HFI_CAPABILITY_BITRATE				(HFI_COMMON_BASE + 0x8)
+
+struct hfi_capability_supported {
+	u32 capability_type;
+	u32 min;
+	u32 max;
+	u32 step_size;
+};
+
+struct hfi_capability_supported_info {
+	u32 num_capabilities;
+	struct hfi_capability_supported rg_data[1];
+};
+
+#define HFI_DEBUG_MSG_LOW					0x00000001
+#define HFI_DEBUG_MSG_MEDIUM				0x00000002
+#define HFI_DEBUG_MSG_HIGH					0x00000004
+#define HFI_DEBUG_MSG_ERROR					0x00000008
+#define HFI_DEBUG_MSG_FATAL					0x00000010
+
+struct hfi_debug_config {
+	u32 debug_config;
+};
+
+struct hfi_enable {
+	int enable;
+};
+
+#define HFI_H264_DB_MODE_DISABLE			(HFI_COMMON_BASE + 0x1)
+#define HFI_H264_DB_MODE_SKIP_SLICE_BOUNDARY	(HFI_COMMON_BASE + 0x2)
+#define HFI_H264_DB_MODE_ALL_BOUNDARY			(HFI_COMMON_BASE + 0x3)
+
+struct hfi_h264_db_control {
+	u32 mode;
+	int slice_alpha_offset;
+	int slice_beta_offset;
+};
+
+#define HFI_H264_ENTROPY_CAVLC				(HFI_COMMON_BASE + 0x1)
+#define HFI_H264_ENTROPY_CABAC				(HFI_COMMON_BASE + 0x2)
+
+#define HFI_H264_CABAC_MODEL_0				(HFI_COMMON_BASE + 0x1)
+#define HFI_H264_CABAC_MODEL_1				(HFI_COMMON_BASE + 0x2)
+#define HFI_H264_CABAC_MODEL_2				(HFI_COMMON_BASE + 0x3)
+
+struct hfi_h264_entropy_control {
+	u32 entropy_mode;
+	u32 cabac_model;
+};
+
+struct hfi_frame_rate {
+	u32 buffer_type;
+	u32 frame_rate;
+};
+
+#define HFI_INTRA_REFRESH_NONE				(HFI_COMMON_BASE + 0x1)
+#define HFI_INTRA_REFRESH_CYCLIC			(HFI_COMMON_BASE + 0x2)
+#define HFI_INTRA_REFRESH_ADAPTIVE			(HFI_COMMON_BASE + 0x3)
+#define HFI_INTRA_REFRESH_CYCLIC_ADAPTIVE	(HFI_COMMON_BASE + 0x4)
+#define HFI_INTRA_REFRESH_RANDOM			(HFI_COMMON_BASE + 0x5)
+
+struct hfi_intra_refresh {
+	u32 mode;
+	u32 air_mbs;
+	u32 air_ref;
+	u32 cir_mbs;
+};
+
+struct hfi_idr_period {
+	u32 idr_period;
+};
+
+struct hfi_intra_period {
+	u32 pframes;
+	u32 bframes;
+};
+
+struct hfi_timestamp_scale {
+	u32 time_stamp_scale;
+};
+
+struct hfi_mpeg4_header_extension {
+	u32 header_extension;
+};
+
+struct hfi_mpeg4_time_resolution {
+	u32 time_increment_resolution;
+};
+
+struct hfi_multi_stream {
+	u32 buffer_type;
+	u32 enable;
+	u32 width;
+	u32 height;
+};
+
+struct hfi_multi_view_format {
+	u32 views;
+	u32 rg_view_order[1];
+};
+
+#define HFI_MULTI_SLICE_OFF				(HFI_COMMON_BASE + 0x1)
+#define HFI_MULTI_SLICE_BY_MB_COUNT			(HFI_COMMON_BASE + 0x2)
+#define HFI_MULTI_SLICE_BY_BYTE_COUNT		(HFI_COMMON_BASE + 0x3)
+#define HFI_MULTI_SLICE_GOB				(HFI_COMMON_BASE + 0x4)
+
+struct hfi_multi_slice_control {
+	u32 multi_slice;
+	u32 slice_size;
+};
+
+#define HFI_NAL_FORMAT_STARTCODES			0x00000001
+#define HFI_NAL_FORMAT_ONE_NAL_PER_BUFFER	0x00000002
+#define HFI_NAL_FORMAT_ONE_BYTE_LENGTH		0x00000004
+#define HFI_NAL_FORMAT_TWO_BYTE_LENGTH		0x00000008
+#define HFI_NAL_FORMAT_FOUR_BYTE_LENGTH		0x00000010
+
+struct hfi_nal_stream_format_supported {
+	u32 nal_stream_format_supported;
+};
+
+#define HFI_PICTURE_TYPE_I					0x01
+#define HFI_PICTURE_TYPE_P					0x02
+#define HFI_PICTURE_TYPE_B					0x04
+#define HFI_PICTURE_TYPE_IDR				0x08
+
+struct hfi_profile_level {
+	u32 profile;
+	u32 level;
+};
+
+struct hfi_profile_level_supported {
+	u32 profile_count;
+	struct hfi_profile_level rg_profile_level[1];
+};
+
+struct hfi_quantization {
+	u32 qp_i;
+	u32 qp_p;
+	u32 qp_b;
+	u32 layer_id;
+};
+
+struct hfi_temporal_spatial_tradeoff {
+	u32 ts_factor;
+};
+
+struct hfi_frame_size {
+	u32 buffer_type;
+	u32 width;
+	u32 height;
+};
+
+#define HFI_COLOR_FORMAT_MONOCHROME			(HFI_COMMON_BASE + 0x1)
+#define HFI_COLOR_FORMAT_NV12				(HFI_COMMON_BASE + 0x2)
+#define HFI_COLOR_FORMAT_NV21				(HFI_COMMON_BASE + 0x3)
+#define HFI_COLOR_FORMAT_NV12_4x4TILE		(HFI_COMMON_BASE + 0x4)
+#define HFI_COLOR_FORMAT_NV21_4x4TILE		(HFI_COMMON_BASE + 0x5)
+#define HFI_COLOR_FORMAT_YUYV				(HFI_COMMON_BASE + 0x6)
+#define HFI_COLOR_FORMAT_YVYU				(HFI_COMMON_BASE + 0x7)
+#define HFI_COLOR_FORMAT_UYVY				(HFI_COMMON_BASE + 0x8)
+#define HFI_COLOR_FORMAT_VYUY				(HFI_COMMON_BASE + 0x9)
+#define HFI_COLOR_FORMAT_RGB565				(HFI_COMMON_BASE + 0xA)
+#define HFI_COLOR_FORMAT_BGR565				(HFI_COMMON_BASE + 0xB)
+#define HFI_COLOR_FORMAT_RGB888				(HFI_COMMON_BASE + 0xC)
+#define HFI_COLOR_FORMAT_BGR888				(HFI_COMMON_BASE + 0xD)
+
+struct hfi_uncompressed_format_select {
+	u32 buffer_type;
+	u32 format;
+};
+
+struct hfi_uncompressed_format_supported {
+	u32 buffer_type;
+	u32 format_entries;
+	u32 rg_format_info[1];
+};
+
+struct hfi_uncompressed_plane_actual {
+	int actual_stride;
+	u32 actual_plane_buffer_height;
+};
+
+struct hfi_uncompressed_plane_actual_info {
+	u32 buffer_type;
+	u32 num_planes;
+	struct hfi_uncompressed_plane_actual rg_plane_format[1];
+};
+
+struct hfi_uncompressed_plane_constraints {
+	u32 stride_multiples;
+	u32 max_stride;
+	u32 min_plane_buffer_height_multiple;
+	u32 buffer_alignment;
+};
+
+struct hfi_uncompressed_plane_info {
+	u32 format;
+	u32 num_planes;
+	struct hfi_uncompressed_plane_constraints rg_plane_format[1];
+};
+
+struct hfi_codec_supported {
+	u32 decoder_codec_supported;
+	u32 encoder_codec_supported;
+};
+
+struct hfi_properties_supported {
+	u32 num_properties;
+	u32 rg_properties[1];
+};
+
+#define HFI_ROTATE_NONE					(HFI_COMMON_BASE + 0x1)
+#define HFI_ROTATE_90					(HFI_COMMON_BASE + 0x2)
+#define HFI_ROTATE_180					(HFI_COMMON_BASE + 0x3)
+#define HFI_ROTATE_270					(HFI_COMMON_BASE + 0x4)
+
+#define HFI_FLIP_NONE					(HFI_COMMON_BASE + 0x1)
+#define HFI_FLIP_HORIZONTAL				(HFI_COMMON_BASE + 0x2)
+#define HFI_FLIP_VERTICAL				(HFI_COMMON_BASE + 0x3)
+
+struct hfi_operations {
+	u32 rotate;
+	u32 flip;
+};
+
+#define HFI_RESOURCE_OCMEM 0x00000001
+
+struct hfi_resource_ocmem {
+	u32 size;
+	u8 *mem;
+};
+
+struct hfi_resource_ocmem_requirement {
+	u32 session_domain;
+	u32 width;
+	u32 height;
+	u32 size;
+};
+
+struct hfi_resource_ocmem_requirement_info {
+	u32 num_entries;
+	struct hfi_resource_ocmem_requirement rg_requirements[1];
+};
+
+struct hfi_venc_config_advanced {
+	u8 pipe2d;
+	u8 hw_mode;
+	u8 low_delay_enforce;
+	int h264_constrain_intra_pred;
+	int h264_transform_8x8_flag;
+	int mpeg4_qpel_enable;
+	int multi_refp_en;
+	int qmatrix_en;
+	u8 vpp_info_packet_mode;
+	u8 ref_tile_mode;
+	u8 bitstream_flush_mode;
+	u32 ds_display_frame_width;
+	u32 ds_display_frame_height;
+	u32 perf_tune_param_ptr;
+	u32 input_x_offset;
+	u32 input_y_offset;
+	u32 input_roi_width;
+	u32 input_roi_height;
+	u32 vsp_fifo_dma_sel;
+	u32 h264_num_ref_frames;
+};
+
+#define HFI_CMD_SYS_COMMON_START			\
+	(HFI_DOMAIN_BASE_COMMON + HFI_ARCH_COMMON_OFFSET + 0x0000)
+#define HFI_CMD_SYS_INIT		(HFI_CMD_SYS_COMMON_START + 0x001)
+#define HFI_CMD_SYS_PC_PREP		(HFI_CMD_SYS_COMMON_START + 0x002)
+#define HFI_CMD_SYS_SET_RESOURCE	(HFI_CMD_SYS_COMMON_START + 0x003)
+#define HFI_CMD_SYS_RELEASE_RESOURCE (HFI_CMD_SYS_COMMON_START + 0x004)
+#define HFI_CMD_SYS_SET_PROPERTY	(HFI_CMD_SYS_COMMON_START + 0x005)
+#define HFI_CMD_SYS_GET_PROPERTY	(HFI_CMD_SYS_COMMON_START + 0x006)
+#define HFI_CMD_SYS_SESSION_INIT	(HFI_CMD_SYS_COMMON_START + 0x007)
+#define HFI_CMD_SYS_SESSION_END		(HFI_CMD_SYS_COMMON_START + 0x008)
+#define HFI_CMD_SYS_SET_BUFFERS		(HFI_CMD_SYS_COMMON_START + 0x009)
+
+#define HFI_CMD_SESSION_COMMON_START		\
+	(HFI_DOMAIN_BASE_COMMON + HFI_ARCH_COMMON_OFFSET + 0x1000)
+#define HFI_CMD_SESSION_SET_PROPERTY		\
+	(HFI_CMD_SESSION_COMMON_START + 0x001)
+#define HFI_CMD_SESSION_SET_BUFFERS			\
+	(HFI_CMD_SESSION_COMMON_START + 0x002)
+#define HFI_CMD_SESSION_GET_SEQUENCE_HEADER	\
+	(HFI_CMD_SESSION_COMMON_START + 0x003)
+
+#define HFI_MSG_SYS_COMMON_START			\
+	(HFI_DOMAIN_BASE_COMMON + HFI_ARCH_COMMON_OFFSET + 0x0000)
+#define HFI_MSG_SYS_INIT_DONE			(HFI_MSG_SYS_COMMON_START + 0x1)
+#define HFI_MSG_SYS_PC_PREP_DONE		(HFI_MSG_SYS_COMMON_START + 0x2)
+#define HFI_MSG_SYS_RELEASE_RESOURCE	(HFI_MSG_SYS_COMMON_START + 0x3)
+#define HFI_MSG_SYS_DEBUG			(HFI_MSG_SYS_COMMON_START + 0x4)
+#define HFI_MSG_SYS_SESSION_INIT_DONE	(HFI_MSG_SYS_COMMON_START + 0x6)
+#define HFI_MSG_SYS_SESSION_END_DONE	(HFI_MSG_SYS_COMMON_START + 0x7)
+
+#define HFI_MSG_SESSION_COMMON_START		\
+	(HFI_DOMAIN_BASE_COMMON + HFI_ARCH_COMMON_OFFSET + 0x1000)
+#define HFI_MSG_EVENT_NOTIFY	(HFI_MSG_SESSION_COMMON_START + 0x1)
+#define HFI_MSG_SESSION_GET_SEQUENCE_HEADER_DONE	\
+	(HFI_MSG_SESSION_COMMON_START + 0x2)
+
+struct vidc_hal_msg_pkt_hdr {
+	u32 size;
+	u32 packet;
+};
+
+struct vidc_hal_session_cmd_pkt {
+	u32 size;
+	u32 packet_type;
+	u32 session_id;
+};
+
+struct hfi_cmd_sys_init_packet {
+	u32 size;
+	u32 packet_type;
+	u32 arch_type;
+};
+
+struct hfi_cmd_sys_pc_prep_packet {
+	u32 size;
+	u32 packet_type;
+};
+
+struct hfi_cmd_sys_set_resource_packet {
+	u32 size;
+	u32 packet_type;
+	u32 resource_handle;
+	u32 resource_type;
+	u32 rg_resource_data[1];
+};
+
+struct hfi_cmd_sys_release_resource_packet {
+	u32 size;
+	u32 packet_type;
+	u32 resource_type;
+	u32 resource_handle;
+};
+
+struct hfi_cmd_sys_set_property_packet {
+	u32 size;
+	u32 packet_type;
+	u32 num_properties;
+	u32 rg_property_data[1];
+};
+
+struct hfi_cmd_sys_get_property_packet {
+	u32 size;
+	u32 packet_type;
+	u32 num_properties;
+	u32 rg_property_data[1];
+};
+
+struct hfi_cmd_sys_session_init_packet {
+	u32 size;
+	u32 packet_type;
+	u32 session_id;
+	u32 session_domain;
+	u32 session_codec;
+};
+
+struct hfi_cmd_sys_session_end_packet {
+	u32 size;
+	u32 packet_type;
+	u32 session_id;
+};
+
+struct hfi_cmd_sys_set_buffers_packet {
+	u32 size;
+	u32 packet_type;
+	u32 buffer_type;
+	u32 buffer_size;
+	u32 num_buffers;
+	u32 rg_buffer_addr[1];
+};
+
+struct hfi_cmd_session_set_property_packet {
+	u32 size;
+	u32 packet_type;
+	u32 session_id;
+	u32 num_properties;
+	u32 rg_property_data[0];
+};
+
+struct hfi_cmd_session_set_buffers_packet {
+	u32 size;
+	u32 packet_type;
+	u32 session_id;
+	u32 buffer_type;
+	u32 buffer_mode;
+	u32 buffer_size;
+	u32 extra_data_size;
+	u32 min_buffer_size;
+	u32 num_buffers;
+	u32 rg_buffer_info[1];
+};
+
+struct hfi_cmd_session_get_sequence_header_packet {
+	u32 size;
+	u32 packet_type;
+	u32 session_id;
+	u32 buffer_len;
+	u8 *packet_buffer;
+};
+
+struct hfi_msg_event_notify_packet {
+	u32 size;
+	u32 packet_type;
+	u32 session_id;
+	u32 event_id;
+	u32 event_data1;
+	u32 event_data2;
+	u32 rg_ext_event_data[1];
+};
+
+struct hfi_msg_sys_init_done_packet {
+	u32 size;
+	u32 packet_type;
+	u32 error_type;
+	u32 num_properties;
+	u32 rg_property_data[1];
+};
+
+struct hfi_msg_sys_pc_prep_done_packet {
+	u32 size;
+	u32 packet_type;
+	u32 error_type;
+};
+
+struct hfi_msg_sys_release_resource_done_packet {
+	u32 size;
+	u32 packet_type;
+	u32 resource_handle;
+	u32 error_type;
+};
+
+struct hfi_msg_sys_session_init_done_packet {
+	u32 size;
+	u32 packet_type;
+	u32 session_id;
+	u32 error_type;
+	u32 num_properties;
+	u32 rg_property_data[1];
+};
+
+struct hfi_msg_sys_session_end_done_packet {
+	u32 size;
+	u32 packet_type;
+	u32 session_id;
+	u32 error_type;
+};
+
+struct hfi_msg_session_get_sequence_header_done_packet {
+	u32 size;
+	u32 packet_type;
+	u32 session_id;
+	u32 error_type;
+	u32 header_len;
+	u8 *sequence_header;
+};
+
+struct hfi_msg_sys_debug_packet {
+	u32 size;
+	u32 packet_type;
+	u32 msg_type;
+	u32 msg_size;
+	u32 time_stamp_hi;
+	u32 time_stamp_lo;
+	u8 rg_msg_data[1];
+};
+
+#endif
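For reference, a minimal sketch (not part of the patch) of how the NV12 sizing macros in this new header compose. The 1280x720 frame size, the 128-byte stride multiple, the 32-line buffer-height multiple, the 4096-byte UV alignment and the example_nv12_buf_size() name are all illustrative assumptions; real constraints come from the hfi_uncompressed_plane_constraints values reported by firmware.

#include <linux/types.h>
#include "vidc_hal_helper.h"

/* Illustrative only: size an NV12 interleaved buffer for a 1280x720 frame,
 * assuming a 128-byte stride multiple, a 32-line buffer-height multiple and
 * 4096-byte UV alignment.
 */
static u32 example_nv12_buf_size(void)
{
	u32 y_stride, y_height, uv_stride, uv_height;
	u32 y_size, uv_size, buf_size;

	/* Round the luma plane up to the assumed alignment multiples */
	HFI_NV12_IL_CALC_Y_STRIDE(y_stride, 1280, 128);
	HFI_NV12_IL_CALC_Y_BUFHEIGHT(y_height, 720, 32);
	/* Chroma is interleaved, so the macro doubles the half-width stride */
	HFI_NV12_IL_CALC_UV_STRIDE(uv_stride, 1280, 128);
	HFI_NV12_IL_CALC_UV_BUFHEIGHT(uv_height, 720, 32);
	/* Total allocation = luma + chroma + UV alignment padding */
	HFI_NV12_IL_CALC_BUF_SIZE(buf_size, y_size, y_stride, y_height,
				  uv_size, uv_stride, uv_height, 4096);

	/* 1280 * 736 + (1280 * 384 + 4096) = 1437696 bytes */
	return buf_size;
}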
diff --git a/drivers/media/video/msm_vidc/vidc_hal_interrupt_handler.c b/drivers/media/video/msm_vidc/vidc_hal_interrupt_handler.c
index 02b9699..b604d0a 100644
--- a/drivers/media/video/msm_vidc/vidc_hal_interrupt_handler.c
+++ b/drivers/media/video/msm_vidc/vidc_hal_interrupt_handler.c
@@ -15,7 +15,7 @@
 #include <linux/list.h>
 #include "vidc_hal.h"
 
-static enum vidc_status vidc_map_hal_err_status(enum HFI_ERROR hfi_err)
+static enum vidc_status vidc_map_hal_err_status(int hfi_err)
 {
 	enum vidc_status vidc_err;
 	switch (hfi_err) {
@@ -64,8 +64,6 @@
 	case HFI_ERR_SESSION_SYNC_FRAME_NOT_DETECTED:
 		vidc_err = VIDC_ERR_IFRAME_EXPECTED;
 		break;
-	case HFI_ERR_SYS_UNKNOWN:
-	case HFI_ERR_SESSION_UNKNOWN:
 	case HFI_ERR_SESSION_EMPTY_BUFFER_DONE_OUTPUT_PENDING:
 	default:
 		vidc_err = VIDC_ERR_FAIL;
@@ -82,7 +80,7 @@
 	int num_properties_changed;
 	struct hfi_frame_size frame_sz;
 	u8 *data_ptr;
-	enum HFI_PROPERTY prop_id;
+	int prop_id;
 	HAL_MSG_LOW("RECEIVED:EVENT_NOTIFY");
 	if (sizeof(struct hfi_msg_event_notify_packet)
 		> pkt->size) {
@@ -103,12 +101,11 @@
 	if (num_properties_changed) {
 		data_ptr = (u8 *) &pkt->rg_ext_event_data[0];
 		do {
-			prop_id = (enum HFI_PROPERTY) *((u32 *)data_ptr);
+			prop_id = (int) *((u32 *)data_ptr);
 			switch (prop_id) {
 			case HFI_PROPERTY_PARAM_FRAME_SIZE:
-				frame_sz.buffer =
-					(enum HFI_BUFFER)
-						*((((u32 *)data_ptr)+1));
+				frame_sz.buffer_type =
+					(int) *((((u32 *)data_ptr)+1));
 				frame_sz.width =
 					event_notify.width =
 						*((((u32 *)data_ptr)+2));
@@ -165,7 +162,7 @@
 	struct vidc_hal_sys_init_done sys_init_done;
 	u32 rem_bytes, bytes_read = 0, num_properties;
 	u8 *data_ptr;
-	enum HFI_PROPERTY prop_id;
+	int prop_id;
 	enum vidc_status status = VIDC_ERR_NONE;
 
 	HAL_MSG_LOW("RECEIVED:SYS_INIT_DONE");
@@ -202,7 +199,7 @@
 		num_properties = pkt->num_properties;
 
 		while ((num_properties != 0) && (rem_bytes >= sizeof(u32))) {
-			prop_id = (enum HFI_PROPERTY) *((u32 *)data_ptr);
+			prop_id = *((u32 *)data_ptr);
 			data_ptr = data_ptr + 4;
 
 			switch (prop_id) {
@@ -282,8 +279,8 @@
 			rc = VIDC_ERR_FAIL;
 		}
 		HAL_MSG_LOW("got buffer requirements for: %d",
-					hfi_buf_req->buffer);
-		switch (hfi_buf_req->buffer) {
+					hfi_buf_req->buffer_type);
+		switch (hfi_buf_req->buffer_type) {
 		case HFI_BUFFER_INPUT:
 			memcpy(&buffreq->buffer[0], hfi_buf_req,
 				sizeof(struct hfi_buffer_requirements));
@@ -330,8 +327,8 @@
 				HAL_BUFFER_INTERNAL_PERSIST;
 			break;
 		default:
-			HAL_MSG_ERROR("hal_process_sess_get_prop_buf_req:"
-			"bad_buffer_type: %d", hfi_buf_req->buffer);
+			HAL_MSG_ERROR("%s: bad_buffer_type: %d",
+				__func__, hfi_buf_req->buffer_type);
 			break;
 		}
 		req_bytes -= sizeof(struct hfi_buffer_requirements);
@@ -525,8 +522,8 @@
 		data_done.size = sizeof(struct msm_vidc_cb_data_done);
 		data_done.clnt_data = (void *) pkt->input_tag;
 
-		data_done.output_done.timestamp_hi = pkt->timestamp_hi;
-		data_done.output_done.timestamp_lo = pkt->timestamp_lo;
+		data_done.output_done.timestamp_hi = pkt->time_stamp_hi;
+		data_done.output_done.timestamp_lo = pkt->time_stamp_lo;
 		data_done.output_done.flags1 = pkt->flags;
 		data_done.output_done.mark_target = pkt->mark_target;
 		data_done.output_done.mark_data = pkt->mark_data;
@@ -559,20 +556,20 @@
 
 		data_done.output_done.stream_id = pkt->stream_id;
 		data_done.output_done.view_id = pkt->view_id;
-		data_done.output_done.timestamp_hi = pkt->timestamp_hi;
-		data_done.output_done.timestamp_lo = pkt->timestamp_lo;
+		data_done.output_done.timestamp_hi = pkt->time_stamp_hi;
+		data_done.output_done.timestamp_lo = pkt->time_stamp_lo;
 		data_done.output_done.flags1 = pkt->flags;
 		data_done.output_done.mark_target = pkt->mark_target;
 		data_done.output_done.mark_data = pkt->mark_data;
 		data_done.output_done.stats = pkt->stats;
 		data_done.output_done.alloc_len1 = pkt->alloc_len;
 		data_done.output_done.filled_len1 = pkt->filled_len;
-		data_done.output_done.offset1 = pkt->oofset;
+		data_done.output_done.offset1 = pkt->offset;
 		data_done.output_done.frame_width = pkt->frame_width;
 		data_done.output_done.frame_height = pkt->frame_height;
-		data_done.output_done.start_xCoord = pkt->start_xCoord;
-		data_done.output_done.start_yCoord = pkt->start_yCoord;
-		data_done.output_done.input_tag1 = pkt->input_tag1;
+		data_done.output_done.start_xCoord = pkt->start_x_coord;
+		data_done.output_done.start_yCoord = pkt->start_y_coord;
+		data_done.output_done.input_tag1 = pkt->input_tag;
 		data_done.output_done.picture_type = pkt->picture_type;
 		data_done.output_done.packet_buffer1 = pkt->packet_buffer;
 		data_done.output_done.extra_data_buffer =
diff --git a/drivers/media/video/vcap_v4l2.c b/drivers/media/video/vcap_v4l2.c
index db0dbc2..0a033ae 100644
--- a/drivers/media/video/vcap_v4l2.c
+++ b/drivers/media/video/vcap_v4l2.c
@@ -339,7 +339,7 @@
 
 	buf = container_of(vb, struct vcap_buffer, vb);
 
-	buf->ion_handle = ion_import_fd(dev->ion_client, b->m.userptr);
+	buf->ion_handle = ion_import_dma_buf(dev->ion_client, b->m.userptr);
 	if (IS_ERR((void *)buf->ion_handle)) {
 		pr_err("%s: Could not alloc memory\n", __func__);
 		buf->ion_handle = NULL;
diff --git a/drivers/media/video/videobuf2-msm-mem.c b/drivers/media/video/videobuf2-msm-mem.c
index 186195d..740d183 100644
--- a/drivers/media/video/videobuf2-msm-mem.c
+++ b/drivers/media/video/videobuf2-msm-mem.c
@@ -185,7 +185,7 @@
 	if (mem->phyaddr != 0)
 		return 0;
 #ifdef CONFIG_MSM_MULTIMEDIA_USE_ION
-	mem->ion_handle = ion_import_fd(client, (int)mem->vaddr);
+	mem->ion_handle = ion_import_dma_buf(client, (int)mem->vaddr);
 	if (IS_ERR_OR_NULL(mem->ion_handle)) {
 		pr_err("%s ION import failed\n", __func__);
 		return PTR_ERR(mem->ion_handle);
diff --git a/drivers/mfd/pm8038-core.c b/drivers/mfd/pm8038-core.c
index 8fef786..b32932b 100644
--- a/drivers/mfd/pm8038-core.c
+++ b/drivers/mfd/pm8038-core.c
@@ -327,6 +327,17 @@
 	.pdata_size	= sizeof(struct pm8xxx_tm_core_data),
 };
 
+static const struct resource ccadc_cell_resources[] __devinitconst = {
+	SINGLE_IRQ_RESOURCE("PM8921_BMS_CCADC_EOC", PM8921_BMS_CCADC_EOC),
+};
+
+static struct mfd_cell ccadc_cell __devinitdata = {
+	.name		= PM8XXX_CCADC_DEV_NAME,
+	.id		= -1,
+	.resources	= ccadc_cell_resources,
+	.num_resources	= ARRAY_SIZE(ccadc_cell_resources),
+};
+
 static struct pm8xxx_vreg regulator_data[] = {
 	/*   name	     pc_name	    ctrl   test   hpm_min */
 	NLDO1200("8038_l1",		    0x0AE, 0x0AF, LDO_1200),
@@ -641,6 +652,19 @@
 		goto bail;
 	}
 
+	if (pdata->ccadc_pdata) {
+		ccadc_cell.platform_data = pdata->ccadc_pdata;
+		ccadc_cell.pdata_size =
+				sizeof(struct pm8xxx_ccadc_platform_data);
+
+		ret = mfd_add_devices(pmic->dev, 0, &ccadc_cell, 1, NULL,
+					irq_base);
+		if (ret) {
+			pr_err("Failed to add ccadc subdevice ret=%d\n", ret);
+			goto bail;
+		}
+	}
+
 	return 0;
 bail:
 	if (pmic->irq_chip) {
diff --git a/drivers/mfd/wcd9xxx-slimslave.c b/drivers/mfd/wcd9xxx-slimslave.c
index 889c416..789242d 100644
--- a/drivers/mfd/wcd9xxx-slimslave.c
+++ b/drivers/mfd/wcd9xxx-slimslave.c
@@ -537,3 +537,18 @@
 	return ret;
 }
 EXPORT_SYMBOL_GPL(wcd9xxx_close_slim_sch_tx);
+
+int wcd9xxx_get_slave_port(unsigned int ch_num)
+{
+	int ret = 0;
+
+	pr_debug("%s: ch_num[%d]\n", __func__, ch_num);
+	ret = (ch_num - BASE_CH_NUM);
+	if (ret < 0) {
+		pr_err("%s: Error:- Invalid slave port found = %d\n",
+			__func__, ret);
+		return -EINVAL;
+	}
+	return ret;
+}
+EXPORT_SYMBOL_GPL(wcd9xxx_get_slave_port);
diff --git a/drivers/misc/qseecom.c b/drivers/misc/qseecom.c
index d5afe8d..d4eb6e0 100644
--- a/drivers/misc/qseecom.c
+++ b/drivers/misc/qseecom.c
@@ -141,7 +141,6 @@
 static uint32_t pil_ref_cnt;
 static DEFINE_MUTEX(pil_access_lock);
 
-static DEFINE_MUTEX(send_msg_lock);
 static DEFINE_MUTEX(qsee_bw_mutex);
 static DEFINE_MUTEX(qsee_sfpb_bw_mutex);
 static DEFINE_MUTEX(app_access_lock);
@@ -262,7 +261,8 @@
 	ion_phys_addr_t pa;
 
 	/* Get the handle of the shared fd */
-	svc->ihandle = ion_import_fd(qseecom.ion_clnt, listener->ifd_data_fd);
+	svc->ihandle = ion_import_dma_buf(qseecom.ion_clnt,
+					listener->ifd_data_fd);
 	if (svc->ihandle == NULL) {
 		pr_err("Ion client could not retrieve the handle\n");
 		return -ENOMEM;
@@ -504,7 +504,8 @@
 		return -EFAULT;
 
 	/* Get the handle of the shared fd */
-	data->client.ihandle = ion_import_fd(qseecom.ion_clnt, req.ifd_data_fd);
+	data->client.ihandle = ion_import_dma_buf(qseecom.ion_clnt,
+						req.ifd_data_fd);
 	if (IS_ERR_OR_NULL(data->client.ihandle)) {
 		pr_err("Ion client could not retrieve the handle\n");
 		return -ENOMEM;
@@ -594,6 +595,26 @@
 	return ret;
 }
 
+static int __qseecom_check_app_exists(struct qseecom_check_app_ireq req)
+{
+	int32_t ret;
+	struct qseecom_command_scm_resp resp;
+
+	/*  SCM_CALL  to check if app_id for the mentioned app exists */
+	ret = scm_call(SCM_SVC_TZSCHEDULER, 1,  &req,
+				sizeof(struct qseecom_check_app_ireq),
+				&resp, sizeof(resp));
+	if (ret) {
+		pr_err("scm_call to check if app is already loaded failed\n");
+		return -EINVAL;
+	}
+
+	if (resp.result == QSEOS_RESULT_FAILURE)
+		return 0;
+	else
+		return resp.data;
+}
+
 static int qseecom_load_app(struct qseecom_dev_handle *data, void __user *argp)
 {
 	struct qseecom_registered_app_list *entry = NULL;
@@ -621,19 +642,11 @@
 	req.qsee_cmd_id = QSEOS_APP_LOOKUP_COMMAND;
 	memcpy(req.app_name, load_img_req.img_name, MAX_APP_NAME_SIZE);
 
-	/*  SCM_CALL  to check if app_id for the mentioned app exists */
-	ret = scm_call(SCM_SVC_TZSCHEDULER, 1,  &req,
-				sizeof(struct qseecom_check_app_ireq),
-				&resp, sizeof(resp));
-	if (ret) {
-		pr_err("scm_call to check if app is already loaded failed\n");
-		return -EINVAL;
-	}
-
-	if (resp.result == QSEOS_RESULT_FAILURE)
-		app_id = 0;
+	ret = __qseecom_check_app_exists(req);
+	if (ret < 0)
+		return ret;
 	else
-		app_id = resp.data;
+		app_id = ret;
 
 	if (app_id) {
 		pr_warn("App id %d (%s) already exists\n", app_id,
@@ -654,7 +667,7 @@
 		pr_warn("App (%s) does not exist, loading apps for first time\n",
 			(char *)(req.app_name));
 		/* Get the handle of the shared fd */
-		ihandle = ion_import_fd(qseecom.ion_clnt,
+		ihandle = ion_import_dma_buf(qseecom.ion_clnt,
 					load_img_req.ifd_data_fd);
 		if (IS_ERR_OR_NULL(ihandle)) {
 			pr_err("Ion client could not retrieve the handle\n");
@@ -1054,7 +1067,7 @@
 	for (i = 0; i < MAX_ION_FD; i++) {
 		if (req->ifd_data[i].fd > 0) {
 			/* Get the handle of the shared fd */
-			ihandle = ion_import_fd(qseecom.ion_clnt,
+			ihandle = ion_import_dma_buf(qseecom.ion_clnt,
 						req->ifd_data[i].fd);
 			if (IS_ERR_OR_NULL(ihandle)) {
 				pr_err("Ion client can't retrieve the handle\n");
@@ -1288,7 +1301,7 @@
 	}
 
 	/* Get the handle of the shared fd */
-	ihandle = ion_import_fd(qseecom.ion_clnt,
+	ihandle = ion_import_dma_buf(qseecom.ion_clnt,
 				load_img_req.ifd_data_fd);
 	if (IS_ERR_OR_NULL(ihandle)) {
 		pr_err("Ion client could not retrieve the handle\n");
@@ -1412,6 +1425,37 @@
 	return ret;
 }
 
+static int qseecom_query_app_loaded(struct qseecom_dev_handle *data,
+					void __user *argp)
+{
+
+	int32_t ret;
+	struct qseecom_qseos_app_load_query query_req;
+	struct qseecom_check_app_ireq req;
+
+	/* Copy the relevant information needed for loading the image */
+	if (__copy_from_user(&query_req,
+				(void __user *)argp,
+				sizeof(struct qseecom_qseos_app_load_query))) {
+		pr_err("copy_from_user failed\n");
+		return -EFAULT;
+	}
+
+	req.qsee_cmd_id = QSEOS_APP_LOOKUP_COMMAND;
+	memcpy(req.app_name, query_req.app_name, MAX_APP_NAME_SIZE);
+
+	ret = __qseecom_check_app_exists(req);
+	if (ret == -EINVAL) {
+		pr_err(" scm call to check if app is loaded failed");
+		return ret;	/* scm call failed */
+	} else if (ret > 0) {
+		pr_err("app is already loaded in QSEE");
+		return -EEXIST;	/* app already loaded */
+	} else {
+		return 0;	/* app not loaded */
+	}
+}
+
 static long qseecom_ioctl(struct file *file, unsigned cmd,
 		unsigned long arg)
 {
@@ -1447,24 +1491,24 @@
 	}
 	case QSEECOM_IOCTL_SEND_CMD_REQ: {
 		/* Only one client allowed here at a time */
-		mutex_lock(&send_msg_lock);
+		mutex_lock(&app_access_lock);
 		atomic_inc(&data->ioctl_count);
 		ret = qseecom_send_cmd(data, argp);
 		atomic_dec(&data->ioctl_count);
 		wake_up_all(&data->abort_wq);
-		mutex_unlock(&send_msg_lock);
+		mutex_unlock(&app_access_lock);
 		if (ret)
 			pr_err("failed qseecom_send_cmd: %d\n", ret);
 		break;
 	}
 	case QSEECOM_IOCTL_SEND_MODFD_CMD_REQ: {
 		/* Only one client allowed here at a time */
-		mutex_lock(&send_msg_lock);
+		mutex_lock(&app_access_lock);
 		atomic_inc(&data->ioctl_count);
 		ret = qseecom_send_modfd_cmd(data, argp);
 		atomic_dec(&data->ioctl_count);
 		wake_up_all(&data->abort_wq);
-		mutex_unlock(&send_msg_lock);
+		mutex_unlock(&app_access_lock);
 		if (ret)
 			pr_err("failed qseecom_send_cmd: %d\n", ret);
 		break;
@@ -1568,6 +1612,14 @@
 			pr_err("failed unload_app request: %d\n", ret);
 		break;
 	}
+	case QSEECOM_IOCTL_APP_LOADED_QUERY_REQ: {
+		mutex_lock(&app_access_lock);
+		atomic_inc(&data->ioctl_count);
+		ret = qseecom_query_app_loaded(data, argp);
+		atomic_dec(&data->ioctl_count);
+		mutex_unlock(&app_access_lock);
+		break;
+	}
 	default:
 		return -EINVAL;
 	}
diff --git a/drivers/misc/tsif.c b/drivers/misc/tsif.c
index 2b09d7c..aeda38c 100644
--- a/drivers/misc/tsif.c
+++ b/drivers/misc/tsif.c
@@ -304,7 +304,9 @@
 	for (i = size-1; i >= 0; i--) {
 		int tmp;
 		g = table + i;
-		tmp = gpio_tlmm_config(g->gpio_cfg, GPIO_CFG_DISABLE);
+		tmp = gpio_tlmm_config(GPIO_CFG(GPIO_PIN(g->gpio_cfg),
+			0, GPIO_CFG_INPUT, GPIO_CFG_PULL_DOWN, GPIO_CFG_2MA),
+			GPIO_CFG_DISABLE);
 		if (tmp) {
 			pr_err("gpio_tlmm_config(0x%08x, GPIO_CFG_DISABLE)"
 			       " <%s> failed: %d\n",
@@ -681,7 +683,7 @@
 		while (tsif_device->xfer[0].busy ||
 		       tsif_device->xfer[1].busy) {
 			msm_dmov_flush(tsif_device->dma, 1);
-			msleep(10);
+			usleep(10000);
 		}
 	}
 	tsif_device->state = tsif_state_stopped;
@@ -1031,6 +1033,15 @@
 		return rc;
 	}
 	tsif_device->state = tsif_state_running;
+
+	/* make sure the GPIOs are set up */
+	rc = tsif_start_gpios(tsif_device);
+	if (rc) {
+		dev_err(&tsif_device->pdev->dev, "failed to start GPIOs\n");
+		tsif_dma_exit(tsif_device);
+		return rc;
+	}
+
 	/*
 	 * DMA should be scheduled prior to TSIF hardware initialization,
 	 * otherwise "bus error" will be reported by Data Mover
@@ -1046,6 +1057,7 @@
 	rc = tsif_start_hw(tsif_device);
 	if (rc) {
 		dev_err(&tsif_device->pdev->dev, "Unable to start HW\n");
+		tsif_stop_gpios(tsif_device);
 		tsif_dma_exit(tsif_device);
 		tsif_clock(tsif_device, 0);
 		return rc;
@@ -1067,10 +1079,19 @@
 {
 	dev_info(&tsif_device->pdev->dev, "%s, state %d\n", __func__,
 		 (int)tsif_device->state);
-	/*
-	 * DMA should be flushed/stopped prior to TSIF hardware stop,
-	 * otherwise "bus error" will be reported by Data Mover
+
+	/* turn off the GPIOs to prevent new data from entering */
+	tsif_stop_gpios(tsif_device);
+
+	/* We unfortunately must sleep here to give the ADM time to
+	 * complete any outstanding reads after the GPIOs are turned
+	 * off.  There is no indication from the ADM hardware that
+	 * there are any outstanding reads on the bus, and if we
+	 * stop the TSIF too quickly, it can cause a bus error.
 	 */
+	msleep(100);
+
+	/* now we can stop the core */
 	tsif_stop_hw(tsif_device);
 	tsif_dma_exit(tsif_device);
 	tsif_clock(tsif_device, 0);
@@ -1317,9 +1338,6 @@
 	}
 	dev_info(&pdev->dev, "remapped phys 0x%08x => virt %p\n",
 		 tsif_device->memres->start, tsif_device->base);
-	rc = tsif_start_gpios(tsif_device);
-	if (rc)
-		goto err_gpio;
 
 	pm_runtime_set_active(&pdev->dev);
 	pm_runtime_enable(&pdev->dev);
@@ -1355,8 +1373,6 @@
 	free_irq(tsif_device->irq, tsif_device);
 err_irq:
 	tsif_debugfs_exit(tsif_device);
-	tsif_stop_gpios(tsif_device);
-err_gpio:
 	iounmap(tsif_device->base);
 err_ioremap:
 err_rgn:
diff --git a/drivers/misc/tspp.c b/drivers/misc/tspp.c
index 81c6b65..4d7553e 100644
--- a/drivers/misc/tspp.c
+++ b/drivers/misc/tspp.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+/* Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -1687,7 +1687,7 @@
 
 	/* map clocks */
 	if (data->tsif_pclk) {
-		device->tsif_pclk = clk_get(NULL, data->tsif_pclk);
+		device->tsif_pclk = clk_get(&pdev->dev, data->tsif_pclk);
 		if (IS_ERR(device->tsif_pclk)) {
 			pr_err("tspp: failed to get %s",
 				data->tsif_pclk);
@@ -1697,7 +1697,7 @@
 		}
 	}
 	if (data->tsif_ref_clk) {
-		device->tsif_ref_clk = clk_get(NULL, data->tsif_ref_clk);
+		device->tsif_ref_clk = clk_get(&pdev->dev, data->tsif_ref_clk);
 		if (IS_ERR(device->tsif_ref_clk)) {
 			pr_err("tspp: failed to get %s",
 				data->tsif_ref_clk);
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index 96eae7d..0c3f994 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -465,13 +465,6 @@
 	  This provides support for the SD/MMC cell found in the
           MSM and QSD SOCs from Qualcomm.
 
-config MMC_MSM_CARD_HW_DETECTION
-	boolean "Qualcomm MMC Hardware detection support"
-	depends on MMC_MSM
-	default n
-	help
-	  Select Y if the hardware has support to detect card insertion/removal.
-
 config MMC_MSM_SDC1_SUPPORT
 	boolean "Qualcomm SDC1 support"
 	depends on MMC_MSM
diff --git a/drivers/mmc/host/msm_sdcc.c b/drivers/mmc/host/msm_sdcc.c
index 4534cd8..a4af6c9 100644
--- a/drivers/mmc/host/msm_sdcc.c
+++ b/drivers/mmc/host/msm_sdcc.c
@@ -162,7 +162,7 @@
 {
 	unsigned short ret = NR_SG;
 
-	if (host->is_sps_mode) {
+	if (is_sps_mode(host)) {
 		ret = SPS_MAX_DESCS;
 	} else { /* DMA or PIO mode */
 		if (NR_SG > MAX_NR_SG_DMA_PIO)
@@ -263,41 +263,89 @@
 static void msmsdcc_soft_reset(struct msmsdcc_host *host)
 {
 	/*
-	 * Reset SDCC controller's DPSM (data path state machine
-	 * and CPSM (command path state machine).
+	 * Reset controller state machines without resetting
+	 * configuration registers (MCI_POWER, MCI_CLK, MCI_INT_MASKn).
 	 */
-	writel_relaxed(0, host->base + MMCICOMMAND);
-	msmsdcc_sync_reg_wr(host);
-	writel_relaxed(0, host->base + MMCIDATACTRL);
-	msmsdcc_sync_reg_wr(host);
+	if (is_sw_reset_save_config(host)) {
+		ktime_t start;
+
+		writel_relaxed(readl_relaxed(host->base + MMCIPOWER)
+				| MCI_SW_RST_CFG, host->base + MMCIPOWER);
+		msmsdcc_sync_reg_wr(host);
+
+		start = ktime_get();
+		while (readl_relaxed(host->base + MMCIPOWER) & MCI_SW_RST_CFG) {
+			/*
+			 * SW reset can take up to 10 HCLK + 15 MCLK cycles.
+			 * Calculating based on min clk rates (hclk = 27MHz,
+			 * mclk = 400KHz) it comes to ~40us. Let's poll for
+			 * max. 1ms for reset completion.
+			 */
+			if (ktime_to_us(ktime_sub(ktime_get(), start)) > 1000) {
+				pr_err("%s: %s failed\n",
+					mmc_hostname(host->mmc), __func__);
+				BUG();
+			}
+		}
+	} else {
+		writel_relaxed(0, host->base + MMCICOMMAND);
+		msmsdcc_sync_reg_wr(host);
+		writel_relaxed(0, host->base + MMCIDATACTRL);
+		msmsdcc_sync_reg_wr(host);
+	}
 }
 
 static void msmsdcc_hard_reset(struct msmsdcc_host *host)
 {
 	int ret;
 
-	/* Reset the controller */
-	ret = clk_reset(host->clk, CLK_RESET_ASSERT);
-	if (ret)
-		pr_err("%s: Clock assert failed at %u Hz"
-			" with err %d\n", mmc_hostname(host->mmc),
+	/*
+	 * Reset SDCC controller to power on default state.
+	 * Don't issue a reset request to clock control block if
+	 * SDCC controller itself can support hard reset.
+	 */
+	if (is_sw_hard_reset(host)) {
+		ktime_t start;
+
+		writel_relaxed(readl_relaxed(host->base + MMCIPOWER)
+				| MCI_SW_RST, host->base + MMCIPOWER);
+		msmsdcc_sync_reg_wr(host);
+
+		start = ktime_get();
+		while (readl_relaxed(host->base + MMCIPOWER) & MCI_SW_RST) {
+			/*
+			 * See comment in msmsdcc_soft_reset() on choosing 1ms
+			 * poll timeout.
+			 */
+			if (ktime_to_us(ktime_sub(ktime_get(), start)) > 1000) {
+				pr_err("%s: %s failed\n",
+					mmc_hostname(host->mmc), __func__);
+				BUG();
+			}
+		}
+	} else {
+		ret = clk_reset(host->clk, CLK_RESET_ASSERT);
+		if (ret)
+			pr_err("%s: Clock assert failed at %u Hz" \
+				" with err %d\n", mmc_hostname(host->mmc),
 				host->clk_rate, ret);
 
-	ret = clk_reset(host->clk, CLK_RESET_DEASSERT);
-	if (ret)
-		pr_err("%s: Clock deassert failed at %u Hz"
-			" with err %d\n", mmc_hostname(host->mmc),
-			host->clk_rate, ret);
+		ret = clk_reset(host->clk, CLK_RESET_DEASSERT);
+		if (ret)
+			pr_err("%s: Clock deassert failed at %u Hz" \
+				" with err %d\n", mmc_hostname(host->mmc),
+				host->clk_rate, ret);
 
-	mb();
-	/* Give some delay for clock reset to propogate to controller */
-	msmsdcc_delay(host);
+		mb();
+		/* Give some delay for clock reset to propagate to controller */
+		msmsdcc_delay(host);
+	}
 }
 
 static void msmsdcc_reset_and_restore(struct msmsdcc_host *host)
 {
-	if (host->sdcc_version) {
-		if (host->is_sps_mode) {
+	if (is_soft_reset(host)) {
+		if (is_sps_mode(host)) {
 			/* Reset DML first */
 			msmsdcc_dml_reset(host);
 			/*
@@ -313,7 +361,7 @@
 		pr_debug("%s: Applied soft reset to Controller\n",
 				mmc_hostname(host->mmc));
 
-		if (host->is_sps_mode)
+		if (is_sps_mode(host))
 			msmsdcc_dml_init(host);
 	} else {
 		/* Give Clock reset (hard reset) to controller */
@@ -393,7 +441,7 @@
 static inline void msmsdcc_sync_reg_wr(struct msmsdcc_host *host)
 {
 	mb();
-	if (!host->sdcc_version)
+	if (!is_wait_for_reg_write(host))
 		udelay(host->reg_write_delay);
 	else if (readl_relaxed(host->base + MCI_STATUS2) &
 			MCI_MCLK_REG_WR_ACTIVE) {
@@ -773,14 +821,14 @@
 	bool ret = true;
 	u32 xfer_size = data->blksz * data->blocks;
 
-	if (host->is_sps_mode) {
+	if (is_sps_mode(host)) {
 		/*
 		 * BAM Mode: Fall back on PIO if size is less
 		 * than or equal to SPS_MIN_XFER_SIZE bytes.
 		 */
 		if (xfer_size <= SPS_MIN_XFER_SIZE)
 			ret = false;
-	} else if (host->is_dma_mode) {
+	} else if (is_dma_mode(host)) {
 		/*
 		 * ADM Mode: Fall back on PIO if size is less than FIFO size
 		 * or not integer multiple of FIFO size
@@ -1125,9 +1173,9 @@
 		datactrl |= MCI_AUTO_PROG_DONE;
 
 	if (msmsdcc_is_dma_possible(host, data)) {
-		if (host->is_dma_mode && !msmsdcc_config_dma(host, data)) {
+		if (is_dma_mode(host) && !msmsdcc_config_dma(host, data)) {
 			datactrl |= MCI_DPSM_DMAENABLE;
-		} else if (host->is_sps_mode) {
+		} else if (is_sps_mode(host)) {
 			if (!msmsdcc_is_dml_busy(host)) {
 				if (!msmsdcc_sps_start_xfer(host, data)) {
 					/* Now kick start DML transfer */
@@ -1176,7 +1224,7 @@
 	     "%s: data timeout is zero. timeout_ns=0x%x, timeout_clks=0x%x\n",
 	     mmc_hostname(host->mmc), data->timeout_ns, data->timeout_clks);
 
-	if (host->is_dma_mode && (datactrl & MCI_DPSM_DMAENABLE)) {
+	if (is_dma_mode(host) && (datactrl & MCI_DPSM_DMAENABLE)) {
 		/* Use ADM (Application Data Mover) HW for Data transfer */
 		/* Save parameters for the dma exec function */
 		host->cmd_timeout = timeout;
@@ -1626,10 +1674,10 @@
 
 	if (!cmd->data || cmd->error) {
 		if (host->curr.data && host->dma.sg &&
-			host->is_dma_mode)
+			is_dma_mode(host))
 			msm_dmov_flush(host->dma.channel, 0);
 		else if (host->curr.data && host->sps.sg &&
-			host->is_sps_mode){
+			is_sps_mode(host)) {
 			/* Stop current SPS transfer */
 			msmsdcc_sps_exit_curr_xfer(host);
 		}
@@ -1682,7 +1730,7 @@
 			msmsdcc_delay(host);
 		}
 
-		if (!host->clks_on) {
+		if (!atomic_read(&host->clks_on)) {
 			pr_debug("%s: %s: SDIO async irq received\n",
 					mmc_hostname(host->mmc), __func__);
 
@@ -1782,9 +1830,9 @@
 				      MCI_TXUNDERRUN|MCI_RXOVERRUN)) {
 				msmsdcc_data_err(host, data, status);
 				host->curr.data_xfered = 0;
-				if (host->dma.sg && host->is_dma_mode)
+				if (host->dma.sg && is_dma_mode(host))
 					msm_dmov_flush(host->dma.channel, 0);
-				else if (host->sps.sg && host->is_sps_mode) {
+				else if (host->sps.sg && is_sps_mode(host)) {
 					/* Stop current SPS transfer */
 					msmsdcc_sps_exit_curr_xfer(host);
 				} else {
@@ -1969,7 +2017,7 @@
 		msmsdcc_sdio_al_lpm(mmc, false);
 
 	/* check if sps pipe reset is pending? */
-	if (host->is_sps_mode && host->sps.pipe_reset_pending) {
+	if (is_sps_mode(host) && host->sps.pipe_reset_pending) {
 		msmsdcc_sps_pipes_reset_and_restore(host);
 		host->sps.pipe_reset_pending = false;
 	}
@@ -1992,7 +2040,8 @@
 	/*
 	 * Don't start the request if SDCC is not in proper state to handle it
 	 */
-	if (!host->pwr || !host->clks_on || host->sdcc_irq_disabled) {
+	if (!host->pwr || !atomic_read(&host->clks_on)
+			|| host->sdcc_irq_disabled) {
 		WARN(1, "%s: %s: SDCC is in bad state. don't process"
 		     " new request (CMD%d)\n", mmc_hostname(host->mmc),
 		     __func__, mrq->cmd->opcode);
@@ -2034,7 +2083,7 @@
 	}
 
 	if (mrq->data && (mrq->data->flags & MMC_DATA_WRITE)) {
-		if (host->sdcc_version) {
+		if (is_auto_prog_done(host)) {
 			if (!mrq->stop)
 				host->curr.wait_for_auto_prog_done = true;
 		} else {
@@ -2111,8 +2160,15 @@
 		goto out;
 	}
 
-	if (regulator_count_voltages(vreg->reg) > 0)
+	if (regulator_count_voltages(vreg->reg) > 0) {
 		vreg->set_voltage_sup = 1;
+		/* sanity check */
+		if (!vreg->high_vol_level || !vreg->hpm_uA) {
+			pr_err("%s: %s invalid constraints specified\n",
+					__func__, vreg->name);
+			rc = -EINVAL;
+		}
+	}
 
 out:
 	return rc;
@@ -2202,7 +2258,7 @@
 	return rc;
 }
 
-static int msmsdcc_vreg_disable(struct msm_mmc_reg_data *vreg)
+static int msmsdcc_vreg_disable(struct msm_mmc_reg_data *vreg, bool is_init)
 {
 	int rc = 0;
 
@@ -2224,17 +2280,33 @@
 		rc = msmsdcc_vreg_set_voltage(vreg, 0, vreg->high_vol_level);
 		if (rc)
 			goto out;
-	} else if (vreg->is_enabled && vreg->always_on && vreg->lpm_sup) {
-		/* Put always_on regulator in LPM (low power mode) */
-		rc = msmsdcc_vreg_set_optimum_mode(vreg, vreg->lpm_uA);
-		if (rc < 0)
-			goto out;
+	} else if (vreg->is_enabled && vreg->always_on) {
+		if (!is_init && vreg->lpm_sup) {
+			/* Put always_on regulator in LPM (low power mode) */
+			rc = msmsdcc_vreg_set_optimum_mode(vreg, vreg->lpm_uA);
+			if (rc < 0)
+				goto out;
+		} else if (is_init && vreg->reset_at_init) {
+			/**
+			 * The regulator might not actually be disabled if it
+			 * is shared and in use by other drivers.
+			 */
+			rc = regulator_disable(vreg->reg);
+			if (rc) {
+				pr_err("%s: regulator_disable(%s) failed at " \
+					"bootup. rc=%d\n", __func__,
+					vreg->name, rc);
+				goto out;
+			}
+			vreg->is_enabled = false;
+		}
 	}
 out:
 	return rc;
 }
 
-static int msmsdcc_setup_vreg(struct msmsdcc_host *host, bool enable)
+static int msmsdcc_setup_vreg(struct msmsdcc_host *host, bool enable,
+		bool is_init)
 {
 	int rc = 0, i;
 	struct msm_mmc_slot_reg_data *curr_slot;
@@ -2252,7 +2324,8 @@
 			if (enable)
 				rc = msmsdcc_vreg_enable(vreg_table[i]);
 			else
-				rc = msmsdcc_vreg_disable(vreg_table[i]);
+				rc = msmsdcc_vreg_disable(vreg_table[i],
+						is_init);
 			if (rc)
 				goto out;
 		}
@@ -2269,10 +2342,10 @@
 {
 	int rc;
 
-	rc = msmsdcc_setup_vreg(host, 1);
+	rc = msmsdcc_setup_vreg(host, 1, true);
 	if (rc)
 		return rc;
-	rc = msmsdcc_setup_vreg(host, 0);
+	rc = msmsdcc_setup_vreg(host, 0, true);
 	return rc;
 }
 
@@ -2337,17 +2410,37 @@
  * Any function calling msmsdcc_setup_clocks must
  * acquire clk_mutex. May sleep.
  */
-static inline void msmsdcc_setup_clocks(struct msmsdcc_host *host, bool enable)
+static int msmsdcc_setup_clocks(struct msmsdcc_host *host, bool enable)
 {
-	if (enable) {
-		if (!IS_ERR_OR_NULL(host->bus_clk))
-			clk_prepare_enable(host->bus_clk);
-		if (!IS_ERR(host->pclk))
-			clk_prepare_enable(host->pclk);
-		clk_prepare_enable(host->clk);
+	int rc = 0;
+
+	if (enable && !atomic_read(&host->clks_on)) {
+		if (!IS_ERR_OR_NULL(host->bus_clk)) {
+			rc = clk_prepare_enable(host->bus_clk);
+			if (rc) {
+				pr_err("%s: %s: failed to enable the bus-clock with error %d\n",
+					mmc_hostname(host->mmc), __func__, rc);
+				goto out;
+			}
+		}
+		if (!IS_ERR(host->pclk)) {
+			rc = clk_prepare_enable(host->pclk);
+			if (rc) {
+				pr_err("%s: %s: failed to enable the pclk with error %d\n",
+					mmc_hostname(host->mmc), __func__, rc);
+				goto disable_bus;
+			}
+		}
+		rc = clk_prepare_enable(host->clk);
+		if (rc) {
+			pr_err("%s: %s: failed to enable the host-clk with error %d\n",
+				mmc_hostname(host->mmc), __func__, rc);
+			goto disable_pclk;
+		}
 		mb();
 		msmsdcc_delay(host);
-	} else {
+		atomic_set(&host->clks_on, 1);
+	} else if (!enable && atomic_read(&host->clks_on)) {
 		mb();
 		msmsdcc_delay(host);
 		clk_disable_unprepare(host->clk);
@@ -2355,7 +2448,18 @@
 			clk_disable_unprepare(host->pclk);
 		if (!IS_ERR_OR_NULL(host->bus_clk))
 			clk_disable_unprepare(host->bus_clk);
+		atomic_set(&host->clks_on, 0);
 	}
+	goto out;
+
+disable_pclk:
+	if (!IS_ERR_OR_NULL(host->pclk))
+		clk_disable_unprepare(host->pclk);
+disable_bus:
+	if (!IS_ERR_OR_NULL(host->bus_clk))
+		clk_disable_unprepare(host->bus_clk);
+out:
+	return rc;
 }
 
 static inline unsigned int msmsdcc_get_sup_clk_rate(struct msmsdcc_host *host,
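Illustrative sketch only (not part of the patch; it assumes nothing beyond the clk_mutex field and the reworked helper above): how a caller is expected to use the int-returning msmsdcc_setup_clocks().

static int example_msmsdcc_clocks_on(struct msmsdcc_host *host)
{
	int rc;

	mutex_lock(&host->clk_mutex);	/* the helper may sleep */
	rc = msmsdcc_setup_clocks(host, true);
	mutex_unlock(&host->clk_mutex);

	if (rc)
		pr_err("%s: failed to enable clocks: %d\n",
		       mmc_hostname(host->mmc), rc);
	return rc;
}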
@@ -2534,7 +2638,7 @@
 	if (host->plat->translate_vdd && !host->sdio_gpio_lpm)
 		ret = host->plat->translate_vdd(mmc_dev(mmc), ios->vdd);
 	else if (!host->plat->translate_vdd && !host->sdio_gpio_lpm)
-		ret = msmsdcc_setup_vreg(host, !!ios->vdd);
+		ret = msmsdcc_setup_vreg(host, !!ios->vdd, false);
 
 	if (ret) {
 		pr_err("%s: Failed to setup voltage regulators\n",
@@ -2888,18 +2992,16 @@
 
 	spin_lock_irqsave(&host->lock, flags);
 	if (ios->clock) {
-		if (!host->clks_on) {
-			spin_unlock_irqrestore(&host->lock, flags);
-			msmsdcc_setup_clocks(host, true);
-			spin_lock_irqsave(&host->lock, flags);
-			host->clks_on = 1;
-			writel_relaxed(host->mci_irqenable,
-					host->base + MMCIMASK0);
-			mb();
-			msmsdcc_cfg_sdio_wakeup(host, false);
-		}
-
+		spin_unlock_irqrestore(&host->lock, flags);
+		rc = msmsdcc_setup_clocks(host, true);
+		if (rc)
+			goto out;
+		spin_lock_irqsave(&host->lock, flags);
+		writel_relaxed(host->mci_irqenable, host->base + MMCIMASK0);
+		mb();
+		msmsdcc_cfg_sdio_wakeup(host, false);
 		clock = msmsdcc_get_sup_clk_rate(host, ios->clock);
+
 		/*
 		 * For DDR50 mode, controller needs clock rate to be
 		 * double than what is required on the SD card CLK pin.
@@ -2942,7 +3044,6 @@
 		msmsdcc_delay(host);
 		clk |= MCI_CLK_ENABLE;
 	}
-
 	if (ios->bus_width == MMC_BUS_WIDTH_8)
 		clk |= MCI_CLK_WIDEBUS_8;
 	else if (ios->bus_width == MMC_BUS_WIDTH_4)
@@ -2981,7 +3082,7 @@
 		clk |= IO_PAD_PWR_SWITCH;
 
 	/* Don't write into registers if clocks are disabled */
-	if (host->clks_on) {
+	if (atomic_read(&host->clks_on)) {
 		if (readl_relaxed(host->base + MMCICLOCK) != clk) {
 			writel_relaxed(clk, host->base + MMCICLOCK);
 			msmsdcc_sync_reg_wr(host);
@@ -2993,7 +3094,7 @@
 		}
 	}
 
-	if (!(clk & MCI_CLK_ENABLE) && host->clks_on) {
+	if (!(clk & MCI_CLK_ENABLE) && atomic_read(&host->clks_on)) {
 		msmsdcc_cfg_sdio_wakeup(host, true);
 		spin_unlock_irqrestore(&host->lock, flags);
 		/*
@@ -3002,11 +3103,10 @@
 		 */
 		msmsdcc_setup_clocks(host, false);
 		spin_lock_irqsave(&host->lock, flags);
-		host->clks_on = 0;
 	}
 
 	if (host->tuning_in_progress)
-		WARN(!host->clks_on,
+		WARN(!atomic_read(&host->clks_on),
 			"tuning_in_progress but SDCC clocks are OFF\n");
 
 	/* Let interrupts be disabled if the host is powered off */
@@ -3014,8 +3114,8 @@
 		enable_irq(host->core_irqres->start);
 		host->sdcc_irq_disabled = 0;
 	}
-
 	spin_unlock_irqrestore(&host->lock, flags);
+out:
 	mutex_unlock(&host->clk_mutex);
 }
 
@@ -3088,14 +3188,14 @@
 	spin_lock_irqsave(&host->lock, flags);
 	if (enable) {
 		host->mci_irqenable |= MCI_SDIOINTOPERMASK;
-		if (host->clks_on) {
+		if (atomic_read(&host->clks_on)) {
 			writel_relaxed(readl_relaxed(host->base + MMCIMASK0) |
 				MCI_SDIOINTOPERMASK, host->base + MMCIMASK0);
 			mb();
 		}
 	} else {
 		host->mci_irqenable &= ~MCI_SDIOINTOPERMASK;
-		if (host->clks_on) {
+		if (atomic_read(&host->clks_on)) {
 			writel_relaxed(readl_relaxed(host->base + MMCIMASK0) &
 				~MCI_SDIOINTOPERMASK, host->base + MMCIMASK0);
 			mb();
@@ -3143,6 +3243,9 @@
 			pm_runtime_get_noresume(dev);
 			goto out;
 		}
+	} else if (dev->power.runtime_status == RPM_RESUMING) {
+		pm_runtime_get_noresume(dev);
+		goto out;
 	}
 
 	rc = pm_runtime_get_sync(dev);
@@ -3217,20 +3320,14 @@
 	}
 
 	mutex_lock(&host->clk_mutex);
-	spin_lock_irqsave(&host->lock, flags);
-	if (!host->clks_on) {
-		spin_unlock_irqrestore(&host->lock, flags);
-		msmsdcc_setup_clocks(host, true);
-		spin_lock_irqsave(&host->lock, flags);
-		host->clks_on = 1;
-	}
-	spin_unlock_irqrestore(&host->lock, flags);
+	rc = msmsdcc_setup_clocks(host, true);
 	mutex_unlock(&host->clk_mutex);
 
 out:
 	if (rc < 0) {
 		pr_info("%s: %s: failed with error %d", mmc_hostname(mmc),
 				__func__, rc);
+		msmsdcc_pm_qos_update_latency(host, 0);
 		return rc;
 	}
 	msmsdcc_msm_bus_cancel_work_and_set_vote(host, &mmc->ios);
@@ -3241,6 +3338,7 @@
 {
 	struct msmsdcc_host *host = mmc_priv(mmc);
 	unsigned long flags;
+	int rc = 0;
 
 	msmsdcc_pm_qos_update_latency(host, 0);
 
@@ -3248,19 +3346,16 @@
 		goto out;
 
 	mutex_lock(&host->clk_mutex);
-	spin_lock_irqsave(&host->lock, flags);
-	if (host->clks_on) {
-		spin_unlock_irqrestore(&host->lock, flags);
-		msmsdcc_setup_clocks(host, false);
-		spin_lock_irqsave(&host->lock, flags);
-		host->clks_on = 0;
-	}
-	spin_unlock_irqrestore(&host->lock, flags);
+	rc = msmsdcc_setup_clocks(host, false);
 	mutex_unlock(&host->clk_mutex);
 
+	if (rc) {
+		msmsdcc_pm_qos_update_latency(host, 1);
+		return rc;
+	}
 out:
 	msmsdcc_msm_bus_queue_work(host);
-	return 0;
+	return rc;
 }
 #endif
 
@@ -3717,7 +3812,7 @@
 
 	spin_lock_irqsave(&host->lock, flags);
 	WARN(!host->pwr, "SDCC power is turned off\n");
-	WARN(!host->clks_on, "SDCC clocks are turned off\n");
+	WARN(!atomic_read(&host->clks_on), "SDCC clocks are turned off\n");
 	WARN(host->sdcc_irq_disabled, "SDCC IRQ is disabled\n");
 
 	host->tuning_in_progress = 1;
@@ -4462,25 +4557,26 @@
 	pr_info("%s: SDCC PWR is %s\n", mmc_hostname(host->mmc),
 		(host->pwr ? "ON" : "OFF"));
 	pr_info("%s: SDCC clks are %s, MCLK rate=%d\n",
-		mmc_hostname(host->mmc), (host->clks_on ? "ON" : "OFF"),
+		mmc_hostname(host->mmc),
+		(atomic_read(&host->clks_on) ? "ON" : "OFF"),
 		(u32)clk_get_rate(host->clk));
 	pr_info("%s: SDCC irq is %s\n", mmc_hostname(host->mmc),
 		(host->sdcc_irq_disabled ? "disabled" : "enabled"));
 
 	/* Now dump SDCC registers. Don't print FIFO registers */
-	if (host->clks_on)
+	if (atomic_read(&host->clks_on))
 		msmsdcc_print_regs("SDCC-CORE", host->base,
 				   host->core_memres->start, 28);
 
 	if (host->curr.data) {
 		if (!msmsdcc_is_dma_possible(host, host->curr.data))
 			pr_info("%s: PIO mode\n", mmc_hostname(host->mmc));
-		else if (host->is_dma_mode)
+		else if (is_dma_mode(host))
 			pr_info("%s: ADM mode: busy=%d, chnl=%d, crci=%d\n",
 				mmc_hostname(host->mmc), host->dma.busy,
 				host->dma.channel, host->dma.crci);
-		else if (host->is_sps_mode) {
-			if (host->sps.busy && host->clks_on)
+		else if (is_sps_mode(host)) {
+			if (host->sps.busy && atomic_read(&host->clks_on))
 				msmsdcc_print_regs("SDCC-DML", host->dml_base,
 						   host->dml_memres->start,
 						   16);
@@ -4529,9 +4625,9 @@
 			if (mrq->data && !mrq->data->error)
 				mrq->data->error = -ETIMEDOUT;
 			host->curr.data_xfered = 0;
-			if (host->dma.sg && host->is_dma_mode) {
+			if (host->dma.sg && is_dma_mode(host)) {
 				msm_dmov_flush(host->dma.channel, 0);
-			} else if (host->sps.sg && host->is_sps_mode) {
+			} else if (host->sps.sg && is_sps_mode(host)) {
 				/* Stop current SPS transfer */
 				msmsdcc_sps_exit_curr_xfer(host);
 			} else {
@@ -4553,16 +4649,78 @@
 	spin_unlock_irqrestore(&host->lock, flags);
 }
 
+#define MAX_PROP_SIZE 32
+static int msmsdcc_dt_parse_vreg_info(struct device *dev,
+		struct msm_mmc_reg_data **vreg_data, const char *vreg_name)
+{
+	int len, ret = 0;
+	const __be32 *prop;
+	char prop_name[MAX_PROP_SIZE];
+	struct msm_mmc_reg_data *vreg;
+	struct device_node *np = dev->of_node;
+
+	snprintf(prop_name, MAX_PROP_SIZE, "%s-supply", vreg_name);
+	if (of_parse_phandle(np, prop_name, 0)) {
+		vreg = devm_kzalloc(dev, sizeof(*vreg), GFP_KERNEL);
+		if (!vreg) {
+			dev_err(dev, "No memory for vreg: %s\n", vreg_name);
+			ret = -ENOMEM;
+			goto err;
+		}
+
+		vreg->name = vreg_name;
+
+		snprintf(prop_name, MAX_PROP_SIZE,
+				"qcom,sdcc-%s-always_on", vreg_name);
+		if (of_get_property(np, prop_name, NULL))
+			vreg->always_on = true;
+
+		snprintf(prop_name, MAX_PROP_SIZE,
+				"qcom,sdcc-%s-lpm_sup", vreg_name);
+		if (of_get_property(np, prop_name, NULL))
+			vreg->lpm_sup = true;
+
+		snprintf(prop_name, MAX_PROP_SIZE,
+				"qcom,sdcc-%s-voltage_level", vreg_name);
+		prop = of_get_property(np, prop_name, &len);
+		if (!prop || (len != (2 * sizeof(__be32)))) {
+			dev_warn(dev, "%s %s property\n",
+				prop ? "invalid format" : "no", prop_name);
+		} else {
+			vreg->low_vol_level = be32_to_cpup(&prop[0]);
+			vreg->high_vol_level = be32_to_cpup(&prop[1]);
+		}
+
+		snprintf(prop_name, MAX_PROP_SIZE,
+				"qcom,sdcc-%s-current_level", vreg_name);
+		prop = of_get_property(np, prop_name, &len);
+		if (!prop || (len != (2 * sizeof(__be32)))) {
+			dev_warn(dev, "%s %s property\n",
+				prop ? "invalid format" : "no", prop_name);
+		} else {
+			vreg->lpm_uA = be32_to_cpup(&prop[0]);
+			vreg->hpm_uA = be32_to_cpup(&prop[1]);
+		}
+
+		*vreg_data = vreg;
+		dev_dbg(dev, "%s: %s %s vol=[%d %d]uV, curr=[%d %d]uA\n",
+			vreg->name, vreg->always_on ? "always_on," : "",
+			vreg->lpm_sup ? "lpm_sup," : "", vreg->low_vol_level,
+			vreg->high_vol_level, vreg->lpm_uA, vreg->hpm_uA);
+	}
+
+err:
+	return ret;
+}
+
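For concreteness, a sketch of the property names the parser above derives from its snprintf() patterns, shown here for the "vdd" supply; the meanings follow directly from the fields they populate.

/*
 * vdd-supply                    - regulator phandle; presence enables parsing
 * qcom,sdcc-vdd-always_on       - boolean, regulator stays on
 * qcom,sdcc-vdd-lpm_sup         - boolean, low power mode supported
 * qcom,sdcc-vdd-voltage_level   - <low high>, in uV
 * qcom,sdcc-vdd-current_level   - <lpm hpm>, in uA
 */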
 static struct mmc_platform_data *msmsdcc_populate_pdata(struct device *dev)
 {
 	int i, ret;
 	struct mmc_platform_data *pdata;
 	struct device_node *np = dev->of_node;
-	u32 bus_width = 0;
-	u32 *clk_table;
-	int clk_table_len;
-	u32 *sup_voltages;
-	int sup_volt_len;
+	u32 bus_width = 0, current_limit = 0;
+	u32 *clk_table, *sup_voltages;
+	int clk_table_len, sup_volt_len, len;
 
 	pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
 	if (!pdata) {
@@ -4645,6 +4803,65 @@
 		dev_err(dev, "Supported clock rates not specified\n");
 	}
 
+	pdata->vreg_data = devm_kzalloc(dev,
+			sizeof(struct msm_mmc_slot_reg_data), GFP_KERNEL);
+	if (!pdata->vreg_data) {
+		dev_err(dev, "could not allocate memory for vreg_data\n");
+		goto err;
+	}
+
+	if (msmsdcc_dt_parse_vreg_info(dev,
+			&pdata->vreg_data->vdd_data, "vdd"))
+		goto err;
+
+	if (msmsdcc_dt_parse_vreg_info(dev,
+			&pdata->vreg_data->vdd_io_data, "vdd-io"))
+		goto err;
+
+	len = of_property_count_strings(np, "qcom,sdcc-bus-speed-mode");
+
+	for (i = 0; i < len; i++) {
+		const char *name = NULL;
+
+		of_property_read_string_index(np,
+			"qcom,sdcc-bus-speed-mode", i, &name);
+		if (!name)
+			continue;
+
+		if (!strncmp(name, "SDR12", sizeof("SDR12")))
+			pdata->uhs_caps |= MMC_CAP_UHS_SDR12;
+		else if (!strncmp(name, "SDR25", sizeof("SDR25")))
+			pdata->uhs_caps |= MMC_CAP_UHS_SDR25;
+		else if (!strncmp(name, "SDR50", sizeof("SDR50")))
+			pdata->uhs_caps |= MMC_CAP_UHS_SDR50;
+		else if (!strncmp(name, "DDR50", sizeof("DDR50")))
+			pdata->uhs_caps |= MMC_CAP_UHS_DDR50;
+		else if (!strncmp(name, "SDR104", sizeof("SDR104")))
+			pdata->uhs_caps |= MMC_CAP_UHS_SDR104;
+		else if (!strncmp(name, "HS200_1p8v", sizeof("HS200_1p8v")))
+			pdata->uhs_caps2 |= MMC_CAP2_HS200_1_8V_SDR;
+		else if (!strncmp(name, "HS200_1p2v", sizeof("HS200_1p2v")))
+			pdata->uhs_caps2 |= MMC_CAP2_HS200_1_2V_SDR;
+		else if (!strncmp(name, "DDR_1p8v", sizeof("DDR_1p8v")))
+			pdata->uhs_caps |= MMC_CAP_1_8V_DDR
+						| MMC_CAP_UHS_DDR50;
+		else if (!strncmp(name, "DDR_1p2v", sizeof("DDR_1p2v")))
+			pdata->uhs_caps |= MMC_CAP_1_2V_DDR
+						| MMC_CAP_UHS_DDR50;
+	}
+
+	of_property_read_u32(np, "qcom,sdcc-current-limit", &current_limit);
+	if (current_limit == 800)
+		pdata->uhs_caps |= MMC_CAP_MAX_CURRENT_800;
+	else if (current_limit == 600)
+		pdata->uhs_caps |= MMC_CAP_MAX_CURRENT_600;
+	else if (current_limit == 400)
+		pdata->uhs_caps |= MMC_CAP_MAX_CURRENT_400;
+	else if (current_limit == 200)
+		pdata->uhs_caps |= MMC_CAP_MAX_CURRENT_200;
+
+	if (of_get_property(np, "qcom,sdcc-xpc", NULL))
+		pdata->xpc_cap = true;
 	if (of_get_property(np, "qcom,sdcc-nonremovable", NULL))
 		pdata->nonremovable = true;
 	if (of_get_property(np, "qcom,sdcc-disable_cmd23", NULL))
@@ -4782,9 +4999,9 @@
 	host->curr.cmd = NULL;
 
 	if (!plat->disable_bam && bam_memres && dml_memres && bam_irqres)
-		host->is_sps_mode = 1;
+		set_hw_caps(host, MSMSDCC_SPS_BAM_SUP);
 	else if (dmares)
-		host->is_dma_mode = 1;
+		set_hw_caps(host, MSMSDCC_DMA_SUP);
 
 	host->base = ioremap(core_memres->start,
 			resource_size(core_memres));
@@ -4817,7 +5034,7 @@
 
 	tasklet_init(&host->sps.tlet, msmsdcc_sps_complete_tlet,
 			(unsigned long)host);
-	if (host->is_dma_mode) {
+	if (is_dma_mode(host)) {
 		/* Setup DMA */
 		ret = msmsdcc_init_dma(host);
 		if (ret)
@@ -4876,13 +5093,8 @@
 	if (!host->clk_rate)
 		dev_err(&pdev->dev, "Failed to read MCLK\n");
 
-	/*
-	* Lookup the Controller Version, to identify the supported features
-	* Version number read as 0 would indicate SDCC3 or earlier versions
-	*/
-	host->sdcc_version = readl_relaxed(host->base + MCI_VERSION);
-	pr_info("%s: mci-version: %x\n", mmc_hostname(host->mmc),
-		host->sdcc_version);
+	set_default_hw_caps(host);
+
 	/*
 	 * Set the register write delay according to min. clock frequency
 	 * supported and update later when the host->clk_rate changes.
@@ -4891,7 +5103,7 @@
 		(1 + ((3 * USEC_PER_SEC) /
 		      msmsdcc_get_min_sup_clk_rate(host)));
 
-	host->clks_on = 1;
+	atomic_set(&host->clks_on, 1);
 	/* Apply Hard reset to SDCC to put it in power on default state */
 	msmsdcc_hard_reset(host);
 
@@ -4920,7 +5132,7 @@
 
 
 	/* Clocks has to be running before accessing SPS/DML HW blocks */
-	if (host->is_sps_mode) {
+	if (is_sps_mode(host)) {
 		/* Initialize SPS */
 		ret = msmsdcc_sps_init(host);
 		if (ret)
@@ -4953,10 +5165,11 @@
 	 * status is to use the AUTO_PROG_DONE status provided by SDCC4
 	 * controller. So let's enable the CMD23 for SDCC4 only.
 	 */
-	if (!plat->disable_cmd23 && host->sdcc_version)
+	if (!plat->disable_cmd23 && is_auto_prog_done(host))
 		mmc->caps |= MMC_CAP_CMD23;
 
 	mmc->caps |= plat->uhs_caps;
+	mmc->caps2 |= plat->uhs_caps2;
 	/*
 	 * XPC controls the maximum current in the default speed mode of SDXC
 	 * card. XPC=0 means 100mA (max.) but speed class is not supported.
@@ -4971,12 +5184,6 @@
 	mmc->caps2 |= (MMC_CAP2_BOOTPART_NOACC | MMC_CAP2_DETECT_ON_ERR);
 	mmc->caps2 |= MMC_CAP2_SANITIZE;
 
-	if (pdev->dev.of_node) {
-		if (of_get_property((&pdev->dev)->of_node,
-					"qcom,sdcc-hs200", NULL))
-			mmc->caps2 |= MMC_CAP2_HS200_1_8V_SDR;
-	}
-
 	if (plat->nonremovable)
 		mmc->caps |= MMC_CAP_NONREMOVABLE;
 	mmc->caps |= MMC_CAP_SDIO_IRQ;
@@ -5133,6 +5340,8 @@
 		(unsigned int) plat->status_irq, host->dma.channel,
 		host->dma.crci);
 
+	pr_info("%s: Controller capabilities: 0x%.8x\n",
+			mmc_hostname(mmc), host->hw_caps);
 	pr_info("%s: 8 bit data mode %s\n", mmc_hostname(mmc),
 		(mmc->caps & MMC_CAP_8_BIT_DATA ? "enabled" : "disabled"));
 	pr_info("%s: 4 bit data mode %s\n", mmc_hostname(mmc),
@@ -5147,14 +5356,14 @@
 	pr_info("%s: Power save feature enable = %d\n",
 	       mmc_hostname(mmc), msmsdcc_pwrsave);
 
-	if (host->is_dma_mode && host->dma.channel != -1
+	if (is_dma_mode(host) && host->dma.channel != -1
 			&& host->dma.crci != -1) {
 		pr_info("%s: DM non-cached buffer at %p, dma_addr 0x%.8x\n",
 		       mmc_hostname(mmc), host->dma.nc, host->dma.nc_busaddr);
 		pr_info("%s: DM cmd busaddr 0x%.8x, cmdptr busaddr 0x%.8x\n",
 		       mmc_hostname(mmc), host->dma.cmd_busaddr,
 		       host->dma.cmdptr_busaddr);
-	} else if (host->is_sps_mode) {
+	} else if (is_sps_mode(host)) {
 		pr_info("%s: SPS-BAM data transfer mode available\n",
 			mmc_hostname(mmc));
 	} else
@@ -5205,10 +5414,10 @@
  irq_free:
 	free_irq(core_irqres->start, host);
  dml_exit:
-	if (host->is_sps_mode)
+	if (is_sps_mode(host))
 		msmsdcc_dml_exit(host);
  sps_exit:
-	if (host->is_sps_mode)
+	if (is_sps_mode(host))
 		msmsdcc_sps_exit(host);
  vreg_deinit:
 	msmsdcc_vreg_init(host, false);
@@ -5231,7 +5440,7 @@
  bus_clk_put:
 	if (!IS_ERR_OR_NULL(host->bus_clk))
 		clk_put(host->bus_clk);
-	if (host->is_dma_mode) {
+	if (is_dma_mode(host)) {
 		if (host->dmares)
 			dma_free_coherent(NULL,
 				sizeof(struct msmsdcc_nc_dmadata),
@@ -5300,14 +5509,14 @@
 
 	msmsdcc_vreg_init(host, false);
 
-	if (host->is_dma_mode) {
+	if (is_dma_mode(host)) {
 		if (host->dmares)
 			dma_free_coherent(NULL,
 					sizeof(struct msmsdcc_nc_dmadata),
 					host->dma.nc, host->dma.nc_busaddr);
 	}
 
-	if (host->is_sps_mode) {
+	if (is_sps_mode(host)) {
 		msmsdcc_dml_exit(host);
 		msmsdcc_sps_exit(host);
 	}
@@ -5329,6 +5538,7 @@
 {
 	struct msmsdcc_host *host = mmc_priv(mmc);
 	unsigned long flags;
+	int rc = 0;
 
 	mutex_lock(&host->clk_mutex);
 	spin_lock_irqsave(&host->lock, flags);
@@ -5341,13 +5551,9 @@
 			disable_irq_nosync(host->core_irqres->start);
 			host->sdcc_irq_disabled = 1;
 		}
-
-		if (host->clks_on) {
-			spin_unlock_irqrestore(&host->lock, flags);
-			msmsdcc_setup_clocks(host, false);
-			spin_lock_irqsave(&host->lock, flags);
-			host->clks_on = 0;
-		}
+		rc = msmsdcc_setup_clocks(host, false);
+		if (rc)
+			goto out;
 
 		if (host->plat->sdio_lpm_gpio_setup &&
 				!host->sdio_gpio_lpm) {
@@ -5363,6 +5569,10 @@
 			host->sdio_wakeupirq_disabled = 0;
 		}
 	} else {
+		rc = msmsdcc_setup_clocks(host, true);
+		if (rc)
+			goto out;
+
 		if (!host->sdio_wakeupirq_disabled) {
 			disable_irq_nosync(host->plat->sdiowakeup_irq);
 			host->sdio_wakeupirq_disabled = 1;
@@ -5377,14 +5587,7 @@
 			host->sdio_gpio_lpm = 0;
 		}
 
-		if (!host->clks_on) {
-			spin_unlock_irqrestore(&host->lock, flags);
-			msmsdcc_setup_clocks(host, true);
-			spin_lock_irqsave(&host->lock, flags);
-			host->clks_on = 1;
-		}
-
-		if (host->sdcc_irq_disabled) {
+		if (host->sdcc_irq_disabled && atomic_read(&host->clks_on)) {
 			writel_relaxed(host->mci_irqenable,
 				       host->base + MMCIMASK0);
 			mb();
@@ -5392,9 +5595,10 @@
 			host->sdcc_irq_disabled = 0;
 		}
 	}
+out:
 	spin_unlock_irqrestore(&host->lock, flags);
 	mutex_unlock(&host->clk_mutex);
-	return 0;
+	return rc;
 }
 #else
 int msmsdcc_sdio_al_lpm(struct mmc_host *mmc, bool enable)
@@ -5606,7 +5810,7 @@
 	 * during suspend and not allowing TCXO.
 	 */
 
-	if (host->clks_on && !host->plat->is_sdio_al_client) {
+	if (atomic_read(&host->clks_on) && !host->plat->is_sdio_al_client) {
 		pr_warn("%s: clocks are on after suspend, aborting system "
 				"suspend\n", mmc_hostname(mmc));
 		rc = -EAGAIN;
diff --git a/drivers/mmc/host/msm_sdcc.h b/drivers/mmc/host/msm_sdcc.h
index 5531f06..baeabd2 100644
--- a/drivers/mmc/host/msm_sdcc.h
+++ b/drivers/mmc/host/msm_sdcc.h
@@ -38,6 +38,8 @@
 #define MCI_PWR_UP		0x02
 #define MCI_PWR_ON		0x03
 #define MCI_OD			(1 << 6)
+#define MCI_SW_RST		(1 << 7)
+#define MCI_SW_RST_CFG		(1 << 8)
 
 #define MMCICLOCK		0x004
 #define MCI_CLK_ENABLE		(1 << 8)
@@ -351,7 +353,7 @@
 	struct clk		*clk;		/* main MMC bus clock */
 	struct clk		*pclk;		/* SDCC peripheral bus clock */
 	struct clk		*bus_clk;	/* SDCC bus voter clock */
-	unsigned int		clks_on;	/* set if clocks are enabled */
+	atomic_t		clks_on;	/* set if clocks are enabled */
 
 	unsigned int		eject;		/* eject state */
 
@@ -363,14 +365,12 @@
 
 	u32			pwr;
 	struct mmc_platform_data *plat;
-	u32			sdcc_version;
+	unsigned int		hw_caps;
 
 	unsigned int		oldstat;
 
 	struct msmsdcc_dma_data	dma;
 	struct msmsdcc_sps_data sps;
-	bool			is_dma_mode;
-	bool			is_sps_mode;
 	struct msmsdcc_pio_data	pio;
 
 #ifdef CONFIG_HAS_EARLYSUSPEND
@@ -415,6 +415,47 @@
 	struct device_attribute	polling;
 };
 
+#define MSMSDCC_VERSION_MASK	0xFFFF
+#define MSMSDCC_DMA_SUP	(1 << 0)
+#define MSMSDCC_SPS_BAM_SUP	(1 << 1)
+#define MSMSDCC_SOFT_RESET	(1 << 2)
+#define MSMSDCC_AUTO_PROG_DONE	(1 << 3)
+#define MSMSDCC_REG_WR_ACTIVE	(1 << 4)
+#define MSMSDCC_SW_RST		(1 << 5)
+#define MSMSDCC_SW_RST_CFG	(1 << 6)
+
+#define set_hw_caps(h, val)		((h)->hw_caps |= val)
+#define is_sps_mode(h)			((h)->hw_caps & MSMSDCC_SPS_BAM_SUP)
+#define is_dma_mode(h)			((h)->hw_caps & MSMSDCC_DMA_SUP)
+#define is_soft_reset(h)		((h)->hw_caps & MSMSDCC_SOFT_RESET)
+#define is_auto_prog_done(h)		((h)->hw_caps & MSMSDCC_AUTO_PROG_DONE)
+#define is_wait_for_reg_write(h)	((h)->hw_caps & MSMSDCC_REG_WR_ACTIVE)
+#define is_sw_hard_reset(h)		((h)->hw_caps & MSMSDCC_SW_RST)
+#define is_sw_reset_save_config(h)	((h)->hw_caps & MSMSDCC_SW_RST_CFG)
+
+/* Set controller capabilities based on version */
+static inline void set_default_hw_caps(struct msmsdcc_host *host)
+{
+	u32 version;
+	/*
+	 * Lookup the Controller Version, to identify the supported features
+	 * Version number read as 0 would indicate SDCC3 or earlier versions.
+	 */
+	version = readl_relaxed(host->base + MCI_VERSION);
+	pr_info("%s: SDCC Version: 0x%.8x\n", mmc_hostname(host->mmc), version);
+
+	if (!version)
+		return;
+
+	version &= MSMSDCC_VERSION_MASK;
+	if (version) /* SDCC v4 and greater */
+		host->hw_caps |= MSMSDCC_AUTO_PROG_DONE |
+			MSMSDCC_SOFT_RESET | MSMSDCC_REG_WR_ACTIVE;
+
+	if (version >= 0x2D) /* SDCC v4 2.1.0 and greater */
+		host->hw_caps |= MSMSDCC_SW_RST | MSMSDCC_SW_RST_CFG;
+}
+
 int msmsdcc_set_pwrsave(struct mmc_host *mmc, int pwrsave);
 int msmsdcc_sdio_al_lpm(struct mmc_host *mmc, bool enable);
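A short usage sketch (hypothetical, not from the patch) for the capability helpers above, once set_default_hw_caps() has decoded MCI_VERSION:

static void example_report_caps(struct msmsdcc_host *host)
{
	set_default_hw_caps(host);

	if (is_auto_prog_done(host))	/* SDCC v4 and greater */
		pr_debug("%s: CMD23 can rely on AUTO_PROG_DONE\n",
			 mmc_hostname(host->mmc));
	if (is_sw_hard_reset(host))	/* SDCC v4 2.1.0 and greater */
		pr_debug("%s: MCI_SW_RST/MCI_SW_RST_CFG usable for reset\n",
			 mmc_hostname(host->mmc));
}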
 
diff --git a/drivers/net/usb/rmnet_usb_ctrl.c b/drivers/net/usb/rmnet_usb_ctrl.c
index c2085c9..2972af0 100644
--- a/drivers/net/usb/rmnet_usb_ctrl.c
+++ b/drivers/net/usb/rmnet_usb_ctrl.c
@@ -111,7 +111,7 @@
 {
 	if (dev) {
 		mutex_lock(&dev->dev_lock);
-		if (!dev->intf) {
+		if (!dev->is_connected) {
 			mutex_unlock(&dev->dev_lock);
 			return 0;
 		}
@@ -521,8 +521,6 @@
 	dev->is_opened = 0;
 	mutex_unlock(&dev->dev_lock);
 
-	rmnet_usb_ctrl_stop_rx(dev);
-
 	if (is_dev_connected(dev))
 		usb_kill_anchored_urbs(&dev->tx_submitted);
 
@@ -761,10 +759,17 @@
 	dev->tx_ctrl_err_cnt = 0;
 	dev->set_ctrl_line_state_cnt = 0;
 
-	ret = rmnet_usb_ctrl_write_cmd(dev);
+	ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
+			USB_CDC_REQ_SET_CONTROL_LINE_STATE,
+			(USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE),
+			dev->cbits_tomdm,
+			dev->intf->cur_altsetting->desc.bInterfaceNumber,
+			NULL, 0, USB_CTRL_SET_TIMEOUT);
 	if (ret < 0)
 		return ret;
 
+	dev->set_ctrl_line_state_cnt++;
+
 	dev->inturb = usb_alloc_urb(0, GFP_KERNEL);
 	if (!dev->inturb) {
 		dev_err(dev->devicep, "Error allocating int urb\n");
@@ -800,7 +805,11 @@
 			 notification_available_cb, dev, interval);
 
 	usb_mark_last_busy(udev);
-	return rmnet_usb_ctrl_start_rx(dev);
+	ret = rmnet_usb_ctrl_start_rx(dev);
+	if (!ret)
+		dev->is_connected = true;
+
+	return ret;
 }
 
 void rmnet_usb_ctrl_disconnect(struct rmnet_ctrl_dev *dev)
@@ -813,7 +822,7 @@
 	dev->cbits_tolocal = ~ACM_CTRL_CD;
 
 	dev->cbits_tomdm = ~ACM_CTRL_DTR;
-	dev->intf = NULL;
+	dev->is_connected = false;
 	mutex_unlock(&dev->dev_lock);
 
 	wake_up(&dev->read_wait_queue);
diff --git a/drivers/net/usb/rmnet_usb_ctrl.h b/drivers/net/usb/rmnet_usb_ctrl.h
index bc07726..3259940 100644
--- a/drivers/net/usb/rmnet_usb_ctrl.h
+++ b/drivers/net/usb/rmnet_usb_ctrl.h
@@ -46,6 +46,8 @@
 
 	unsigned		is_opened;
 
+	bool			is_connected;
+
 	/*input control lines (DSR, CTS, CD, RI)*/
 	unsigned int		cbits_tolocal;
 
diff --git a/drivers/net/usb/rmnet_usb_data.c b/drivers/net/usb/rmnet_usb_data.c
index 9e1e252..55020a1 100644
--- a/drivers/net/usb/rmnet_usb_data.c
+++ b/drivers/net/usb/rmnet_usb_data.c
@@ -604,6 +604,7 @@
 
 static const struct driver_info rmnet_info_pid9034 = {
 	.description   = "RmNET net device",
+	.flags         = FLAG_SEND_ZLP,
 	.bind          = rmnet_usb_bind,
 	.tx_fixup      = rmnet_usb_tx_fixup,
 	.rx_fixup      = rmnet_usb_rx_fixup,
@@ -613,6 +614,7 @@
 
 static const struct driver_info rmnet_info_pid9048 = {
 	.description   = "RmNET net device",
+	.flags         = FLAG_SEND_ZLP,
 	.bind          = rmnet_usb_bind,
 	.tx_fixup      = rmnet_usb_tx_fixup,
 	.rx_fixup      = rmnet_usb_rx_fixup,
@@ -622,6 +624,7 @@
 
 static const struct driver_info rmnet_info_pid904c = {
 	.description   = "RmNET net device",
+	.flags         = FLAG_SEND_ZLP,
 	.bind          = rmnet_usb_bind,
 	.tx_fixup      = rmnet_usb_tx_fixup,
 	.rx_fixup      = rmnet_usb_rx_fixup,
diff --git a/drivers/of/of_spmi.c b/drivers/of/of_spmi.c
index 61085c9..0c23db5 100644
--- a/drivers/of/of_spmi.c
+++ b/drivers/of/of_spmi.c
@@ -43,27 +43,12 @@
 }
 
 /*
- * Allocate dev_node array for spmi_device
- */
-static inline int of_spmi_alloc_device_store(struct of_spmi_dev_info *d_info,
-					     uint32_t num_dev_node)
-{
-	d_info->b_info.num_dev_node = num_dev_node;
-	d_info->b_info.dev_node = kzalloc(sizeof(struct spmi_resource) *
-						num_dev_node, GFP_KERNEL);
-	if (!d_info->b_info.dev_node)
-		return -ENOMEM;
-
-	return 0;
-}
-
-/*
  * Calculate the number of resources to allocate
  *
  * The caller is responsible for initializing the of_spmi_res_info structure.
  */
-static void of_spmi_sum_node_resources(struct of_spmi_res_info *r_info,
-				       bool has_reg)
+static void of_spmi_sum_resources(struct of_spmi_res_info *r_info,
+				  bool has_reg)
 {
 	struct of_irq oirq;
 	uint64_t size;
@@ -92,58 +77,48 @@
 }
 
 /*
- * free spmi_resource for the spmi_device
+ * Allocate dev_node array for spmi_device - used with spmi-dev-container
  */
-static void of_spmi_free_device_resources(struct of_spmi_dev_info *d_info)
+static inline int of_spmi_alloc_devnode_store(struct of_spmi_dev_info *d_info,
+					      uint32_t num_dev_node)
 {
-	int i;
+	d_info->b_info.num_dev_node = num_dev_node;
+	d_info->b_info.dev_node = kzalloc(sizeof(struct spmi_resource) *
+						num_dev_node, GFP_KERNEL);
+	if (!d_info->b_info.dev_node)
+		return -ENOMEM;
 
-	for (i = 0; i < d_info->b_info.num_dev_node; i++)
-		kfree(d_info->b_info.dev_node[i].resource);
-
-	kfree(d_info->b_info.dev_node);
-}
-
-/*
- * Gather node resources and populate
- */
-static void of_spmi_populate_node_resources(struct of_spmi_dev_info *d_info,
-					    struct of_spmi_res_info *r_info,
-					    int idx)
-
-{
-	uint32_t num_irq = r_info->num_irq, num_reg = r_info->num_reg;
-	int i;
-	struct resource *res;
-	const  __be32 *addrp;
-	uint64_t size;
-	uint32_t flags;
-
-	res = d_info->b_info.dev_node[idx].resource;
-	d_info->b_info.dev_node[idx].of_node = r_info->node;
-
-	if ((num_irq || num_reg) && (res != NULL)) {
-		for (i = 0; i < num_reg; i++, res++) {
-			/* Addresses are always 16 bits */
-			addrp = of_get_address(r_info->node, i, &size, &flags);
-			BUG_ON(!addrp);
-			res->start = be32_to_cpup(addrp);
-			res->end = res->start + size - 1;
-			res->flags = flags;
-		}
-		WARN_ON(of_irq_to_resource_table(r_info->node, res, num_irq) !=
-								num_irq);
-	}
+	return 0;
 }
 
 /*
  * Allocate enough memory to handle the resources associated with the
- * device_node. The number of device nodes included in this allocation
- * depends on whether the spmi-dev-container flag is specified or not.
+ * primary node.
  */
 static int of_spmi_allocate_node_resources(struct of_spmi_dev_info *d_info,
-					   struct of_spmi_res_info *r_info,
-					   uint32_t idx)
+					   struct of_spmi_res_info *r_info)
+{
+	uint32_t num_irq = r_info->num_irq, num_reg = r_info->num_reg;
+	struct resource *res = NULL;
+
+	if (num_irq || num_reg) {
+		res = kzalloc(sizeof(*res) * (num_irq + num_reg), GFP_KERNEL);
+		if (!res)
+			return -ENOMEM;
+	}
+	d_info->b_info.res.num_resources = num_reg + num_irq;
+	d_info->b_info.res.resource = res;
+
+	return 0;
+}
+
+/*
+ * Allocate enough memory to handle the resources associated with the
+ * spmi-dev-container nodes.
+ */
+static int of_spmi_allocate_devnode_resources(struct of_spmi_dev_info *d_info,
+					      struct of_spmi_res_info *r_info,
+					      uint32_t idx)
 {
 	uint32_t num_irq = r_info->num_irq, num_reg = r_info->num_reg;
 	struct resource *res = NULL;
@@ -160,6 +135,87 @@
 }
 
 /*
+ * free node resources - used with primary node
+ */
+static void of_spmi_free_node_resources(struct of_spmi_dev_info *d_info)
+{
+	kfree(d_info->b_info.res.resource);
+}
+
+/*
+ * free devnode resources - used with spmi-dev-container
+ */
+static void of_spmi_free_devnode_resources(struct of_spmi_dev_info *d_info)
+{
+	int i;
+
+	for (i = 0; i < d_info->b_info.num_dev_node; i++)
+		kfree(d_info->b_info.dev_node[i].resource);
+
+	kfree(d_info->b_info.dev_node);
+}
+
+static void of_spmi_populate_resources(struct of_spmi_dev_info *d_info,
+				       struct of_spmi_res_info *r_info,
+				       struct resource *res)
+
+{
+	uint32_t num_irq = r_info->num_irq, num_reg = r_info->num_reg;
+	int i;
+	const  __be32 *addrp;
+	uint64_t size;
+	uint32_t flags;
+
+	if ((num_irq || num_reg) && (res != NULL)) {
+		for (i = 0; i < num_reg; i++, res++) {
+			/* Addresses are always 16 bits */
+			addrp = of_get_address(r_info->node, i, &size, &flags);
+			BUG_ON(!addrp);
+			res->start = be32_to_cpup(addrp);
+			res->end = res->start + size - 1;
+			res->flags = flags;
+			of_property_read_string_index(r_info->node, "reg-names",
+								i, &res->name);
+		}
+		WARN_ON(of_irq_to_resource_table(r_info->node, res, num_irq) !=
+								num_irq);
+	}
+}
+
+/*
+ * Gather primary node resources and populate.
+ */
+static void of_spmi_populate_node_resources(struct of_spmi_dev_info *d_info,
+					    struct of_spmi_res_info *r_info)
+
+{
+	struct resource *res;
+
+	res = d_info->b_info.res.resource;
+	d_info->b_info.res.of_node = r_info->node;
+	of_property_read_string(r_info->node, "label",
+				&d_info->b_info.res.label);
+	of_spmi_populate_resources(d_info, r_info, res);
+}
+
+/*
+ * Gather node devnode resources and populate - used with spmi-dev-container.
+ */
+static void of_spmi_populate_devnode_resources(struct of_spmi_dev_info *d_info,
+					       struct of_spmi_res_info *r_info,
+					       int idx)
+
+{
+	struct resource *res;
+
+	res = d_info->b_info.dev_node[idx].resource;
+	d_info->b_info.dev_node[idx].of_node = r_info->node;
+	of_property_read_string(r_info->node, "label",
+				&d_info->b_info.dev_node[idx].label);
+	of_spmi_populate_resources(d_info, r_info, res);
+}
+
+/*
  * create a single spmi_device
  */
 static int of_spmi_create_device(struct of_spmi_dev_info *d_info,
@@ -216,10 +272,10 @@
 		num_dev_node++;
 	}
 
-	rc = of_spmi_alloc_device_store(d_info, num_dev_node);
+	rc = of_spmi_alloc_devnode_store(d_info, num_dev_node);
 	if (rc) {
-		dev_err(&ctrl->dev, "%s: unable to allocate"
-				" device resources\n", __func__);
+		dev_err(&ctrl->dev, "%s: unable to allocate devnode resources\n",
+								__func__);
 		return;
 	}
 
@@ -228,23 +284,36 @@
 		if (!of_device_is_available(node))
 			continue;
 		of_spmi_init_resource(&r_info, node);
-		of_spmi_sum_node_resources(&r_info, 1);
-		rc = of_spmi_allocate_node_resources(d_info, &r_info, i);
+		of_spmi_sum_resources(&r_info, true);
+		rc = of_spmi_allocate_devnode_resources(d_info, &r_info, i);
 		if (rc) {
 			dev_err(&ctrl->dev, "%s: unable to allocate"
 					" resources\n", __func__);
-			of_spmi_free_device_resources(d_info);
+			of_spmi_free_devnode_resources(d_info);
 			return;
 		}
-		of_spmi_populate_node_resources(d_info, &r_info, i);
+		of_spmi_populate_devnode_resources(d_info, &r_info, i);
 		i++;
 	}
 
+	of_spmi_init_resource(&r_info, container);
+	of_spmi_sum_resources(&r_info, true);
+
+	rc = of_spmi_allocate_node_resources(d_info, &r_info);
+	if (rc) {
+		dev_err(&ctrl->dev, "%s: unable to allocate resources\n",
+								  __func__);
+		of_spmi_free_devnode_resources(d_info);
+		return;
+	}
+
+	of_spmi_populate_node_resources(d_info, &r_info);
+
 	rc = of_spmi_create_device(d_info, container);
 	if (rc) {
 		dev_err(&ctrl->dev, "%s: unable to create device for"
 				" node %s\n", __func__, container->full_name);
-		of_spmi_free_device_resources(d_info);
+		of_spmi_free_devnode_resources(d_info);
 		return;
 	}
 }
@@ -255,7 +324,7 @@
  * point all share the same slave_id.
  */
 static void of_spmi_walk_slave_container(struct of_spmi_dev_info *d_info,
-					struct device_node *container)
+					 struct device_node *container)
 {
 	struct spmi_controller *ctrl = d_info->ctrl;
 	struct device_node *node;
@@ -276,24 +345,17 @@
 			continue;
 		}
 
-		rc = of_spmi_alloc_device_store(d_info, 1);
-		if (rc) {
-			dev_err(&ctrl->dev, "%s: unable to allocate"
-					" device resources\n", __func__);
-			goto slave_err;
-		}
-
 		of_spmi_init_resource(&r_info, node);
-		of_spmi_sum_node_resources(&r_info, 1);
+		of_spmi_sum_resources(&r_info, true);
 
-		rc = of_spmi_allocate_node_resources(d_info, &r_info, 0);
+		rc = of_spmi_allocate_node_resources(d_info, &r_info);
 		if (rc) {
 			dev_err(&ctrl->dev, "%s: unable to allocate"
 						" resources\n", __func__);
 			goto slave_err;
 		}
 
-		of_spmi_populate_node_resources(d_info, &r_info, 0);
+		of_spmi_populate_node_resources(d_info, &r_info);
 
 		rc = of_spmi_create_device(d_info, node);
 		if (rc) {
@@ -305,7 +367,7 @@
 	return;
 
 slave_err:
-	of_spmi_free_device_resources(d_info);
+	of_spmi_free_node_resources(d_info);
 }
 
 int of_spmi_register_devices(struct spmi_controller *ctrl)
@@ -370,31 +432,23 @@
 			if (!of_device_is_available(node))
 				continue;
 
-			rc = of_spmi_alloc_device_store(&d_info, 1);
-			if (rc) {
-				dev_err(&ctrl->dev, "%s: unable to allocate"
-					" device resources\n", __func__);
-				continue;
-			}
-
 			of_spmi_init_resource(&r_info, node);
-			of_spmi_sum_node_resources(&r_info, 0);
-			rc = of_spmi_allocate_node_resources(&d_info,
-								&r_info, 0);
+			of_spmi_sum_resources(&r_info, false);
+			rc = of_spmi_allocate_node_resources(&d_info, &r_info);
 			if (rc) {
 				dev_err(&ctrl->dev, "%s: unable to allocate"
 						" resources\n", __func__);
-				of_spmi_free_device_resources(&d_info);
+				of_spmi_free_node_resources(&d_info);
 				continue;
 			}
 
-			of_spmi_populate_node_resources(&d_info, &r_info, 0);
+			of_spmi_populate_node_resources(&d_info, &r_info);
 
 			rc = of_spmi_create_device(&d_info, node);
 			if (rc) {
 				dev_err(&ctrl->dev, "%s: unable to create"
 						" device\n", __func__);
-				of_spmi_free_device_resources(&d_info);
+				of_spmi_free_node_resources(&d_info);
 				continue;
 			}
 		}
@@ -404,4 +458,4 @@
 }
 EXPORT_SYMBOL(of_spmi_register_devices);
 
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/platform/msm/Kconfig b/drivers/platform/msm/Kconfig
index 15e7f3f..b08fc7d 100644
--- a/drivers/platform/msm/Kconfig
+++ b/drivers/platform/msm/Kconfig
@@ -14,7 +14,7 @@
 	bool "SPS support"
 	depends on (HAS_IOMEM && (ARCH_MSM8960 || ARCH_MSM8X60 \
 			|| ARCH_APQ8064 || ARCH_MSM9615 \
-			|| ARCH_MSM9625 || ARCH_MSMCOPPER))
+			|| ARCH_MSM9625 || ARCH_MSM8974))
 	select GENERIC_ALLOCATOR
 	default n
 	help
diff --git a/drivers/platform/msm/usb_bam.c b/drivers/platform/msm/usb_bam.c
index 1df6d34..d3edfa8 100644
--- a/drivers/platform/msm/usb_bam.c
+++ b/drivers/platform/msm/usb_bam.c
@@ -124,7 +124,7 @@
 		goto fifo_setup_error;
 	}
 	connection->desc = desc_mem_buf[connection_idx][pipe_dir];
-	connection->event_thresh = 512;
+	connection->event_thresh = 16;
 
 	ret = sps_connect(*pipe, connection);
 	if (ret < 0) {
diff --git a/drivers/power/pm8921-bms.c b/drivers/power/pm8921-bms.c
index 73c042d..85389d0 100644
--- a/drivers/power/pm8921-bms.c
+++ b/drivers/power/pm8921-bms.c
@@ -47,6 +47,8 @@
 #define AMUX_TRIM_2		0x322
 #define TEST_PROGRAM_REV	0x339
 
+#define TEMP_SOC_STORAGE	0x107
+
 enum pmic_bms_interrupts {
 	PM8921_BMS_SBI_WRITE_OK,
 	PM8921_BMS_CC_THR,
@@ -134,8 +136,10 @@
 	struct timeval		t;
 	int			last_uuc_uah;
 	int			enable_fcc_learning;
+	int			shutdown_soc;
 };
 
+static int shutdown_soc_invalid;
 static struct pm8921_bms_chip *the_chip;
 
 #define DEFAULT_RBATT_MOHMS		128
@@ -468,29 +472,12 @@
 						* VBATT_MUL_FACTOR;
 }
 
-#define CC_RESOLUTION_N_V1	1085069
-#define CC_RESOLUTION_D_V1	100000
-#define CC_RESOLUTION_N_V2	868056
-#define CC_RESOLUTION_D_V2	10000
-static s64 cc_to_microvolt_v1(s64 cc)
-{
-	return div_s64(cc * CC_RESOLUTION_N_V1, CC_RESOLUTION_D_V1);
-}
-
-static s64 cc_to_microvolt_v2(s64 cc)
-{
-	return div_s64(cc * CC_RESOLUTION_N_V2, CC_RESOLUTION_D_V2);
-}
+#define CC_RESOLUTION_N		868056
+#define CC_RESOLUTION_D		10000
 
 static s64 cc_to_microvolt(struct pm8921_bms_chip *chip, s64 cc)
 {
-	/*
-	 * resolution (the value of a single bit) was changed after revision 2.0
-	 * for more accurate readings
-	 */
-	return (chip->revision < PM8XXX_REVISION_8921_2p0) ?
-				cc_to_microvolt_v1((s64)cc) :
-				cc_to_microvolt_v2((s64)cc);
+	return div_s64(cc * CC_RESOLUTION_N, CC_RESOLUTION_D);
 }
 
 #define CC_READING_TICKS	55
@@ -1455,6 +1442,71 @@
 	return soc;
 }
 
+#define MAX_SHUTDOWN_ADJUST_SECONDS	1800
+static int adjust_for_shutdown_soc(struct pm8921_bms_chip *chip, int soc)
+{
+	struct timespec uptime;
+	int val;
+
+	/* value of zero means the shutdown soc should not be used */
+	if (chip->shutdown_soc == 0)
+		return soc;
+
+	if (shutdown_soc_invalid) {
+		chip->shutdown_soc = 0;
+		return soc;
+	}
+
+	do_posix_clock_monotonic_gettime(&uptime);
+
+	if (uptime.tv_sec >= MAX_SHUTDOWN_ADJUST_SECONDS) {
+		/*
+		 * adjusted for a long time now, switch to reporting the
+		 * calculated soc
+		 */
+		chip->shutdown_soc = 0;
+		return soc;
+	}
+
+	val = ((MAX_SHUTDOWN_ADJUST_SECONDS - uptime.tv_sec)
+		* chip->shutdown_soc
+		+ uptime.tv_sec * soc);
+	val /= MAX_SHUTDOWN_ADJUST_SECONDS;
+	pr_debug("shutdown_soc = %d, adj soc = %d, calc soc = %d\n",
+				chip->shutdown_soc, val, soc);
+
+	return val;
+}
+
+static void backup_soc(struct pm8921_bms_chip *chip, int last_soc)
+{
+	/* TODO: check whether 0x107 is free on all variants (8917, 8038, etc.) */
+	pm8xxx_writeb(the_chip->dev->parent, TEMP_SOC_STORAGE, last_soc);
+}
+
+static void read_shutdown_soc(struct pm8921_bms_chip *chip)
+{
+	int rc;
+	u8 temp;
+
+	rc = pm8xxx_readb(chip->dev->parent, TEMP_SOC_STORAGE, &temp);
+	if (rc)
+		pr_err("failed to read addr = %d %d\n", TEMP_SOC_STORAGE, rc);
+	else
+		chip->shutdown_soc = temp;
+
+	pr_debug("shutdown_soc = %d\n", chip->shutdown_soc);
+}
+
+void pm8921_bms_invalidate_shutdown_soc(void)
+{
+	pr_debug("Invalidating shutdown soc - the battery was removed\n");
+	shutdown_soc_invalid = 1;
+	if (the_chip)
+		the_chip->shutdown_soc = 0;
+}
+EXPORT_SYMBOL(pm8921_bms_invalidate_shutdown_soc);
+
 /*
  * Remaining Usable Charge = remaining_charge (charge at ocv instance)
  *				- coloumb counter charge
@@ -1469,6 +1521,7 @@
 	int remaining_charge_uah, soc;
 	int cc_uah;
 	int rbatt;
+	int shutdown_adjusted_soc;
 
 	calculate_soc_params(chip, raw, batt_temp, chargecycles,
 						&fcc_uah,
@@ -1534,9 +1587,12 @@
 								last_soc);
 	}
 
-	pr_debug("Reported SOC = %u%%\n", last_soc);
-	return last_soc;
+	shutdown_adjusted_soc = adjust_for_shutdown_soc(chip, last_soc);
+	backup_soc(chip, shutdown_adjusted_soc);
+
+	return shutdown_adjusted_soc;
 }
+
 #define MIN_DELTA_625_UV	1000
 static void calib_hkadc(struct pm8921_bms_chip *chip)
 {
@@ -2629,6 +2685,8 @@
 		goto free_irqs;
 	}
 
+	read_shutdown_soc(chip);
+
 	platform_set_drvdata(pdev, chip);
 	the_chip = chip;
 	create_debugfs_entries(chip);
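A worked example of the interpolation in adjust_for_shutdown_soc() (illustrative numbers only): a stored shutdown SOC of 80%, a calculated SOC of 50% and 600 s of uptime yield a reported 70%, converging on the calculated value over the 1800 s window.

/*
 * val = ((1800 - 600) * 80 + 600 * 50) / 1800
 *     = (96000 + 30000) / 1800
 *     = 126000 / 1800
 *     = 70
 */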
diff --git a/drivers/power/pm8921-charger.c b/drivers/power/pm8921-charger.c
index a1561f0..dc40c8e 100644
--- a/drivers/power/pm8921-charger.c
+++ b/drivers/power/pm8921-charger.c
@@ -265,6 +265,7 @@
 	enum pm8921_chg_hot_thr		hot_thr;
 	int				rconn_mohm;
 	enum pm8921_chg_led_src_config	led_src_config;
+	bool				host_mode;
 };
 
 /* user space parameter to limit usb current */
@@ -1141,6 +1142,7 @@
 	POWER_SUPPLY_PROP_PRESENT,
 	POWER_SUPPLY_PROP_ONLINE,
 	POWER_SUPPLY_PROP_CURRENT_MAX,
+	POWER_SUPPLY_PROP_SCOPE,
 };
 
 static enum power_supply_property pm_power_props_mains[] = {
@@ -1196,6 +1198,67 @@
 	return 0;
 }
 
+static int switch_usb_to_charge_mode(struct pm8921_chg_chip *chip)
+{
+	int rc;
+
+	if (!chip->host_mode)
+		return 0;
+
+	/* enable usbin valid comparator and remove force usb ovp fet off */
+	rc = pm8xxx_writeb(chip->dev->parent, USB_OVP_TEST, 0xB2);
+	if (rc < 0) {
+		pr_err("Failed to write 0xB2 to USB_OVP_TEST rc = %d\n", rc);
+		return rc;
+	}
+
+	chip->host_mode = 0;
+
+	return 0;
+}
+
+static int switch_usb_to_host_mode(struct pm8921_chg_chip *chip)
+{
+	int rc;
+
+	if (chip->host_mode)
+		return 0;
+
+	/* disable usbin valid comparator and force usb ovp fet off */
+	rc = pm8xxx_writeb(chip->dev->parent, USB_OVP_TEST, 0xB3);
+	if (rc < 0) {
+		pr_err("Failed to write 0xB3 to USB_OVP_TEST rc = %d\n", rc);
+		return rc;
+	}
+
+	chip->host_mode = 1;
+
+	return 0;
+}
+
+static int pm_power_set_property_usb(struct power_supply *psy,
+				  enum power_supply_property psp,
+				  const union power_supply_propval *val)
+{
+	/* Check if called before init */
+	if (!the_chip)
+		return -EINVAL;
+
+	switch (psp) {
+	case POWER_SUPPLY_PROP_SCOPE:
+		if (val->intval == POWER_SUPPLY_SCOPE_SYSTEM)
+			return switch_usb_to_host_mode(the_chip);
+		else if (val->intval == POWER_SUPPLY_SCOPE_DEVICE)
+			return switch_usb_to_charge_mode(the_chip);
+		else
+			return -EINVAL;
+		break;
+	default:
+		return -EINVAL;
+	}
+	return 0;
+}
+
 static int pm_power_get_property_usb(struct power_supply *psy,
 				  enum power_supply_property psp,
 				  union power_supply_propval *val)
@@ -1234,6 +1297,13 @@
 		else
 		    return 0;
 		break;
+
+	case POWER_SUPPLY_PROP_SCOPE:
+		if (the_chip->host_mode)
+			val->intval = POWER_SUPPLY_SCOPE_SYSTEM;
+		else
+			val->intval = POWER_SUPPLY_SCOPE_DEVICE;
+		break;
 	default:
 		return -EINVAL;
 	}
@@ -3296,13 +3366,29 @@
 	}
 }
 
+#define VREF_BATT_THERM_FORCE_ON	BIT(7)
+static void detect_battery_removal(struct pm8921_chg_chip *chip)
+{
+	u8 temp;
+
+	pm8xxx_readb(chip->dev->parent, CHG_CNTRL, &temp);
+	pr_debug("upon restart CHG_CNTRL = 0x%x\n",  temp);
+
+	if (!(temp & VREF_BATT_THERM_FORCE_ON))
+		/*
+		 * The batt therm force-on bit is battery backed and defaults
+		 * to 0. The charger sets it at init time. If the bit is found
+		 * to be 0, the battery was removed, so tell the BMS about it.
+		 */
+		pm8921_bms_invalidate_shutdown_soc();
+}
+
 #define ENUM_TIMER_STOP_BIT	BIT(1)
 #define BOOT_DONE_BIT		BIT(6)
 #define CHG_BATFET_ON_BIT	BIT(3)
 #define CHG_VCP_EN		BIT(0)
 #define CHG_BAT_TEMP_DIS_BIT	BIT(2)
 #define SAFE_CURRENT_MA		1500
-#define VREF_BATT_THERM_FORCE_ON	BIT(7)
 static int __devinit pm8921_chg_hw_init(struct pm8921_chg_chip *chip)
 {
 	int rc;
@@ -3311,6 +3397,8 @@
 	/* forcing 19p2mhz before accessing any charger registers */
 	pm8921_chg_force_19p2mhz_clk(chip);
 
+	detect_battery_removal(chip);
+
 	rc = pm_chg_masked_write(chip, SYS_CONFIG_2,
 					BOOT_DONE_BIT, BOOT_DONE_BIT);
 	if (rc) {
@@ -3833,6 +3921,7 @@
 	chip->usb_psy.properties = pm_power_props_usb,
 	chip->usb_psy.num_properties = ARRAY_SIZE(pm_power_props_usb),
 	chip->usb_psy.get_property = pm_power_get_property_usb,
+	chip->usb_psy.set_property = pm_power_set_property_usb,
 
 	chip->dc_psy.name = "pm8921-dc",
 	chip->dc_psy.type = POWER_SUPPLY_TYPE_MAINS,
diff --git a/drivers/power/pm8xxx-ccadc.c b/drivers/power/pm8xxx-ccadc.c
index ef31575..861bac8 100644
--- a/drivers/power/pm8xxx-ccadc.c
+++ b/drivers/power/pm8xxx-ccadc.c
@@ -79,27 +79,10 @@
 static struct pm8xxx_ccadc_chip *the_chip;
 
 #ifdef DEBUG
-static s64 microvolt_to_ccadc_reading_v1(s64 uv)
-{
-	return div_s64(uv * CCADC_READING_RESOLUTION_D_V1,
-				CCADC_READING_RESOLUTION_N_V1);
-}
-
-static s64 microvolt_to_ccadc_reading_v2(s64 uv)
-{
-	return div_s64(uv * CCADC_READING_RESOLUTION_D_V2,
-				CCADC_READING_RESOLUTION_N_V2);
-}
-
 static s64 microvolt_to_ccadc_reading(struct pm8xxx_ccadc_chip *chip, s64 cc)
 {
-	/*
-	 * resolution (the value of a single bit) was changed after revision 2.0
-	 * for more accurate readings
-	 */
-	return (the_chip->revision < PM8XXX_REVISION_8921_2p0) ?
-				microvolt_to_ccadc_reading_v1((s64)cc) :
-				microvolt_to_ccadc_reading_v2((s64)cc);
+	return div_s64(cc * CCADC_READING_RESOLUTION_D,
+				CCADC_READING_RESOLUTION_N);
 }
 #endif
 
diff --git a/drivers/power/power_supply_core.c b/drivers/power/power_supply_core.c
index be6ba04..e87b4bd 100644
--- a/drivers/power/power_supply_core.c
+++ b/drivers/power/power_supply_core.c
@@ -64,6 +64,24 @@
 EXPORT_SYMBOL_GPL(power_supply_set_online);
 
 /**
+ * power_supply_set_scope - set scope of the power supply
+ * @psy:	the power supply to control
+ * @scope:	value to set the scope property to, should be from
+ *		the SCOPE enum in power_supply.h
+ */
+int power_supply_set_scope(struct power_supply *psy, int scope)
+{
+	const union power_supply_propval ret = {scope, };
+
+	if (psy->set_property)
+		return psy->set_property(psy, POWER_SUPPLY_PROP_SCOPE,
+								&ret);
+
+	return -ENXIO;
+}
+EXPORT_SYMBOL_GPL(power_supply_set_scope);
+
+/**
  * power_supply_set_charge_type - set charge type of the power supply
  * @psy:	the power supply to control
  * @enable:	sets charge type property of power supply
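A prospective caller sketch (hypothetical; it assumes the pm8921 USB supply is registered under the name "usb" as in the charger driver above):

static int example_usb_set_host_role(bool host)
{
	struct power_supply *usb_psy = power_supply_get_by_name("usb");

	if (!usb_psy)
		return -ENODEV;

	return power_supply_set_scope(usb_psy,
			host ? POWER_SUPPLY_SCOPE_SYSTEM :
			       POWER_SUPPLY_SCOPE_DEVICE);
}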
diff --git a/drivers/regulator/Kconfig b/drivers/regulator/Kconfig
index 2bbc796..6b0916e 100644
--- a/drivers/regulator/Kconfig
+++ b/drivers/regulator/Kconfig
@@ -419,8 +419,8 @@
 	  constraint checking while the real driver is being developed.
 
 config REGULATOR_QPNP
+	depends on SPMI
 	depends on OF_SPMI
-	depends on MSM_QPNP
 	tristate "Qualcomm QPNP regulator support"
 	help
 	  This driver supports voltage regulators in Qualcomm PMIC chips which
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index 986d55b..7cb4a51 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -302,11 +302,11 @@
 static int regulator_check_drms(struct regulator_dev *rdev)
 {
 	if (!rdev->constraints) {
-		rdev_err(rdev, "no constraints\n");
+		rdev_dbg(rdev, "no constraints\n");
 		return -ENODEV;
 	}
 	if (!(rdev->constraints->valid_ops_mask & REGULATOR_CHANGE_DRMS)) {
-		rdev_err(rdev, "operation not allowed\n");
+		rdev_dbg(rdev, "operation not allowed\n");
 		return -EPERM;
 	}
 	return 0;
diff --git a/drivers/regulator/qpnp-regulator.c b/drivers/regulator/qpnp-regulator.c
index 120d17e..8d592fb 100644
--- a/drivers/regulator/qpnp-regulator.c
+++ b/drivers/regulator/qpnp-regulator.c
@@ -29,8 +29,6 @@
 #include <linux/regulator/of_regulator.h>
 #include <linux/regulator/qpnp-regulator.h>
 
-#include <mach/qpnp.h>
-
 /* Debug Flag Definitions */
 enum {
 	QPNP_VREG_DEBUG_REQUEST		= BIT(0), /* Show requests */
@@ -1189,7 +1187,7 @@
 	pdata->init_data.constraints.input_uV
 		= pdata->init_data.constraints.max_uV;
 
-	res = qpnp_get_resource(spmi, 0, IORESOURCE_MEM, 0);
+	res = spmi_get_resource(spmi, NULL, IORESOURCE_MEM, 0);
 	if (!res) {
 		dev_err(&spmi->dev, "%s: node is missing base address\n",
 			__func__);
diff --git a/drivers/slimbus/slimbus.c b/drivers/slimbus/slimbus.c
index 24da4d1..36414e0 100644
--- a/drivers/slimbus/slimbus.c
+++ b/drivers/slimbus/slimbus.c
@@ -1979,7 +1979,7 @@
 			}
 		}
 		/* Leave some slots for messaging space */
-		if (opensl1[1] == 0 && opensl1[0] == 0)
+		if (opensl1[1] <= 0 && opensl1[0] <= 0)
 			return -EXFULL;
 		if (opensl1[1] > opensl1[0]) {
 			int temp = opensl1[0];
@@ -2184,7 +2184,7 @@
 			}
 		}
 		/* Leave some slots for messaging space */
-		if (opensl3[1] == 0 && opensl3[0] == 0)
+		if (opensl3[1] <= 0 && opensl3[0] <= 0)
 			return -EXFULL;
 		/* swap 1st and 2nd bucket if 2nd bucket has more open slots */
 		if (opensl3[1] > opensl3[0]) {
diff --git a/drivers/spi/spi_qsd.c b/drivers/spi/spi_qsd.c
index 2c86e83..f2c881d 100644
--- a/drivers/spi/spi_qsd.c
+++ b/drivers/spi/spi_qsd.c
@@ -405,9 +405,17 @@
 			bytes_sent = 0;
 	}
 
-	/* We'll send in chunks of SPI_MAX_LEN if larger */
-	bytes_to_send = dd->tx_bytes_remaining / SPI_MAX_LEN ?
-			  SPI_MAX_LEN : dd->tx_bytes_remaining;
+	/* We'll send in chunks of SPI_MAX_LEN if the transfer is
+	 * larger than 4K bytes, for targets that don't support
+	 * infinite mode. Make sure this doesn't happen on targets
+	 * that support infinite mode.
+	 */
+	if (!dd->pdata->infinite_mode)
+		bytes_to_send = dd->tx_bytes_remaining / SPI_MAX_LEN ?
+				SPI_MAX_LEN : dd->tx_bytes_remaining;
+	else
+		bytes_to_send = dd->tx_bytes_remaining;
+
 	num_transfers = DIV_ROUND_UP(bytes_to_send, dd->bytes_per_word);
 	dd->unaligned_len = bytes_to_send % dd->burst_size;
 	num_rows = bytes_to_send / dd->burst_size;
@@ -512,12 +520,11 @@
 		msm_dmov_enqueue_cmd(dd->rx_dma_chan, &dd->rx_hdr);
 }
 
-/* SPI core can send maximum of 4K transfers, because there is HW problem
-   with infinite mode.
-   Therefore, we are sending several chunks of 3K or less (depending on how
-   much is left).
-   Upon completion we send the next chunk, or complete the transfer if
-   everything is finished.
+/* The SPI core on targets that do not support infinite mode can send a maximum
+   of 4K per transfer. Therefore, we send several chunks of 3K or less
+   (depending on how much is left). Upon completion we send the next chunk,
+   or complete the transfer if everything is finished. On targets that support
+   infinite mode, we send all the bytes in a single chunk.
 */
 static int msm_spi_dm_send_next(struct msm_spi *dd)
 {
@@ -527,8 +534,10 @@
 	if (dd->mode != SPI_DMOV_MODE)
 		return 0;
 
-	/* We need to send more chunks, if we sent max last time */
-	if (dd->tx_bytes_remaining > SPI_MAX_LEN) {
+	/* On targets that do not support infinite mode,
+	   we need to send more chunks if we sent the max last time */
+	if ((!dd->pdata->infinite_mode) &&
+	    (dd->tx_bytes_remaining > SPI_MAX_LEN)) {
 		dd->tx_bytes_remaining -= SPI_MAX_LEN;
 		if (msm_spi_set_state(dd, SPI_OP_STATE_RESET))
 			return 0;
@@ -1766,6 +1775,8 @@
 
 	of_property_read_u32(node, "spi-max-frequency",
 			&pdata->max_clock_speed);
+	of_property_read_u32(node, "infinite_mode",
+			&pdata->infinite_mode);
 
 	return pdata;
 }
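A worked example of the chunking described above (illustrative; it assumes SPI_MAX_LEN is the 3K limit the comments refer to):

/*
 * tx_bytes_remaining = 10000, SPI_MAX_LEN = 3072, no infinite mode:
 *   pass 1: 10000 / 3072 != 0  ->  send 3072, 6928 left
 *   pass 2:  6928 / 3072 != 0  ->  send 3072, 3856 left
 *   pass 3:  3856 / 3072 != 0  ->  send 3072,  784 left
 *   pass 4:   784 / 3072 == 0  ->  send  784, done
 * With infinite_mode set, the whole 10000 bytes are programmed in one pass.
 */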
diff --git a/drivers/spmi/Kconfig b/drivers/spmi/Kconfig
index 84fd462..8724138 100644
--- a/drivers/spmi/Kconfig
+++ b/drivers/spmi/Kconfig
@@ -19,18 +19,11 @@
 	  This is required for communicating with Qualcomm PMICs and
 	  other devices that have the SPMI interface.
 
-config MSM_QPNP
-	depends on ARCH_MSMCOPPER
-	depends on OF_SPMI
-	bool "MSM QPNP"
-	help
-	  Say 'y' here to include support for the Qualcomm QPNP
-
 config MSM_QPNP_INT
 	depends on SPARSE_IRQ
-	depends on ARCH_MSMCOPPER
+	depends on ARCH_MSM8974
+	depends on SPMI
 	depends on OF_SPMI
-	depends on MSM_QPNP
 	bool "MSM QPNP INT"
 	help
 	  Say 'y' here to include support for the Qualcomm QPNP interrupt
diff --git a/drivers/spmi/Makefile b/drivers/spmi/Makefile
index d59a610..becd823 100644
--- a/drivers/spmi/Makefile
+++ b/drivers/spmi/Makefile
@@ -1,7 +1,6 @@
 #
 # Makefile for kernel SPMI framework.
 #
-obj-$(CONFIG_SPMI)			+= spmi.o
+obj-$(CONFIG_SPMI)			+= spmi.o spmi-resources.o
 obj-$(CONFIG_SPMI_MSM_PMIC_ARB)		+= spmi-pmic-arb.o
-obj-$(CONFIG_MSM_QPNP)                  += qpnp.o
 obj-$(CONFIG_MSM_QPNP_INT)		+= qpnp-int.o
diff --git a/drivers/spmi/qpnp-int.c b/drivers/spmi/qpnp-int.c
index 2998c01..b6dfd51 100644
--- a/drivers/spmi/qpnp-int.c
+++ b/drivers/spmi/qpnp-int.c
@@ -31,8 +31,6 @@
 #include <asm/mach/irq.h>
 #include <mach/qpnp-int.h>
 
-#define QPNPINT_MAX_BUSSES 1
-
 /* 16 slave_ids, 256 per_ids per slave, and 8 ints per per_id */
 #define QPNPINT_NR_IRQS (16 * 256 * 8)
 
@@ -66,13 +64,18 @@
 
 struct q_chip_data {
 	int bus_nr;
-	struct irq_domain domain;
+	struct irq_domain *domain;
 	struct qpnp_local_int cb;
 	struct spmi_controller *spmi_ctrl;
 	struct radix_tree_root per_tree;
+	struct list_head list;
 };
 
-static struct q_chip_data chip_data[QPNPINT_MAX_BUSSES] __read_mostly;
+static LIST_HEAD(qpnpint_chips);
+static DEFINE_MUTEX(qpnpint_chips_mutex);
+
+#define QPNPINT_MAX_BUSSES 4
+struct q_chip_data *chip_lookup[QPNPINT_MAX_BUSSES];
 
 /**
  * qpnpint_encode_hwirq - translate between qpnp_irq_spec and
@@ -138,8 +141,7 @@
 	if (chip_d->cb.mask) {
 		rc = qpnpint_decode_hwirq(d->hwirq, &q_spec);
 		if (rc)
-			pr_err("%s: decode failed on hwirq %lu\n",
-						 __func__, d->hwirq);
+			pr_err("decode failed on hwirq %lu\n", d->hwirq);
 		else
 			chip_d->cb.mask(chip_d->spmi_ctrl, &q_spec,
 								irq_d->priv_d);
@@ -150,8 +152,7 @@
 	rc = qpnpint_spmi_write(irq_d, QPNPINT_REG_EN_CLR,
 					(u8 *)&irq_d->mask_shift, 1);
 	if (rc)
-		pr_err("%s: spmi failure on irq %d\n",
-						 __func__, d->irq);
+		pr_err("spmi failure on irq %d\n", d->irq);
 }
 
 static void qpnpint_irq_mask_ack(struct irq_data *d)
@@ -168,8 +169,7 @@
 	if (chip_d->cb.mask) {
 		rc = qpnpint_decode_hwirq(d->hwirq, &q_spec);
 		if (rc)
-			pr_err("%s: decode failed on hwirq %lu\n",
-						 __func__, d->hwirq);
+			pr_err("decode failed on hwirq %lu\n", d->hwirq);
 		else
 			chip_d->cb.mask(chip_d->spmi_ctrl, &q_spec,
 								irq_d->priv_d);
@@ -180,14 +180,12 @@
 	rc = qpnpint_spmi_write(irq_d, QPNPINT_REG_EN_CLR,
 							&irq_d->mask_shift, 1);
 	if (rc)
-		pr_err("%s: spmi failure on irq %d\n",
-						 __func__, d->irq);
+		pr_err("spmi failure on irq %d\n", d->irq);
 
 	rc = qpnpint_spmi_write(irq_d, QPNPINT_REG_LATCHED_CLR,
 							&irq_d->mask_shift, 1);
 	if (rc)
-		pr_err("%s: spmi failure on irq %d\n",
-						 __func__, d->irq);
+		pr_err("spmi failure on irq %d\n", d->irq);
 }
 
 static void qpnpint_irq_unmask(struct irq_data *d)
@@ -203,8 +201,7 @@
 	if (chip_d->cb.unmask) {
 		rc = qpnpint_decode_hwirq(d->hwirq, &q_spec);
 		if (rc)
-			pr_err("%s: decode failed on hwirq %lu\n",
-						 __func__, d->hwirq);
+			pr_err("decode failed on hwirq %lu\n", d->hwirq);
 		else
 			chip_d->cb.unmask(chip_d->spmi_ctrl, &q_spec,
 								irq_d->priv_d);
@@ -214,8 +211,7 @@
 	rc = qpnpint_spmi_write(irq_d, QPNPINT_REG_EN_SET,
 					&irq_d->mask_shift, 1);
 	if (rc)
-		pr_err("%s: spmi failure on irq %d\n",
-						 __func__, d->irq);
+		pr_err("spmi failure on irq %d\n", d->irq);
 }
 
 static int qpnpint_irq_set_type(struct irq_data *d, unsigned int flow_type)
@@ -244,7 +240,7 @@
 		if (flow_type & IRQF_TRIGGER_HIGH)
 			per_d->pol_high |= irq_d->mask_shift;
 		else
-			per_d->pol_high &= ~irq_d->mask_shift;
+			per_d->pol_low |= irq_d->mask_shift;
 	}
 
 	buf[0] = per_d->type;
@@ -253,8 +249,7 @@
 
 	rc = qpnpint_spmi_write(irq_d, QPNPINT_REG_SET_TYPE, &buf, 3);
 	if (rc)
-		pr_err("%s: spmi failure on irq %d\n",
-						 __func__, d->irq);
+		pr_err("spmi failure on irq %d\n", d->irq);
 	return rc;
 }
 
@@ -279,13 +274,16 @@
 		return rc;
 	irq_d->spmi_slave = q_spec.slave;
 	irq_d->spmi_offset = q_spec.per << 8;
-	irq_d->per_d->use_count++;
 	irq_d->chip_d = chip_d;
 
 	if (chip_d->cb.register_priv_data)
 		rc = chip_d->cb.register_priv_data(chip_d->spmi_ctrl, &q_spec,
 							&irq_d->priv_d);
-	return rc;
+	if (rc)
+		return rc;
+
+	irq_d->per_d->use_count++;
+	return 0;
 }
 
 static struct q_irq_data *qpnpint_alloc_irq_data(
@@ -307,8 +305,10 @@
 	per_d = radix_tree_lookup(&chip_d->per_tree, (hwirq & ~0x7));
 	if (!per_d) {
 		per_d = kzalloc(sizeof(struct q_perip_data), GFP_KERNEL);
-		if (!per_d)
+		if (!per_d) {
+			kfree(irq_d);
 			return ERR_PTR(-ENOMEM);
+		}
 		radix_tree_insert(&chip_d->per_tree,
 				  (hwirq & ~0x7), per_d);
 	}
@@ -317,74 +317,6 @@
 	return irq_d;
 }
 
-static int qpnpint_register_int(uint32_t busno, unsigned long hwirq)
-{
-	int irq, rc;
-	struct irq_domain *domain;
-	struct q_irq_data *irq_d;
-
-	pr_debug("busno = %u hwirq = %lu\n", busno, hwirq);
-
-	if (hwirq < 0 || hwirq >= 32768) {
-		pr_err("%s: hwirq %lu out of qpnp interrupt bounds\n",
-							__func__, hwirq);
-		return -EINVAL;
-	}
-
-	if (busno < 0 || busno > QPNPINT_MAX_BUSSES) {
-		pr_err("%s: invalid bus number %d\n", __func__, busno);
-		return -EINVAL;
-	}
-
-	domain = &chip_data[busno].domain;
-	irq = irq_domain_to_irq(domain, hwirq);
-
-	rc = irq_alloc_desc_at(irq, numa_node_id());
-	if (rc < 0) {
-		if (rc != -EEXIST)
-			pr_err("%s: failed to alloc irq at %d with "
-					"rc %d\n", __func__, irq, rc);
-		return rc;
-	}
-	irq_d = qpnpint_alloc_irq_data(&chip_data[busno], hwirq);
-	if (IS_ERR(irq_d)) {
-		pr_err("%s: failed to alloc irq data %d with "
-					"rc %d\n", __func__, irq, rc);
-		rc = PTR_ERR(irq_d);
-		goto register_err_cleanup;
-	}
-	rc = qpnpint_init_irq_data(&chip_data[busno], irq_d, hwirq);
-	if (rc) {
-		pr_err("%s: failed to init irq data %d with "
-					"rc %d\n", __func__, irq, rc);
-		goto register_err_cleanup;
-	}
-
-	irq_domain_register_irq(domain, hwirq);
-
-	irq_set_chip_and_handler(irq,
-			&qpnpint_chip,
-			handle_level_irq);
-	irq_set_chip_data(irq, irq_d);
-#ifdef CONFIG_ARM
-	set_irq_flags(irq, IRQF_VALID);
-#else
-	irq_set_noprobe(irq);
-#endif
-	return 0;
-
-register_err_cleanup:
-	irq_free_desc(irq);
-	if (!IS_ERR(irq_d)) {
-		if (irq_d->per_d->use_count == 1)
-			kfree(irq_d->per_d);
-		else
-			irq_d->per_d->use_count--;
-		kfree(irq_d);
-	}
-	return rc;
-}
-
 static int qpnpint_irq_domain_dt_translate(struct irq_domain *d,
 				       struct device_node *controller,
 				       const u32 *intspec, unsigned int intsize,
@@ -392,11 +324,10 @@
 				       unsigned int *out_type)
 {
 	struct qpnp_irq_spec addr;
-	struct q_chip_data *chip_d = d->priv;
 	int ret;
 
-	pr_debug("%s: intspec[0] 0x%x intspec[1] 0x%x intspec[2] 0x%x\n",
-				__func__, intspec[0], intspec[1], intspec[2]);
+	pr_debug("intspec[0] 0x%x intspec[1] 0x%x intspec[2] 0x%x\n",
+				intspec[0], intspec[1], intspec[2]);
 
 	if (d->of_node != controller)
 		return -EINVAL;
@@ -409,41 +340,102 @@
 
 	ret = qpnpint_encode_hwirq(&addr);
 	if (ret < 0) {
-		pr_err("%s: invalid intspec\n", __func__);
+		pr_err("invalid intspec\n");
 		return ret;
 	}
 	*out_hwirq = ret;
 	*out_type = IRQ_TYPE_NONE;
 
-	/**
-	 * Register the interrupt if it's not already registered.
-	 * This implies that mapping a qpnp interrupt allocates
-	 * resources.
-	 */
-	ret = qpnpint_register_int(chip_d->bus_nr, *out_hwirq);
-	if (ret && ret != -EEXIST) {
-		pr_err("%s: Cannot register hwirq %lu\n", __func__, *out_hwirq);
-		return ret;
-	}
-
 	return 0;
 }
 
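+/* Free irq data and drop the reference on the shared per-peripheral data. */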
+static void qpnpint_free_irq_data(struct q_irq_data *irq_d)
+{
+	if (irq_d->per_d->use_count == 1)
+		kfree(irq_d->per_d);
+	else
+		irq_d->per_d->use_count--;
+	kfree(irq_d);
+}
+
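+/*
+ * irq_domain .map callback: mapping a qpnp interrupt allocates and
+ * initializes its per-hwirq resources.
+ */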
+static int qpnpint_irq_domain_map(struct irq_domain *d,
+				  unsigned int virq, irq_hw_number_t hwirq)
+{
+	struct q_chip_data *chip_d = d->host_data;
+	struct q_irq_data *irq_d;
+	int rc;
+
+	pr_debug("hwirq = %lu\n", hwirq);
+
+	if (hwirq < 0 || hwirq >= 32768) {
+		pr_err("hwirq %lu out of bounds\n", hwirq);
+		return -EINVAL;
+	}
+
+	irq_radix_revmap_insert(d, virq, hwirq);
+
+	irq_d = qpnpint_alloc_irq_data(chip_d, hwirq);
+	if (IS_ERR(irq_d)) {
+		pr_err("failed to alloc irq data for hwirq %lu\n", hwirq);
+		return PTR_ERR(irq_d);
+	}
+
+	rc = qpnpint_init_irq_data(chip_d, irq_d, hwirq);
+	if (rc) {
+		pr_err("failed to init irq data for hwirq %lu\n", hwirq);
+		goto map_err;
+	}
+
+	irq_set_chip_and_handler(virq,
+			&qpnpint_chip,
+			handle_level_irq);
+	irq_set_chip_data(virq, irq_d);
+#ifdef CONFIG_ARM
+	set_irq_flags(virq, IRQF_VALID);
+#else
+	irq_set_noprobe(virq);
+#endif
+	return 0;
+
+map_err:
+	qpnpint_free_irq_data(irq_d);
+	return rc;
+}
+
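+/* irq_domain .unmap callback: release the data allocated in .map */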
+void qpnpint_irq_domain_unmap(struct irq_domain *d, unsigned int virq)
+{
+	struct q_irq_data *irq_d = irq_get_chip_data(virq);
+
+	if (WARN_ON(!irq_d))
+		return;
+
+	qpnpint_free_irq_data(irq_d);
+}
+
 const struct irq_domain_ops qpnpint_irq_domain_ops = {
-	.dt_translate = qpnpint_irq_domain_dt_translate,
+	.map = qpnpint_irq_domain_map,
+	.unmap = qpnpint_irq_domain_unmap,
+	.xlate = qpnpint_irq_domain_dt_translate,
 };
 
-int qpnpint_register_controller(unsigned int busno,
+int qpnpint_register_controller(struct device_node *node,
+				struct spmi_controller *ctrl,
 				struct qpnp_local_int *li_cb)
 {
-	if (busno >= QPNPINT_MAX_BUSSES)
-		return -EINVAL;
-	chip_data[busno].cb = *li_cb;
-	chip_data[busno].spmi_ctrl = spmi_busnum_to_ctrl(busno);
-	if (!chip_data[busno].spmi_ctrl)
-		return -ENOENT;
+	struct q_chip_data *chip_d;
 
-	return 0;
+	if (!node || !ctrl || ctrl->nr >= QPNPINT_MAX_BUSSES)
+		return -EINVAL;
+
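+	/* find the irq chip registered for this device node at init time */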
+	list_for_each_entry(chip_d, &qpnpint_chips, list)
+		if (node == chip_d->domain->of_node) {
+			chip_d->cb = *li_cb;
+			chip_d->spmi_ctrl = ctrl;
+			chip_lookup[ctrl->nr] = chip_d;
+			return 0;
+		}
+
+	return -ENOENT;
 }
 EXPORT_SYMBOL(qpnpint_register_controller);
 
@@ -457,21 +449,18 @@
 	pr_debug("spec slave = %u per = %u irq = %u\n",
 					spec->slave, spec->per, spec->irq);
 
-	if (!spec || !spmi_ctrl)
-		return -EINVAL;
-
 	busno = spmi_ctrl->nr;
-	if (busno >= QPNPINT_MAX_BUSSES)
+	if (!spec || !spmi_ctrl || busno >= QPNPINT_MAX_BUSSES)
 		return -EINVAL;
 
 	hwirq = qpnpint_encode_hwirq(spec);
 	if (hwirq < 0) {
-		pr_err("%s: invalid irq spec passed\n", __func__);
+		pr_err("invalid irq spec passed\n");
 		return -EINVAL;
 	}
 
-	domain = &chip_data[busno].domain;
-	irq = irq_domain_to_irq(domain, hwirq);
+	domain = chip_lookup[busno]->domain;
+	irq = irq_radix_revmap_lookup(domain, hwirq);
 
 	generic_handle_irq(irq);
 
@@ -479,31 +468,24 @@
 }
 EXPORT_SYMBOL(qpnpint_handle_irq);
 
-/**
- * This assumes that there's a relationship between the order of the interrupt
- * controllers specified to of_irq_match() is the SPMI device topology. If
- * this ever turns out to be a bad assumption, then of_irq_init_cb_t should
- * be modified to pass a parameter to this function.
- */
-static int qpnpint_cnt __initdata;
-
 int __init qpnpint_of_init(struct device_node *node, struct device_node *parent)
 {
-	struct q_chip_data *chip_d = &chip_data[qpnpint_cnt];
-	struct irq_domain *domain = &chip_d->domain;
+	struct q_chip_data *chip_d;
+
+	chip_d = kzalloc(sizeof(struct q_chip_data), GFP_KERNEL);
+	if (!chip_d)
+		return -ENOMEM;
+
+	chip_d->domain = irq_domain_add_tree(node,
+					&qpnpint_irq_domain_ops, chip_d);
+	if (!chip_d->domain) {
+		pr_err("Unable to allocate irq_domain\n");
+		kfree(chip_d);
+		return -ENOMEM;
+	}
 
 	INIT_RADIX_TREE(&chip_d->per_tree, GFP_ATOMIC);
-
-	domain->irq_base = irq_domain_find_free_range(0, QPNPINT_NR_IRQS);
-	domain->nr_irq = QPNPINT_NR_IRQS;
-	domain->of_node = of_node_get(node);
-	domain->priv = chip_d;
-	domain->ops = &qpnpint_irq_domain_ops;
-	irq_domain_add(domain);
-
-	pr_info("irq_base = %d\n", domain->irq_base);
-
-	qpnpint_cnt++;
+	list_add(&chip_d->list, &qpnpint_chips);
 
 	return 0;
 }
diff --git a/drivers/spmi/qpnp.c b/drivers/spmi/qpnp.c
deleted file mode 100644
index a164efb..0000000
--- a/drivers/spmi/qpnp.c
+++ /dev/null
@@ -1,56 +0,0 @@
-/* Copyright (c) 2002-3 Patrick Mochel
- * Copyright (c) 2002-3 Open Source Development Labs
- * Copyright (c) 2012, Code Aurora Forum. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * Resource handling based on platform.c.
- */
-
-#include <linux/export.h>
-#include <mach/qpnp.h>
-
-/**
- * qpnp_get_resource - get a resource for a device
- * @dev: qpnp device
- * @type: resource type
- * @num: resource index
- */
-struct resource *qpnp_get_resource(struct spmi_device *dev,
-				   unsigned int node_idx, unsigned int type,
-				   unsigned int res_num)
-{
-	int i;
-
-	for (i = 0; i < dev->dev_node[node_idx].num_resources; i++) {
-		struct resource *r = &dev->dev_node[node_idx].resource[i];
-
-		if (type == resource_type(r) && res_num-- == 0)
-			return r;
-	}
-	return NULL;
-}
-EXPORT_SYMBOL_GPL(qpnp_get_resource);
-
-/**
- * qpnp_get_irq - get an IRQ for a device
- * @dev: qpnp device
- * @num: IRQ number index
- */
-int qpnp_get_irq(struct spmi_device *dev, unsigned int node_idx,
-					  unsigned int res_num)
-{
-	struct resource *r = qpnp_get_resource(dev, node_idx,
-						IORESOURCE_IRQ, res_num);
-
-	return r ? r->start : -ENXIO;
-}
-EXPORT_SYMBOL_GPL(qpnp_get_irq);
-
diff --git a/drivers/spmi/spmi-pmic-arb.c b/drivers/spmi/spmi-pmic-arb.c
index f22b900..422e99e 100644
--- a/drivers/spmi/spmi-pmic-arb.c
+++ b/drivers/spmi/spmi-pmic-arb.c
@@ -664,7 +664,14 @@
 		goto err_add_controller;
 
 	/* Register the interrupt enable/disable functions */
-	qpnpint_register_controller(cell_index, &spmi_pmic_arb_intr_cb);
+	ret = qpnpint_register_controller(pmic_arb->controller.dev.of_node,
+					  &pmic_arb->controller,
+					  &spmi_pmic_arb_intr_cb);
+	if (ret) {
+		dev_err(&pdev->dev, "Unable to register controller %d\n",
+					cell_index);
+		goto err_reg_controller;
+	}
 
 	/* Register device(s) from the device tree */
 	of_spmi_register_devices(&pmic_arb->controller);
@@ -674,6 +681,8 @@
 
 	return 0;
 
+err_reg_controller:
+	spmi_del_controller(&pmic_arb->controller);
 err_add_controller:
 	platform_set_drvdata(pdev, NULL);
 	return ret;
diff --git a/drivers/spmi/spmi-resources.c b/drivers/spmi/spmi-resources.c
new file mode 100644
index 0000000..97f15ae
--- /dev/null
+++ b/drivers/spmi/spmi-resources.c
@@ -0,0 +1,151 @@
+/* Copyright (c) 2002-3 Patrick Mochel
+ * Copyright (c) 2002-3 Open Source Development Labs
+ * Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * Resource handling based on platform.c.
+ */
+
+#include <linux/export.h>
+#include <linux/spmi.h>
+#include <linux/string.h>
+
+/**
+ * spmi_get_resource - get a resource for a device
+ * @dev: spmi device
+ * @node: device node resource
+ * @type: resource type
+ * @res_num: resource index
+ *
+ * If 'node' is NULL, the API treats this as a special case and uses the
+ * first devnode. Configurations that do not use spmi-dev-container have
+ * only one node to begin with, so NULL should be passed in that case.
+ *
+ * Returns
+ *  NULL on failure.
+ */
+struct resource *spmi_get_resource(struct spmi_device *dev,
+				   struct spmi_resource *node,
+				   unsigned int type, unsigned int res_num)
+{
+	int i;
+
+	/* if a node is not specified, default to the first node */
+	if (!node)
+		node = &dev->res;
+
+	for (i = 0; i < node->num_resources; i++) {
+		struct resource *r = &node->resource[i];
+
+		if (type == resource_type(r) && res_num-- == 0)
+			return r;
+	}
+	return NULL;
+}
+EXPORT_SYMBOL_GPL(spmi_get_resource);
+
+#define SPMI_MAX_RES_NAME 256
+
+/**
+ * spmi_get_resource_byname - get a resource for a device given a name
+ * @dev: spmi device handle
+ * @node: device node resource
+ * @type: resource type
+ * @name: resource name to lookup
+ */
+struct resource *spmi_get_resource_byname(struct spmi_device *dev,
+					  struct spmi_resource *node,
+					  unsigned int type,
+					  const char *name)
+{
+	int i;
+
+	/* if a node is not specified, default to the first node */
+	if (!node)
+		node = &dev->res;
+
+	for (i = 0; i < node->num_resources; i++) {
+		struct resource *r = &node->resource[i];
+
+		if (type == resource_type(r) && r->name &&
+				!strncmp(r->name, name, SPMI_MAX_RES_NAME))
+			return r;
+	}
+	return NULL;
+}
+EXPORT_SYMBOL_GPL(spmi_get_resource_byname);
+
+/**
+ * spmi_get_irq - get an IRQ for a device
+ * @dev: spmi device
+ * @node: device node resource
+ * @res_num: IRQ number index
+ *
+ * Returns
+ *  -ENXIO on failure.
+ */
+int spmi_get_irq(struct spmi_device *dev, struct spmi_resource *node,
+					  unsigned int res_num)
+{
+	struct resource *r = spmi_get_resource(dev, node,
+						IORESOURCE_IRQ, res_num);
+
+	return r ? r->start : -ENXIO;
+}
+EXPORT_SYMBOL_GPL(spmi_get_irq);
+
+/**
+ * spmi_get_irq_byname - get an IRQ for a device given a name
+ * @dev: spmi device handle
+ * @node: device node resource
+ * @name: resource name to lookup
+ *
+ * Returns -ENXIO on failure
+ */
+int spmi_get_irq_byname(struct spmi_device *dev,
+			struct spmi_resource *node, const char *name)
+{
+	struct resource *r = spmi_get_resource_byname(dev, node,
+							IORESOURCE_IRQ, name);
+	return r ? r->start : -ENXIO;
+}
+EXPORT_SYMBOL_GPL(spmi_get_irq_byname);
+
+/**
+ * spmi_get_dev_container_byname - get a device node resource
+ * @dev: spmi device handle
+ * @label: device name to lookup
+ *
+ * Only usable in spmi-dev-container configurations. Given a name,
+ * find the associated spmi_resource that matches the name.
+ *
+ * Returns NULL if the spmi_device is not a dev-container,
+ * or if the lookup fails.
+ */
+struct spmi_resource *spmi_get_dev_container_byname(struct spmi_device *dev,
+						    const char *label)
+{
+	int i;
+
+	if (!label)
+		return NULL;
+
+	for (i = 0; i < dev->num_dev_node; i++) {
+		struct spmi_resource *r = &dev->dev_node[i];
+
+		if (r && r->label && !strncmp(r->label,
+					label, SPMI_MAX_RES_NAME))
+			return r;
+	}
+	return NULL;
+}
+EXPORT_SYMBOL(spmi_get_dev_container_byname);
diff --git a/drivers/spmi/spmi.c b/drivers/spmi/spmi.c
index 0342b97..914df95 100644
--- a/drivers/spmi/spmi.c
+++ b/drivers/spmi/spmi.c
@@ -238,6 +238,7 @@
 	spmidev->dev.platform_data = (void *)info->platform_data;
 	spmidev->num_dev_node = info->num_dev_node;
 	spmidev->dev_node = info->dev_node;
+	spmidev->res = info->res;
 
 	rc = spmi_add_device(spmidev);
 	if (rc < 0) {
diff --git a/drivers/staging/android/ashmem.c b/drivers/staging/android/ashmem.c
index 8390f5d..5d79bd2 100644
--- a/drivers/staging/android/ashmem.c
+++ b/drivers/staging/android/ashmem.c
@@ -631,24 +631,32 @@
 {
 	unsigned int physaddr = 0;
 	pgd_t *pgd_ptr = NULL;
+	pud_t *pud_ptr = NULL;
 	pmd_t *pmd_ptr = NULL;
 	pte_t *pte_ptr = NULL, pte;
 
 	spin_lock(&current->mm->page_table_lock);
 	pgd_ptr = pgd_offset(current->mm, virtaddr);
-	if (pgd_none(*pgd) || pgd_bad(*pgd)) {
+	if (pgd_none(*pgd_ptr) || pgd_bad(*pgd_ptr)) {
 		pr_err("Failed to convert virtaddr %x to pgd_ptr\n",
 			virtaddr);
 		goto done;
 	}
 
-	pmd_ptr = pmd_offset(pgd_ptr, virtaddr);
-	if (pmd_none(*pmd_ptr) || pmd_bad(*pmd_ptr)) {
-		pr_err("Failed to convert pgd_ptr %p to pmd_ptr\n",
+	pud_ptr = pud_offset(pgd_ptr, virtaddr);
+	if (pud_none(*pud_ptr) || pud_bad(*pud_ptr)) {
+		pr_err("Failed to convert pgd_ptr %p to pud_ptr\n",
 			(void *)pgd_ptr);
 		goto done;
 	}
 
+	pmd_ptr = pmd_offset(pud_ptr, virtaddr);
+	if (pmd_none(*pmd_ptr) || pmd_bad(*pmd_ptr)) {
+		pr_err("Failed to convert pud_ptr %p to pmd_ptr\n",
+			(void *)pud_ptr);
+		goto done;
+	}
+
 	pte_ptr = pte_offset_map(pmd_ptr, virtaddr);
 	if (!pte_ptr) {
 		pr_err("Failed to convert pmd_ptr %p to pte_ptr\n",
diff --git a/drivers/tty/n_smux.c b/drivers/tty/n_smux.c
index d03b4b4..c271ca4 100644
--- a/drivers/tty/n_smux.c
+++ b/drivers/tty/n_smux.c
@@ -33,8 +33,6 @@
 
 #define SMUX_NOTIFY_FIFO_SIZE	128
 #define SMUX_TX_QUEUE_SIZE	256
-#define SMUX_WM_LOW 2
-#define SMUX_WM_HIGH 4
 #define SMUX_PKT_LOG_SIZE 80
 
 /* Maximum size we can accept in a single RX buffer */
@@ -48,7 +46,7 @@
 #define SMUX_WAKEUP_DELAY_MIN (1 << 15)
 
 /* inactivity timeout for no rx/tx activity */
-#define SMUX_INACTIVITY_TIMEOUT_MS 1000
+#define SMUX_INACTIVITY_TIMEOUT_MS 1000000
 
 /* RX get_rx_buffer retry timeout values */
 #define SMUX_RX_RETRY_MIN_MS (1 << 0)  /* 1 ms */
@@ -83,6 +81,30 @@
 			pr_info(x);  \
 } while (0)
 
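+/* Log power-control traffic when MSM_SMUX_POWER_INFO logging is enabled */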
+#define SMUX_PWR_PKT_RX(pkt) do { \
+	if (smux_debug_mask & MSM_SMUX_POWER_INFO) \
+			smux_log_pkt(pkt, 1); \
+} while (0)
+
+#define SMUX_PWR_PKT_TX(pkt) do { \
+	if (smux_debug_mask & MSM_SMUX_POWER_INFO) { \
+			if (pkt->hdr.cmd == SMUX_CMD_BYTE && \
+					pkt->hdr.flags == SMUX_WAKEUP_ACK) \
+				pr_info("smux: TX Wakeup ACK\n"); \
+			else if (pkt->hdr.cmd == SMUX_CMD_BYTE && \
+					pkt->hdr.flags == SMUX_WAKEUP_REQ) \
+				pr_info("smux: TX Wakeup REQ\n"); \
+			else \
+				smux_log_pkt(pkt, 0); \
+	} \
+} while (0)
+
+#define SMUX_PWR_BYTE_TX(pkt) do { \
+	if (smux_debug_mask & MSM_SMUX_POWER_INFO) { \
+			smux_log_pkt(pkt, 0); \
+	} \
+} while (0)
+
 #define SMUX_LOG_PKT_RX(pkt) do { \
 	if (smux_debug_mask & MSM_SMUX_PKT) \
 			smux_log_pkt(pkt, 1); \
@@ -143,7 +165,7 @@
 	SMUX_PWR_OFF,
 	SMUX_PWR_TURNING_ON,
 	SMUX_PWR_ON,
-	SMUX_PWR_TURNING_OFF_FLUSH,
+	SMUX_PWR_TURNING_OFF_FLUSH, /* power-off req/ack in TX queue */
 	SMUX_PWR_TURNING_OFF,
 	SMUX_PWR_OFF_FLUSH,
 };
@@ -172,12 +194,15 @@
 	unsigned local_state;
 	unsigned local_mode;
 	uint8_t local_tiocm;
+	unsigned options;
 
 	unsigned remote_state;
 	unsigned remote_mode;
 	uint8_t remote_tiocm;
 
 	int tx_flow_control;
+	int rx_flow_control_auto;
+	int rx_flow_control_client;
 
 	/* client callbacks and private data */
 	void *priv;
@@ -270,6 +295,7 @@
 	unsigned pwr_wakeup_delay_us;
 	unsigned tx_activity_flag;
 	unsigned powerdown_enabled;
+	unsigned power_ctl_remote_req_received;
 	struct list_head power_queue;
 };
 
@@ -330,6 +356,8 @@
 static int ssr_notifier_cb(struct notifier_block *this,
 				unsigned long code,
 				void *data);
+static void smux_uart_power_on_atomic(void);
+static int smux_rx_flow_control_updated(struct smux_lch_t *ch);
 
 /**
  * Convert TTY Error Flags to string for logging purposes.
@@ -402,10 +430,13 @@
 		ch->local_state = SMUX_LCH_LOCAL_CLOSED;
 		ch->local_mode = SMUX_LCH_MODE_NORMAL;
 		ch->local_tiocm = 0x0;
+		ch->options = SMUX_CH_OPTION_AUTO_REMOTE_TX_STOP;
 		ch->remote_state = SMUX_LCH_REMOTE_CLOSED;
 		ch->remote_mode = SMUX_LCH_MODE_NORMAL;
 		ch->remote_tiocm = 0x0;
 		ch->tx_flow_control = 0;
+		ch->rx_flow_control_auto = 0;
+		ch->rx_flow_control_client = 0;
 		ch->priv = 0;
 		ch->notify = 0;
 		ch->get_rx_buffer = 0;
@@ -486,6 +517,8 @@
 		ch->remote_state = SMUX_LCH_REMOTE_CLOSED;
 		ch->remote_mode = SMUX_LCH_MODE_NORMAL;
 		ch->tx_flow_control = 0;
+		ch->rx_flow_control_auto = 0;
+		ch->rx_flow_control_client = 0;
 
 		/* Purge RX retry queue */
 		if (ch->rx_retry_queue_cnt)
@@ -537,67 +570,76 @@
 	char local_mode;
 	char remote_state;
 	char remote_mode;
-	struct smux_lch_t *ch;
+	struct smux_lch_t *ch = NULL;
 	unsigned char *data;
 
-	ch = &smux_lch[pkt->hdr.lcid];
+	if (!smux_assert_lch_id(pkt->hdr.lcid))
+		ch = &smux_lch[pkt->hdr.lcid];
 
-	switch (ch->local_state) {
-	case SMUX_LCH_LOCAL_CLOSED:
-		local_state = 'C';
-		break;
-	case SMUX_LCH_LOCAL_OPENING:
-		local_state = 'o';
-		break;
-	case SMUX_LCH_LOCAL_OPENED:
-		local_state = 'O';
-		break;
-	case SMUX_LCH_LOCAL_CLOSING:
-		local_state = 'c';
-		break;
-	default:
-		local_state = 'U';
-		break;
-	}
+	if (ch) {
+		switch (ch->local_state) {
+		case SMUX_LCH_LOCAL_CLOSED:
+			local_state = 'C';
+			break;
+		case SMUX_LCH_LOCAL_OPENING:
+			local_state = 'o';
+			break;
+		case SMUX_LCH_LOCAL_OPENED:
+			local_state = 'O';
+			break;
+		case SMUX_LCH_LOCAL_CLOSING:
+			local_state = 'c';
+			break;
+		default:
+			local_state = 'U';
+			break;
+		}
 
-	switch (ch->local_mode) {
-	case SMUX_LCH_MODE_LOCAL_LOOPBACK:
-		local_mode = 'L';
-		break;
-	case SMUX_LCH_MODE_REMOTE_LOOPBACK:
-		local_mode = 'R';
-		break;
-	case SMUX_LCH_MODE_NORMAL:
-		local_mode = 'N';
-		break;
-	default:
-		local_mode = 'U';
-		break;
-	}
+		switch (ch->local_mode) {
+		case SMUX_LCH_MODE_LOCAL_LOOPBACK:
+			local_mode = 'L';
+			break;
+		case SMUX_LCH_MODE_REMOTE_LOOPBACK:
+			local_mode = 'R';
+			break;
+		case SMUX_LCH_MODE_NORMAL:
+			local_mode = 'N';
+			break;
+		default:
+			local_mode = 'U';
+			break;
+		}
 
-	switch (ch->remote_state) {
-	case SMUX_LCH_REMOTE_CLOSED:
-		remote_state = 'C';
-		break;
-	case SMUX_LCH_REMOTE_OPENED:
-		remote_state = 'O';
-		break;
+		switch (ch->remote_state) {
+		case SMUX_LCH_REMOTE_CLOSED:
+			remote_state = 'C';
+			break;
+		case SMUX_LCH_REMOTE_OPENED:
+			remote_state = 'O';
+			break;
 
-	default:
-		remote_state = 'U';
-		break;
-	}
+		default:
+			remote_state = 'U';
+			break;
+		}
 
-	switch (ch->remote_mode) {
-	case SMUX_LCH_MODE_REMOTE_LOOPBACK:
-		remote_mode = 'R';
-		break;
-	case SMUX_LCH_MODE_NORMAL:
-		remote_mode = 'N';
-		break;
-	default:
-		remote_mode = 'U';
-		break;
+		switch (ch->remote_mode) {
+		case SMUX_LCH_MODE_REMOTE_LOOPBACK:
+			remote_mode = 'R';
+			break;
+		case SMUX_LCH_MODE_NORMAL:
+			remote_mode = 'N';
+			break;
+		default:
+			remote_mode = 'U';
+			break;
+		}
+	} else {
+		/* broadcast channel */
+		local_state = '-';
+		local_mode = '-';
+		remote_state = '-';
+		remote_mode = '-';
 	}
 
 	/* determine command type (ACK, etc) */
@@ -611,6 +653,11 @@
 		if (pkt->hdr.flags & SMUX_CMD_CLOSE_ACK)
 			snprintf(cmd_extra, sizeof(cmd_extra), " ACK");
 		break;
+
+	case SMUX_CMD_PWR_CTL:
+		if (pkt->hdr.flags & SMUX_CMD_PWR_CTL_ACK)
+			snprintf(cmd_extra, sizeof(cmd_extra), " ACK");
+		break;
 	};
 
 	i += snprintf(logbuf + i, SMUX_PKT_LOG_SIZE - i,
@@ -1342,6 +1389,7 @@
 	uint8_t lcid;
 	int ret = 0;
 	int do_retry = 0;
+	int tx_ready = 0;
 	int tmp;
 	int rx_len;
 	struct smux_lch_t *ch;
@@ -1385,8 +1433,20 @@
 
 	if (!list_empty(&ch->rx_retry_queue)) {
 		do_retry = 1;
+
+		if ((ch->options & SMUX_CH_OPTION_AUTO_REMOTE_TX_STOP) &&
+			!ch->rx_flow_control_auto &&
+			((ch->rx_retry_queue_cnt + 1) >= SMUX_RX_WM_HIGH)) {
+			/* need to flow control RX */
+			ch->rx_flow_control_auto = 1;
+			tx_ready |= smux_rx_flow_control_updated(ch);
+			schedule_notify(ch->lcid, SMUX_RX_RETRY_HIGH_WM_HIT,
+					NULL);
+		}
 		if ((ch->rx_retry_queue_cnt + 1) > SMUX_RX_RETRY_MAX_PKTS) {
 			/* retry queue full */
+			pr_err("%s: ch %d RX retry queue full\n",
+					__func__, lcid);
 			schedule_notify(lcid, SMUX_READ_FAIL, NULL);
 			ret = -ENOMEM;
 			spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
@@ -1410,7 +1470,7 @@
 			}
 			ack_pkt->hdr.pad_len = pkt->hdr.pad_len;
 			smux_tx_queue(ack_pkt, ch, 0);
-			list_channel(ch);
+			tx_ready = 1;
 		} else {
 			pr_err("%s: Remote loopack allocation failure\n",
 					__func__);
@@ -1436,6 +1496,8 @@
 			/* buffer allocation failed - add to retry queue */
 			do_retry = 1;
 		} else if (tmp < 0) {
+			pr_err("%s: ch %d Client RX buffer alloc failed %d\n",
+					__func__, lcid, tmp);
 			schedule_notify(lcid, SMUX_READ_FAIL, NULL);
 			ret = -ENOMEM;
 		}
@@ -1482,6 +1544,8 @@
 		spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
 	}
 
+	if (tx_ready)
+		list_channel(ch);
 out:
 	return ret;
 }
@@ -1599,21 +1663,20 @@
 static int smux_handle_rx_power_cmd(struct smux_pkt_t *pkt)
 {
 	struct smux_pkt_t *ack_pkt = NULL;
+	int power_down = 0;
 	unsigned long flags;
 
+	SMUX_PWR_PKT_RX(pkt);
+
 	spin_lock_irqsave(&smux.tx_lock_lha2, flags);
 	if (pkt->hdr.flags & SMUX_CMD_PWR_CTL_ACK) {
 		/* local sleep request ack */
-		if (smux.power_state == SMUX_PWR_TURNING_OFF) {
+		if (smux.power_state == SMUX_PWR_TURNING_OFF)
 			/* Power-down complete, turn off UART */
-			SMUX_PWR("%s: Power %d->%d\n", __func__,
-					smux.power_state, SMUX_PWR_OFF_FLUSH);
-			smux.power_state = SMUX_PWR_OFF_FLUSH;
-			queue_work(smux_tx_wq, &smux_inactivity_work);
-		} else {
+			power_down = 1;
+		else
 			pr_err("%s: sleep request ack invalid in state %d\n",
 					__func__, smux.power_state);
-		}
 	} else {
 		/*
 		 * Remote sleep request
@@ -1625,9 +1688,10 @@
 		 * The state here is set to SMUX_PWR_TURNING_OFF_FLUSH and
 		 * the TX thread will set the state to SMUX_PWR_TURNING_OFF
 		 * when it sends the packet.
+		 *
+		 * If we are already powering down, then no ACK is sent.
 		 */
-		if (smux.power_state == SMUX_PWR_ON
-			|| smux.power_state == SMUX_PWR_TURNING_OFF) {
+		if (smux.power_state == SMUX_PWR_ON) {
 			ack_pkt = smux_alloc_pkt();
 			if (ack_pkt) {
 				SMUX_PWR("%s: Power %d->%d\n", __func__,
@@ -1644,11 +1708,31 @@
 						&smux.power_queue);
 				queue_work(smux_tx_wq, &smux_tx_work);
 			}
+		} else if (smux.power_state == SMUX_PWR_TURNING_OFF_FLUSH) {
+			/* Local power-down request still in TX queue */
+			SMUX_PWR("%s: Power-down shortcut - no ack\n",
+					__func__);
+			smux.power_ctl_remote_req_received = 1;
+		} else if (smux.power_state == SMUX_PWR_TURNING_OFF) {
+			/*
+			 * Local power-down request already sent to remote
+			 * side, so this request gets treated as an ACK.
+			 */
+			SMUX_PWR("%s: Power-down shortcut - no ack\n",
+					__func__);
+			power_down = 1;
 		} else {
 			pr_err("%s: sleep request invalid in state %d\n",
 					__func__, smux.power_state);
 		}
 	}
+
+	if (power_down) {
+		SMUX_PWR("%s: Power %d->%d\n", __func__,
+				smux.power_state, SMUX_PWR_OFF_FLUSH);
+		smux.power_state = SMUX_PWR_OFF_FLUSH;
+		queue_work(smux_tx_wq, &smux_inactivity_work);
+	}
 	spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
 
 	return 0;
@@ -1665,10 +1749,9 @@
 {
 	int ret = -ENXIO;
 
-	SMUX_LOG_PKT_RX(pkt);
-
 	switch (pkt->hdr.cmd) {
 	case SMUX_CMD_OPEN_LCH:
+		SMUX_LOG_PKT_RX(pkt);
 		if (smux_assert_lch_id(pkt->hdr.lcid)) {
 			pr_err("%s: invalid channel id %d\n",
 					__func__, pkt->hdr.lcid);
@@ -1678,6 +1761,7 @@
 		break;
 
 	case SMUX_CMD_DATA:
+		SMUX_LOG_PKT_RX(pkt);
 		if (smux_assert_lch_id(pkt->hdr.lcid)) {
 			pr_err("%s: invalid channel id %d\n",
 					__func__, pkt->hdr.lcid);
@@ -1687,6 +1771,7 @@
 		break;
 
 	case SMUX_CMD_CLOSE_LCH:
+		SMUX_LOG_PKT_RX(pkt);
 		if (smux_assert_lch_id(pkt->hdr.lcid)) {
 			pr_err("%s: invalid channel id %d\n",
 					__func__, pkt->hdr.lcid);
@@ -1696,6 +1781,7 @@
 		break;
 
 	case SMUX_CMD_STATUS:
+		SMUX_LOG_PKT_RX(pkt);
 		if (smux_assert_lch_id(pkt->hdr.lcid)) {
 			pr_err("%s: invalid channel id %d\n",
 					__func__, pkt->hdr.lcid);
@@ -1709,10 +1795,12 @@
 		break;
 
 	case SMUX_CMD_BYTE:
+		SMUX_LOG_PKT_RX(pkt);
 		ret = smux_handle_rx_byte_cmd(pkt);
 		break;
 
 	default:
+		SMUX_LOG_PKT_RX(pkt);
 		pr_err("%s: command %d unknown\n", __func__, pkt->hdr.cmd);
 		ret = -EINVAL;
 	}
@@ -1769,8 +1857,12 @@
 		queue_delayed_work(smux_tx_wq, &smux_delayed_inactivity_work,
 			msecs_to_jiffies(SMUX_INACTIVITY_TIMEOUT_MS));
 		smux_send_byte(SMUX_WAKEUP_ACK);
-	} else {
+	} else if (smux.power_state == SMUX_PWR_ON) {
 		smux_send_byte(SMUX_WAKEUP_ACK);
+	} else {
+		/* stale wakeup request from previous wakeup */
+		SMUX_PWR("%s: stale Wakeup REQ in state %d\n",
+				__func__, smux.power_state);
 	}
 	spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
 }
@@ -1794,7 +1886,7 @@
 
 	} else if (smux.power_state != SMUX_PWR_ON) {
 		/* invalid message */
-		pr_err("%s: wakeup request ack invalid in state %d\n",
+		SMUX_PWR("%s: stale Wakeup REQ ACK in state %d\n",
 				__func__, smux.power_state);
 	}
 	spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
@@ -1828,9 +1920,11 @@
 			smux.rx_state = SMUX_RX_MAGIC;
 			break;
 		case SMUX_WAKEUP_REQ:
+			SMUX_PWR("smux: RX Wakeup REQ\n");
 			smux_handle_wakeup_req();
 			break;
 		case SMUX_WAKEUP_ACK:
+			SMUX_PWR("smux: RX Wakeup ACK\n");
 			smux_handle_wakeup_ack();
 			break;
 		default:
@@ -2048,8 +2142,10 @@
  */
 static void smux_flush_tty(void)
 {
+	mutex_lock(&smux.mutex_lha0);
 	if (!smux.tty) {
 		pr_err("%s: ldisc not loaded\n", __func__);
+		mutex_unlock(&smux.mutex_lha0);
 		return;
 	}
 
@@ -2058,6 +2154,8 @@
 
 	if (tty_chars_in_buffer(smux.tty) > 0)
 		pr_err("%s: unable to flush UART queue\n", __func__);
+
+	mutex_unlock(&smux.mutex_lha0);
 }
 
 /**
@@ -2106,8 +2204,10 @@
 
 /**
  * Power-up the UART.
+ *
+ * Must be called with smux.mutex_lha0 already locked.
  */
-static void smux_uart_power_on(void)
+static void smux_uart_power_on_atomic(void)
 {
 	struct uart_state *state;
 
@@ -2121,19 +2221,32 @@
 }
 
 /**
+ * Power-up the UART.
+ */
+static void smux_uart_power_on(void)
+{
+	mutex_lock(&smux.mutex_lha0);
+	smux_uart_power_on_atomic();
+	mutex_unlock(&smux.mutex_lha0);
+}
+
+/**
  * Power down the UART.
  */
 static void smux_uart_power_off(void)
 {
 	struct uart_state *state;
 
+	mutex_lock(&smux.mutex_lha0);
 	if (!smux.tty || !smux.tty->driver_data) {
 		pr_err("%s: unable to find UART port for tty %p\n",
 				__func__, smux.tty);
+		mutex_unlock(&smux.mutex_lha0);
 		return;
 	}
 	state = smux.tty->driver_data;
 	msm_hs_request_clock_off(state->uart_port);
+	mutex_unlock(&smux.mutex_lha0);
 }
 
 /**
@@ -2148,44 +2261,17 @@
 {
 	unsigned long flags;
 	unsigned wakeup_delay;
-	int complete = 0;
 
-	while (!smux.in_reset) {
-		spin_lock_irqsave(&smux.tx_lock_lha2, flags);
-		if (smux.power_state == SMUX_PWR_ON) {
-			/* wakeup complete */
-			complete = 1;
-			spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
-			break;
-		} else {
-			/* retry */
-			wakeup_delay = smux.pwr_wakeup_delay_us;
-			smux.pwr_wakeup_delay_us <<= 1;
-			if (smux.pwr_wakeup_delay_us > SMUX_WAKEUP_DELAY_MAX)
-				smux.pwr_wakeup_delay_us =
-					SMUX_WAKEUP_DELAY_MAX;
-		}
+	if (smux.in_reset)
+		return;
+
+	spin_lock_irqsave(&smux.tx_lock_lha2, flags);
+	if (smux.power_state == SMUX_PWR_ON) {
+		/* wakeup complete */
+		smux.pwr_wakeup_delay_us = 1;
 		spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
-		SMUX_DBG("%s: triggering wakeup\n", __func__);
-		smux_send_byte(SMUX_WAKEUP_REQ);
-
-		if (wakeup_delay < SMUX_WAKEUP_DELAY_MIN) {
-			SMUX_DBG("%s: sleeping for %u us\n", __func__,
-					wakeup_delay);
-			usleep_range(wakeup_delay, 2*wakeup_delay);
-		} else {
-			/* schedule delayed work */
-			SMUX_DBG("%s: scheduling delayed wakeup in %u ms\n",
-					__func__, wakeup_delay / 1000);
-			queue_delayed_work(smux_tx_wq,
-					&smux_wakeup_delayed_work,
-					msecs_to_jiffies(wakeup_delay / 1000));
-			break;
-		}
-	}
-
-	if (complete) {
 		SMUX_DBG("%s: wakeup complete\n", __func__);
+
 		/*
 		 * Cancel any pending retry.  This avoids a race condition with
 		 * a new power-up request because:
@@ -2194,6 +2280,38 @@
 		 *    workqueue as new TX wakeup requests
 		 */
 		cancel_delayed_work(&smux_wakeup_delayed_work);
+		queue_work(smux_tx_wq, &smux_tx_work);
+	} else if (smux.power_state == SMUX_PWR_TURNING_ON) {
+		/* retry wakeup */
+		wakeup_delay = smux.pwr_wakeup_delay_us;
+		smux.pwr_wakeup_delay_us <<= 1;
+		if (smux.pwr_wakeup_delay_us > SMUX_WAKEUP_DELAY_MAX)
+			smux.pwr_wakeup_delay_us =
+				SMUX_WAKEUP_DELAY_MAX;
+
+		spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
+		SMUX_PWR("%s: triggering wakeup\n", __func__);
+		smux_send_byte(SMUX_WAKEUP_REQ);
+
+		if (wakeup_delay < SMUX_WAKEUP_DELAY_MIN) {
+			SMUX_DBG("%s: sleeping for %u us\n", __func__,
+					wakeup_delay);
+			usleep_range(wakeup_delay, 2*wakeup_delay);
+			queue_work(smux_tx_wq, &smux_wakeup_work);
+		} else {
+			/* schedule delayed work */
+			SMUX_DBG("%s: scheduling delayed wakeup in %u ms\n",
+					__func__, wakeup_delay / 1000);
+			queue_delayed_work(smux_tx_wq,
+					&smux_wakeup_delayed_work,
+					msecs_to_jiffies(wakeup_delay / 1000));
+		}
+	} else {
+		/* wakeup aborted */
+		smux.pwr_wakeup_delay_us = 1;
+		spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
+		SMUX_PWR("%s: wakeup aborted\n", __func__);
+		cancel_delayed_work(&smux_wakeup_delayed_work);
 	}
 }
 
@@ -2221,8 +2339,9 @@
 				if (pkt) {
 					SMUX_PWR("%s: Power %d->%d\n", __func__,
 						smux.power_state,
-						SMUX_PWR_TURNING_OFF);
-					smux.power_state = SMUX_PWR_TURNING_OFF;
+						SMUX_PWR_TURNING_OFF_FLUSH);
+					smux.power_state =
+						SMUX_PWR_TURNING_OFF_FLUSH;
 
 					/* send power-down request */
 					pkt->hdr.cmd = SMUX_CMD_PWR_CTL;
@@ -2236,9 +2355,6 @@
 							__func__);
 				}
 			}
-		} else {
-			SMUX_DBG("%s: link inactive, but powerdown disabled\n",
-					__func__);
 		}
 	}
 	smux.tx_activity_flag = 0;
@@ -2275,18 +2391,32 @@
 /**
  * Remove RX retry packet from channel and free it.
  *
- * Must be called with state_lock_lhb1 locked.
- *
  * @ch    Channel for retry packet
  * @retry Retry packet to remove
+ *
+ * @returns 1 if flow control updated; 0 otherwise
+ *
+ * Must be called with state_lock_lhb1 locked.
  */
-void smux_remove_rx_retry(struct smux_lch_t *ch,
+int smux_remove_rx_retry(struct smux_lch_t *ch,
 		struct smux_rx_pkt_retry *retry)
 {
+	int tx_ready = 0;
+
 	list_del(&retry->rx_retry_list);
 	--ch->rx_retry_queue_cnt;
 	smux_free_pkt(retry->pkt);
 	kfree(retry);
+
+	if ((ch->options & SMUX_CH_OPTION_AUTO_REMOTE_TX_STOP) &&
+			(ch->rx_retry_queue_cnt <= SMUX_RX_WM_LOW) &&
+			ch->rx_flow_control_auto) {
+		ch->rx_flow_control_auto = 0;
+		smux_rx_flow_control_updated(ch);
+		schedule_notify(ch->lcid, SMUX_RX_RETRY_LOW_WM_HIT, NULL);
+		tx_ready = 1;
+	}
+	return tx_ready;
 }
 
 /**
@@ -2357,6 +2487,8 @@
 	union notifier_metadata metadata;
 	int tmp;
 	unsigned long flags;
+	int immediate_retry = 0;
+	int tx_ready = 0;
 
 	ch = container_of(work, struct smux_lch_t, rx_retry_work.work);
 
@@ -2368,7 +2500,7 @@
 			retry = list_first_entry(&ch->rx_retry_queue,
 						struct smux_rx_pkt_retry,
 						rx_retry_list);
-			smux_remove_rx_retry(ch, retry);
+			(void)smux_remove_rx_retry(ch, retry);
 		}
 	}
 
@@ -2383,7 +2515,8 @@
 					rx_retry_list);
 	spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
 
-	SMUX_DBG("%s: retrying rx pkt %p\n", __func__, retry);
+	SMUX_DBG("%s: ch %d retrying rx pkt %p\n",
+			__func__, ch->lcid, retry);
 	metadata.read.pkt_priv = 0;
 	metadata.read.buffer = 0;
 	tmp = ch->get_rx_buffer(ch->priv,
@@ -2392,33 +2525,44 @@
 			retry->pkt->hdr.payload_len);
 	if (tmp == 0 && metadata.read.buffer) {
 		/* have valid RX buffer */
+
 		memcpy(metadata.read.buffer, retry->pkt->payload,
 						retry->pkt->hdr.payload_len);
 		metadata.read.len = retry->pkt->hdr.payload_len;
 
 		spin_lock_irqsave(&ch->state_lock_lhb1, flags);
-		smux_remove_rx_retry(ch, retry);
+		tx_ready = smux_remove_rx_retry(ch, retry);
 		spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
-
 		schedule_notify(ch->lcid, SMUX_READ_DONE, &metadata);
+		if (tx_ready)
+			list_channel(ch);
+
+		immediate_retry = 1;
 	} else if (tmp == -EAGAIN ||
 			(tmp == 0 && !metadata.read.buffer)) {
 		/* retry again */
 		retry->timeout_in_ms <<= 1;
 		if (retry->timeout_in_ms > SMUX_RX_RETRY_MAX_MS) {
 			/* timed out */
+			pr_err("%s: ch %d RX retry client timeout\n",
+					__func__, ch->lcid);
 			spin_lock_irqsave(&ch->state_lock_lhb1, flags);
-			smux_remove_rx_retry(ch, retry);
-			schedule_notify(ch->lcid, SMUX_READ_FAIL, NULL);
+			tx_ready = smux_remove_rx_retry(ch, retry);
 			spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
+			schedule_notify(ch->lcid, SMUX_READ_FAIL, NULL);
+			if (tx_ready)
+				list_channel(ch);
 		}
 	} else {
 		/* client error - drop packet */
+		pr_err("%s: ch %d RX retry client failed (%d)\n",
+				__func__, ch->lcid, tmp);
 		spin_lock_irqsave(&ch->state_lock_lhb1, flags);
-		smux_remove_rx_retry(ch, retry);
+		tx_ready = smux_remove_rx_retry(ch, retry);
 		spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
-
 		schedule_notify(ch->lcid, SMUX_READ_FAIL, NULL);
+		if (tx_ready)
+			list_channel(ch);
 	}
 
 	/* schedule next retry */
@@ -2427,8 +2571,12 @@
 		retry = list_first_entry(&ch->rx_retry_queue,
 						struct smux_rx_pkt_retry,
 						rx_retry_list);
-		queue_delayed_work(smux_rx_wq, &ch->rx_retry_work,
-				msecs_to_jiffies(retry->timeout_in_ms));
+
+		if (immediate_retry)
+			queue_delayed_work(smux_rx_wq, &ch->rx_retry_work, 0);
+		else
+			queue_delayed_work(smux_rx_wq, &ch->rx_retry_work,
+					msecs_to_jiffies(retry->timeout_in_ms));
 	}
 	spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
 }
@@ -2472,14 +2620,12 @@
 			if (!list_empty(&smux.lch_tx_ready_list) ||
 			   !list_empty(&smux.power_queue)) {
 				/* data to transmit, do wakeup */
-				smux.pwr_wakeup_delay_us = 1;
 				SMUX_PWR("%s: Power %d->%d\n", __func__,
 						smux.power_state,
 						SMUX_PWR_TURNING_ON);
 				smux.power_state = SMUX_PWR_TURNING_ON;
 				spin_unlock_irqrestore(&smux.tx_lock_lha2,
 						flags);
-				smux_uart_power_on();
 				queue_work(smux_tx_wq, &smux_wakeup_work);
 			} else {
 				/* no activity -- stay asleep */
@@ -2496,8 +2642,39 @@
 			list_del(&pkt->list);
 			spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
 
+			/* Adjust power state if this is a flush command */
+			spin_lock_irqsave(&smux.tx_lock_lha2, flags);
+			if (smux.power_state == SMUX_PWR_TURNING_OFF_FLUSH &&
+				pkt->hdr.cmd == SMUX_CMD_PWR_CTL) {
+				if (pkt->hdr.flags & SMUX_CMD_PWR_CTL_ACK ||
+					smux.power_ctl_remote_req_received) {
+					/*
+					 * Sending remote power-down request ACK
+					 * or sending local power-down request
+					 * and we already received a remote
+					 * power-down request.
+					 */
+					SMUX_PWR("%s: Power %d->%d\n", __func__,
+							smux.power_state,
+							SMUX_PWR_OFF_FLUSH);
+					smux.power_state = SMUX_PWR_OFF_FLUSH;
+					smux.power_ctl_remote_req_received = 0;
+					queue_work(smux_tx_wq,
+							&smux_inactivity_work);
+				} else {
+					/* sending local power-down request */
+					SMUX_PWR("%s: Power %d->%d\n", __func__,
+							smux.power_state,
+							SMUX_PWR_TURNING_OFF);
+					smux.power_state = SMUX_PWR_TURNING_OFF;
+				}
+			}
+			spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
+
 			/* send the packet */
-			SMUX_LOG_PKT_TX(pkt);
+			smux_uart_power_on();
+			smux.tx_activity_flag = 1;
+			SMUX_PWR_PKT_TX(pkt);
 			if (!smux_byte_loopback) {
 				smux_tx_tty(pkt);
 				smux_flush_tty();
@@ -2505,19 +2682,6 @@
 				smux_tx_loopback(pkt);
 			}
 
-			/* Adjust power state if this is a flush command */
-			spin_lock_irqsave(&smux.tx_lock_lha2, flags);
-			if (smux.power_state == SMUX_PWR_TURNING_OFF_FLUSH &&
-				pkt->hdr.cmd == SMUX_CMD_PWR_CTL &&
-				(pkt->hdr.flags & SMUX_CMD_PWR_CTL_ACK)) {
-				SMUX_PWR("%s: Power %d->%d\n", __func__,
-						smux.power_state,
-						SMUX_PWR_OFF_FLUSH);
-				smux.power_state = SMUX_PWR_OFF_FLUSH;
-				queue_work(smux_tx_wq, &smux_inactivity_work);
-			}
-			spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
-
 			smux_free_pkt(pkt);
 			continue;
 		}
@@ -2534,7 +2698,7 @@
 
 		if (smux.power_state != SMUX_PWR_ON) {
 			/* channel not ready to transmit */
-			SMUX_DBG("%s: can not tx with power state %d\n",
+			SMUX_DBG("%s: waiting for link up (state %d)\n",
 					__func__,
 					smux.power_state);
 			spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
@@ -2577,7 +2741,7 @@
 				--ch->tx_pending_data_cnt;
 				if (ch->notify_lwm &&
 					ch->tx_pending_data_cnt
-						<= SMUX_WM_LOW) {
+						<= SMUX_TX_WM_LOW) {
 					ch->notify_lwm = 0;
 					low_wm_notif = 1;
 				}
@@ -2604,6 +2768,34 @@
 	}
 }
 
+/**
+ * Update the RX flow control (sent in the TIOCM Status command).
+ *
+ * @ch  Channel for update
+ *
+ * @returns 1 for updated, 0 for not updated
+ *
+ * Must be called with ch->state_lock_lhb1 locked.
+ */
+static int smux_rx_flow_control_updated(struct smux_lch_t *ch)
+{
+	int updated = 0;
+	int prev_state;
+
+	prev_state = ch->local_tiocm & SMUX_CMD_STATUS_FLOW_CNTL;
+
+	if (ch->rx_flow_control_client || ch->rx_flow_control_auto)
+		ch->local_tiocm |= SMUX_CMD_STATUS_FLOW_CNTL;
+	else
+		ch->local_tiocm &= ~SMUX_CMD_STATUS_FLOW_CNTL;
+
+	if (prev_state != (ch->local_tiocm & SMUX_CMD_STATUS_FLOW_CNTL)) {
+		smux_send_status_cmd(ch);
+		updated = 1;
+	}
+
+	return updated;
+}
 
 /**********************************************************************/
 /* Kernel API                                                         */
@@ -2646,17 +2838,30 @@
 	if (clear & SMUX_CH_OPTION_REMOTE_LOOPBACK)
 		ch->local_mode = SMUX_LCH_MODE_NORMAL;
 
-	/* Flow control */
+	/* RX Flow control */
 	if (set & SMUX_CH_OPTION_REMOTE_TX_STOP) {
-		ch->local_tiocm |= SMUX_CMD_STATUS_FLOW_CNTL;
-		ret = smux_send_status_cmd(ch);
-		tx_ready = 1;
+		ch->rx_flow_control_client = 1;
+		tx_ready |= smux_rx_flow_control_updated(ch);
 	}
 
 	if (clear & SMUX_CH_OPTION_REMOTE_TX_STOP) {
-		ch->local_tiocm &= ~SMUX_CMD_STATUS_FLOW_CNTL;
-		ret = smux_send_status_cmd(ch);
-		tx_ready = 1;
+		ch->rx_flow_control_client = 0;
+		tx_ready |= smux_rx_flow_control_updated(ch);
+	}
+
+	/* Auto RX Flow Control */
+	if (set & SMUX_CH_OPTION_AUTO_REMOTE_TX_STOP) {
+		SMUX_DBG("%s: auto rx flow control option enabled\n",
+			__func__);
+		ch->options |= SMUX_CH_OPTION_AUTO_REMOTE_TX_STOP;
+	}
+
+	if (clear & SMUX_CH_OPTION_AUTO_REMOTE_TX_STOP) {
+		SMUX_DBG("%s: auto rx flow control option disabled\n",
+			__func__);
+		ch->options &= ~SMUX_CH_OPTION_AUTO_REMOTE_TX_STOP;
+		ch->rx_flow_control_auto = 0;
+		tx_ready |= smux_rx_flow_control_updated(ch);
 	}
 
 	spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
@@ -2880,16 +3085,16 @@
 	/* verify high watermark */
 	SMUX_DBG("%s: pending %d", __func__, ch->tx_pending_data_cnt);
 
-	if (ch->tx_pending_data_cnt >= SMUX_WM_HIGH) {
+	if (ch->tx_pending_data_cnt >= SMUX_TX_WM_HIGH) {
 		pr_err("%s: ch %d high watermark %d exceeded %d\n",
-				__func__, lcid, SMUX_WM_HIGH,
+				__func__, lcid, SMUX_TX_WM_HIGH,
 				ch->tx_pending_data_cnt);
 		ret = -EAGAIN;
 		goto out_inner;
 	}
 
 	/* queue packet for transmit */
-	if (++ch->tx_pending_data_cnt == SMUX_WM_HIGH) {
+	if (++ch->tx_pending_data_cnt == SMUX_TX_WM_HIGH) {
 		ch->notify_lwm = 1;
 		pr_err("%s: high watermark hit\n", __func__);
 		schedule_notify(lcid, SMUX_HIGH_WM_HIT, NULL);
@@ -2936,7 +3141,7 @@
 	ch = &smux_lch[lcid];
 
 	spin_lock_irqsave(&ch->tx_lock_lhb2, flags);
-	if (ch->tx_pending_data_cnt >= SMUX_WM_HIGH)
+	if (ch->tx_pending_data_cnt >= SMUX_TX_WM_HIGH)
 		is_full = 1;
 	spin_unlock_irqrestore(&ch->tx_lock_lhb2, flags);
 
@@ -2964,7 +3169,7 @@
 	ch = &smux_lch[lcid];
 
 	spin_lock_irqsave(&ch->tx_lock_lhb2, flags);
-	if (ch->tx_pending_data_cnt <= SMUX_WM_LOW)
+	if (ch->tx_pending_data_cnt <= SMUX_TX_WM_LOW)
 		is_low = 1;
 	spin_unlock_irqrestore(&ch->tx_lock_lhb2, flags);
 
@@ -3264,7 +3469,7 @@
 	spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
 
 	if (power_up_uart)
-		smux_uart_power_on();
+		smux_uart_power_on_atomic();
 
 	/* Disconnect from TTY */
 	smux.tty = NULL;
@@ -3385,6 +3590,7 @@
 	smux.power_state = SMUX_PWR_OFF;
 	smux.pwr_wakeup_delay_us = 1;
 	smux.powerdown_enabled = 0;
+	smux.power_ctl_remote_req_received = 0;
 	INIT_LIST_HEAD(&smux.power_queue);
 	smux.rx_activity_flag = 0;
 	smux.tx_activity_flag = 0;
diff --git a/drivers/tty/serial/msm_serial.c b/drivers/tty/serial/msm_serial.c
index 72bc8de..c483bb45 100644
--- a/drivers/tty/serial/msm_serial.c
+++ b/drivers/tty/serial/msm_serial.c
@@ -73,6 +73,7 @@
 #ifdef CONFIG_SERIAL_MSM_RX_WAKEUP
 	struct msm_wakeup wakeup;
 #endif
+	int			uim;
 };
 
 #define UART_TO_MSM(uart_port)	((struct msm_port *) uart_port)
@@ -500,7 +501,22 @@
 	msm_port->clk_state = MSM_CLK_ON;
 #endif
 
-	if (port->uartclk == 19200000) {
+	if (msm_port->uim) {
+		msm_write(port,
+			UART_SIM_CFG_UIM_TX_MODE |
+			UART_SIM_CFG_UIM_RX_MODE |
+			UART_SIM_CFG_STOP_BIT_LEN_N(1) |
+			UART_SIM_CFG_SIM_CLK_ON |
+			UART_SIM_CFG_SIM_CLK_STOP_HIGH |
+			UART_SIM_CFG_SIM_SEL,
+			UART_SIM_CFG);
+
+		/* (TCXO * 16) / (5 * 372) = TCXO * 16 / 1860 */
+		msm_write(port, 0x08, UART_MREG);
+		msm_write(port, 0x19, UART_NREG);
+		msm_write(port, 0xe8, UART_DREG);
+		msm_write(port, 0x0e, UART_MNDREG);
+	} else if (port->uartclk == 19200000) {
 		/* clock is TCXO (19.2MHz) */
 		msm_write(port, 0x06, UART_MREG);
 		msm_write(port, 0xF1, UART_NREG);
@@ -603,6 +619,11 @@
 {
 	struct msm_port *msm_port = UART_TO_MSM(port);
 
+	if (msm_port->uim)
+		msm_write(port,
+			UART_SIM_CFG_SIM_CLK_STOP_HIGH,
+			UART_SIM_CFG);
+
 	msm_port->imr = 0;
 	msm_write(port, 0, UART_IMR); /* disable interrupts */
 
@@ -1040,6 +1061,39 @@
 	return uart_add_one_port(&msm_uart_driver, port);
 }
 
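+/* Probe an MSM UART port used in UIM (SIM card) mode */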
+static int __init msm_uim_probe(struct platform_device *pdev)
+{
+	struct msm_port *msm_port;
+	struct resource *resource;
+	struct uart_port *port;
+	int irq;
+
+	if (unlikely(pdev->id < 0 || pdev->id >= UART_NR))
+		return -ENXIO;
+
+	pr_info("msm_uim: detected port #%d\n", pdev->id);
+
+	port = get_port_from_line(pdev->id);
+	port->dev = &pdev->dev;
+	msm_port = UART_TO_MSM(port);
+
+	msm_port->uim = true;
+
+	resource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (unlikely(!resource))
+		return -ENXIO;
+	port->mapbase = resource->start;
+
+	irq = platform_get_irq(pdev, 0);
+	if (unlikely(irq < 0))
+		return -ENXIO;
+	port->irq = irq;
+
+	platform_set_drvdata(pdev, port);
+
+	return uart_add_one_port(&msm_uart_driver, port);
+}
+
 static int __devexit msm_serial_remove(struct platform_device *pdev)
 {
 	struct msm_port *msm_port = platform_get_drvdata(pdev);
@@ -1125,6 +1179,14 @@
 	},
 };
 
+static struct platform_driver msm_platform_uim_driver = {
+	.remove = msm_serial_remove,
+	.driver = {
+		.name = "msm_uim",
+		.owner = THIS_MODULE,
+	},
+};
+
 static int __init msm_serial_init(void)
 {
 	int ret;
@@ -1137,6 +1199,8 @@
 	if (unlikely(ret))
 		uart_unregister_driver(&msm_uart_driver);
 
+	platform_driver_probe(&msm_platform_uim_driver, msm_uim_probe);
+
 	printk(KERN_INFO "msm_serial: driver initialized\n");
 
 	return ret;
diff --git a/drivers/tty/serial/msm_serial.h b/drivers/tty/serial/msm_serial.h
index 65d0e30..a769825 100644
--- a/drivers/tty/serial/msm_serial.h
+++ b/drivers/tty/serial/msm_serial.h
@@ -1,7 +1,7 @@
 /*
  * Copyright (C) 2007 Google, Inc.
  * Author: Robert Love <rlove@google.com>
- * Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+ * Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
  *
  * This software is licensed under the terms of the GNU General Public
  * License version 2, as published by the Free Software Foundation, and
@@ -100,6 +100,16 @@
 #define UART_DREG		0x0030
 #define UART_MNDREG		0x0034
 #define UART_IRDA		0x0038
+
+#define UART_SIM_CFG			0x003c
+#define UART_SIM_CFG_UIM_TX_MODE	(1 << 17)
+#define UART_SIM_CFG_UIM_RX_MODE	(1 << 16)
+#define UART_SIM_CFG_STOP_BIT_LEN_N(n)	((n) << 8)
+#define UART_SIM_CFG_SIM_CLK_ON		(1 << 7)
+#define UART_SIM_CFG_SIM_CLK_TD8_SEL	(1 << 6)
+#define UART_SIM_CFG_SIM_CLK_STOP_HIGH	(1 << 5)
+#define UART_SIM_CFG_SIM_SEL		(1 << 0)
+
 #define UART_MISR_MODE		0x0040
 #define UART_MISR_RESET		0x0044
 #define UART_MISR_EXPORT	0x0048
diff --git a/drivers/tty/serial/msm_serial_hs_lite.c b/drivers/tty/serial/msm_serial_hs_lite.c
index 5735534..987008d 100644
--- a/drivers/tty/serial/msm_serial_hs_lite.c
+++ b/drivers/tty/serial/msm_serial_hs_lite.c
@@ -122,9 +122,15 @@
 	},
 	{}
 };
+
+#ifdef CONFIG_SERIAL_MSM_HSL_CONSOLE
+static int get_console_state(struct uart_port *port);
+#else
+static inline int get_console_state(struct uart_port *port) { return -ENODEV; }
+#endif
+
 static struct dentry *debug_base;
 static inline void wait_for_xmitr(struct uart_port *port);
-static int get_console_state(struct uart_port *port);
 static inline void msm_hsl_write(struct uart_port *port,
 				 unsigned int val, unsigned int off)
 {
@@ -1113,7 +1119,9 @@
 	if (!(msm_hsl_read(port, regmap[vid][UARTDM_SR]) &
 			UARTDM_SR_TXEMT_BMSK)) {
 		while (!(msm_hsl_read(port, regmap[vid][UARTDM_ISR]) &
-			UARTDM_ISR_TX_READY_BMSK)) {
+			UARTDM_ISR_TX_READY_BMSK) &&
+		       !(msm_hsl_read(port, regmap[vid][UARTDM_SR]) &
+			UARTDM_SR_TXEMT_BMSK)) {
 			udelay(1);
 			touch_nmi_watchdog();
 			cpu_relax();
diff --git a/drivers/tty/smux_ctl.c b/drivers/tty/smux_ctl.c
index 69adbf3..2b8f028 100644
--- a/drivers/tty/smux_ctl.c
+++ b/drivers/tty/smux_ctl.c
@@ -49,15 +49,6 @@
 
 static uint32_t smux_ctl_ch_id[] = {
 	SMUX_DATA_CTL_0,
-	SMUX_DATA_CTL_1,
-	SMUX_DATA_CTL_2,
-	SMUX_DATA_CTL_3,
-	SMUX_DATA_CTL_4,
-	SMUX_DATA_CTL_5,
-	SMUX_DATA_CTL_6,
-	SMUX_DATA_CTL_7,
-	SMUX_USB_RMNET_CTL_0,
-	SMUX_CSVT_CTL_0
 };
 
 #define SMUX_CTL_NUM_CHANNELS ARRAY_SIZE(smux_ctl_ch_id)
@@ -78,6 +69,7 @@
 	uint32_t read_avail;
 	struct list_head rx_list;
 
+	int abort_wait;
 	wait_queue_head_t read_wait_queue;
 	wait_queue_head_t write_wait_queue;
 
@@ -359,7 +351,8 @@
 
 		r = wait_event_interruptible_timeout(
 				devp->write_wait_queue,
-				(devp->state == SMUX_CONNECTED),
+				(devp->state == SMUX_CONNECTED ||
+				 devp->abort_wait),
 				(5 * HZ));
 		if (r == 0)
 			r = -ETIMEDOUT;
@@ -372,6 +365,13 @@
 			msm_smux_close(devp->id);
 			return r;
 
+		} else if (devp->abort_wait) {
+			pr_err("%s: %s: Open command aborted\n",
+					SMUX_CTL_MODULE_NAME, __func__);
+			r = -EIO;
+			atomic_dec(&devp->ref_count);
+			msm_smux_close(devp->id);
+			return r;
 		} else if (devp->state != SMUX_CONNECTED) {
 			pr_err(SMUX_CTL_MODULE_NAME ": %s: "
 				"Invalid open notification\n", __func__);
@@ -440,8 +440,9 @@
 
 	if (signal_pending(current))
 		r = -ERESTARTSYS;
-
-	if (smux_ctl_devp[dev_index]->state == SMUX_DISCONNECTED &&
+	else if (smux_ctl_devp[dev_index]->abort_wait)
+		r = -ENETRESET;
+	else if (smux_ctl_devp[dev_index]->state == SMUX_DISCONNECTED &&
 	    smux_ctl_devp[dev_index]->is_channel_reset != 0)
 		r = -ENETRESET;
 
@@ -560,6 +561,9 @@
 
 	if (signal_pending(current))
 		r = -ERESTARTSYS;
+
+	else if (smux_ctl_devp[dev_index]->abort_wait)
+		r = -ENETRESET;
 	else if (smux_ctl_devp[dev_index]->state == SMUX_DISCONNECTED &&
 	    smux_ctl_devp[dev_index]->is_channel_reset != 0)
 		r = -ENETRESET;
@@ -645,6 +649,13 @@
 
 	r = wait_event_interruptible(devp->write_wait_queue,
 			0 != (write_err = smux_ctl_writeable(id)));
+
+	if (-EIO == r) {
+		pr_err("%s: %s: wait_event_interruptible ret %i\n",
+				SMUX_CTL_MODULE_NAME, __func__, r);
+		return -EIO;
+	}
+
 	if (r < 0) {
 		pr_err(SMUX_CTL_MODULE_NAME " :%s: wait_event_interruptible "
 				"ret %i\n", __func__, r);
@@ -699,6 +710,25 @@
 	.unlocked_ioctl = smux_ctl_ioctl,
 };
 
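+/* Reset channel state, statistics, and the abort flag to initial values */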
+static void smux_ctl_reset_channel(struct smux_ctl_dev *devp)
+{
+	devp->is_high_wm = 0;
+	devp->write_pending = 0;
+	devp->is_channel_reset = 0;
+	devp->state = SMUX_DISCONNECTED;
+	devp->read_avail = 0;
+
+	devp->stats.bytes_tx = 0;
+	devp->stats.bytes_rx = 0;
+	devp->stats.pkts_tx = 0;
+	devp->stats.pkts_rx = 0;
+	devp->stats.cnt_ssr = 0;
+	devp->stats.cnt_read_fail = 0;
+	devp->stats.cnt_write_fail = 0;
+	devp->stats.cnt_high_wm_hit = 0;
+	devp->abort_wait = 0;
+}
+
 static int smux_ctl_probe(struct platform_device *pdev)
 {
 	int i;
@@ -706,6 +736,27 @@
 
 	SMUXCTL_DBG(SMUX_CTL_MODULE_NAME ": %s Begins\n", __func__);
 
+	if (smux_ctl_inited) {
+		/* Already loaded once - reinitialize channels */
+		for (i = 0; i < SMUX_CTL_NUM_CHANNELS; ++i) {
+			struct smux_ctl_dev *devp = smux_ctl_devp[i];
+
+			smux_ctl_reset_channel(devp);
+
+			if (atomic_read(&devp->ref_count)) {
+				r = msm_smux_open(devp->id,
+						devp,
+						smux_ctl_notify_cb,
+						smux_ctl_get_rx_buf_cb);
+				if (r)
+					pr_err("%s: unable to reopen ch %d, ret %d\n",
+							__func__, devp->id, r);
+			}
+		}
+		return 0;
+	}
+
+	/* Create character devices */
 	for (i = 0; i < SMUX_CTL_NUM_CHANNELS; ++i) {
 		smux_ctl_devp[i] = kzalloc(sizeof(struct smux_ctl_dev),
 							GFP_KERNEL);
@@ -718,26 +769,13 @@
 
 		smux_ctl_devp[i]->id = smux_ctl_ch_id[i];
 		atomic_set(&smux_ctl_devp[i]->ref_count, 0);
-		smux_ctl_devp[i]->is_high_wm = 0;
-		smux_ctl_devp[i]->write_pending = 0;
-		smux_ctl_devp[i]->is_channel_reset = 0;
-		smux_ctl_devp[i]->state = SMUX_DISCONNECTED;
-		smux_ctl_devp[i]->read_avail = 0;
-
-		smux_ctl_devp[i]->stats.bytes_tx = 0;
-		smux_ctl_devp[i]->stats.bytes_rx = 0;
-		smux_ctl_devp[i]->stats.pkts_tx = 0;
-		smux_ctl_devp[i]->stats.pkts_rx = 0;
-		smux_ctl_devp[i]->stats.cnt_ssr = 0;
-		smux_ctl_devp[i]->stats.cnt_read_fail = 0;
-		smux_ctl_devp[i]->stats.cnt_write_fail = 0;
-		smux_ctl_devp[i]->stats.cnt_high_wm_hit = 0;
 
 		mutex_init(&smux_ctl_devp[i]->dev_lock);
 		init_waitqueue_head(&smux_ctl_devp[i]->read_wait_queue);
 		init_waitqueue_head(&smux_ctl_devp[i]->write_wait_queue);
 		mutex_init(&smux_ctl_devp[i]->rx_lock);
 		INIT_LIST_HEAD(&smux_ctl_devp[i]->rx_list);
+		smux_ctl_reset_channel(smux_ctl_devp[i]);
 	}
 
 	r = alloc_chrdev_region(&smux_ctl_number, 0, SMUX_CTL_NUM_CHANNELS,
@@ -761,7 +799,8 @@
 		cdev_init(&smux_ctl_devp[i]->cdev, &smux_ctl_fops);
 		smux_ctl_devp[i]->cdev.owner = THIS_MODULE;
 
-		r = cdev_add(&smux_ctl_devp[i]->cdev, (smux_ctl_number + i), 1);
+		r = cdev_add(&smux_ctl_devp[i]->cdev,
+				(smux_ctl_number + i), 1);
 
 		if (IS_ERR_VALUE(r)) {
 			pr_err(SMUX_CTL_MODULE_NAME ": %s: "
@@ -818,15 +857,32 @@
 	SMUXCTL_DBG(SMUX_CTL_MODULE_NAME ": %s Begins\n", __func__);
 
 	for (i = 0; i < SMUX_CTL_NUM_CHANNELS; ++i) {
-		cdev_del(&smux_ctl_devp[i]->cdev);
-		kfree(smux_ctl_devp[i]);
-		device_destroy(smux_ctl_classp,
-			MKDEV(MAJOR(smux_ctl_number), i));
-	}
-	class_destroy(smux_ctl_classp);
-	unregister_chrdev_region(MAJOR(smux_ctl_number),
-			SMUX_CTL_NUM_CHANNELS);
+		struct smux_ctl_dev *devp = smux_ctl_devp[i];
 
+		mutex_lock(&devp->dev_lock);
+		devp->abort_wait = 1;
+		wake_up(&devp->write_wait_queue);
+		wake_up(&devp->read_wait_queue);
+		mutex_unlock(&devp->dev_lock);
+
+		/* Empty RX queue */
+		mutex_lock(&devp->rx_lock);
+		while (!list_empty(&devp->rx_list)) {
+			struct smux_ctl_list_elem *list_elem;
+
+			list_elem = list_first_entry(
+					&devp->rx_list,
+					struct smux_ctl_list_elem,
+					list);
+			list_del(&list_elem->list);
+			kfree(list_elem->ctl_pkt.data);
+			kfree(list_elem);
+		}
+		devp->read_avail = 0;
+		mutex_unlock(&devp->rx_lock);
+	}
+
+	SMUXCTL_DBG(SMUX_CTL_MODULE_NAME ": %s Ends\n", __func__);
 	return 0;
 }
 
@@ -841,8 +897,6 @@
 
 static int __init smux_ctl_init(void)
 {
-	msm_smux_ctl_debug_mask = MSM_SMUX_CTL_DEBUG | MSM_SMUX_CTL_DUMP_BUFFER;
-
 	SMUXCTL_DBG(SMUX_CTL_MODULE_NAME ": %s Begins\n", __func__);
 	return platform_driver_register(&smux_ctl_driver);
 }
diff --git a/drivers/tty/smux_private.h b/drivers/tty/smux_private.h
index f644ff0..353c762 100644
--- a/drivers/tty/smux_private.h
+++ b/drivers/tty/smux_private.h
@@ -32,6 +32,10 @@
 
 /* Maximum number of packets in retry queue */
 #define SMUX_RX_RETRY_MAX_PKTS 32
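+/* Watermarks for RX-retry-queue and TX-queue flow control */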
+#define SMUX_RX_WM_HIGH        16
+#define SMUX_RX_WM_LOW          4
+#define SMUX_TX_WM_LOW          2
+#define SMUX_TX_WM_HIGH         4
 
 struct tty_struct;
 
diff --git a/drivers/tty/smux_test.c b/drivers/tty/smux_test.c
index 62e9465..e488a63 100644
--- a/drivers/tty/smux_test.c
+++ b/drivers/tty/smux_test.c
@@ -43,37 +43,46 @@
  * @failed - set to true if test fails
  */
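+/* Each macro argument is evaluated exactly once inside the assertion */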
 #define UT_ASSERT_INT(a, cmp, b) \
-	if (!((a)cmp(b))) { \
+	{ \
+	int a_tmp = (a); \
+	int b_tmp = (b); \
+	if (!((a_tmp)cmp(b_tmp))) { \
 		i += scnprintf(buf + i, max - i, \
 			"%s:%d Fail: " #a "(%d) " #cmp " " #b "(%d)\n", \
 				__func__, __LINE__, \
-				a, b); \
+				a_tmp, b_tmp); \
 		failed = 1; \
 		break; \
 	} \
-	do {} while (0)
+	}
 
 #define UT_ASSERT_PTR(a, cmp, b) \
-	if (!((a)cmp(b))) { \
+	{ \
+	void *a_tmp = (a); \
+	void *b_tmp = (b); \
+	if (!((a_tmp)cmp(b_tmp))) { \
 		i += scnprintf(buf + i, max - i, \
 			"%s:%d Fail: " #a "(%p) " #cmp " " #b "(%p)\n", \
 				__func__, __LINE__, \
-				a, b); \
+				a_tmp, b_tmp); \
 		failed = 1; \
 		break; \
 	} \
-	do {} while (0)
+	}
 
 #define UT_ASSERT_UINT(a, cmp, b) \
-	if (!((a)cmp(b))) { \
+	{ \
+	unsigned a_tmp = (a); \
+	unsigned b_tmp = (b); \
+	if (!((a_tmp)cmp(b_tmp))) { \
 		i += scnprintf(buf + i, max - i, \
 			"%s:%d Fail: " #a "(%u) " #cmp " " #b "(%u)\n", \
 				__func__, __LINE__, \
-				a, b); \
+				a_tmp, b_tmp); \
 		failed = 1; \
 		break; \
 	} \
-	do {} while (0)
+	}
 
 /**
  * In-range unit test assertion for test cases.
@@ -94,16 +103,20 @@
  * @failed - set to true if test fails
  */
 #define UT_ASSERT_INT_IN_RANGE(a, minv, maxv) \
-	if (((a) < (minv)) || ((a) > (maxv))) { \
+	{ \
+	int a_tmp = (a); \
+	int minv_tmp = (minv); \
+	int maxv_tmp = (maxv); \
+	if (((a_tmp) < (minv_tmp)) || ((a_tmp) > (maxv_tmp))) { \
 		i += scnprintf(buf + i, max - i, \
 			"%s:%d Fail: " #a "(%d) < " #minv "(%d) or " \
 				 #a "(%d) > " #maxv "(%d)\n", \
 				__func__, __LINE__, \
-				a, minv, a, maxv); \
+				a_tmp, minv_tmp, a_tmp, maxv_tmp); \
 		failed = 1; \
 		break; \
 	} \
-	do {} while (0)
+	}
 
 
 static unsigned char test_array[] = {1, 1, 2, 3, 5, 8, 13, 21, 34, 55,
@@ -172,6 +185,8 @@
 	int event_disconnected_ssr;
 	int event_low_wm;
 	int event_high_wm;
+	int event_rx_retry_high_wm;
+	int event_rx_retry_low_wm;
 
 	/* TIOCM changes */
 	int event_tiocm;
@@ -222,6 +237,8 @@
 	cb->event_disconnected_ssr = 0;
 	cb->event_low_wm = 0;
 	cb->event_high_wm = 0;
+	cb->event_rx_retry_high_wm = 0;
+	cb->event_rx_retry_low_wm = 0;
 	cb->event_tiocm = 0;
 	cb->tiocm_meta.tiocm_old = 0;
 	cb->tiocm_meta.tiocm_new = 0;
@@ -282,15 +299,17 @@
 		"\tevent_disconnected_ssr=%d\n"
 		"\tevent_low_wm=%d\n"
 		"\tevent_high_wm=%d\n"
+		"\tevent_rx_retry_high_wm=%d\n"
+		"\tevent_rx_retry_low_wm=%d\n"
 		"\tevent_tiocm=%d\n"
 		"\tevent_read_done=%d\n"
 		"\tevent_read_failed=%d\n"
-		"\tread_events=%d\n"
+		"\tread_events empty=%d\n"
 		"\tget_rx_retry=%d\n"
-		"\tget_rx_retry_events=%d\n"
+		"\tget_rx_retry_events empty=%d\n"
 		"\tevent_write_done=%d\n"
 		"\tevent_write_failed=%d\n"
-		"\twrite_events=%d\n",
+		"\twrite_events empty=%d\n",
 		cb->cb_count,
 		cb->cb_completion.done,
 		cb->event_connected,
@@ -298,12 +317,14 @@
 		cb->event_disconnected_ssr,
 		cb->event_low_wm,
 		cb->event_high_wm,
+		cb->event_rx_retry_high_wm,
+		cb->event_rx_retry_low_wm,
 		cb->event_tiocm,
 		cb->event_read_done,
 		cb->event_read_failed,
-		!list_empty(&cb->read_events),
+		list_empty(&cb->read_events),
 		cb->get_rx_buff_retry_count,
-		!list_empty(&cb->get_rx_buff_retry_events),
+		list_empty(&cb->get_rx_buff_retry_events),
 		cb->event_write_done,
 		cb->event_write_failed,
 		list_empty(&cb->write_events)
@@ -416,6 +437,19 @@
 		spin_unlock_irqrestore(&cb_data_ptr->lock, flags);
 		break;
 
+	case SMUX_RX_RETRY_HIGH_WM_HIT:
+		spin_lock_irqsave(&cb_data_ptr->lock, flags);
+		++cb_data_ptr->event_rx_retry_high_wm;
+		spin_unlock_irqrestore(&cb_data_ptr->lock, flags);
+		break;
+
+	case SMUX_RX_RETRY_LOW_WM_HIT:
+		spin_lock_irqsave(&cb_data_ptr->lock, flags);
+		++cb_data_ptr->event_rx_retry_low_wm;
+		spin_unlock_irqrestore(&cb_data_ptr->lock, flags);
+		break;
+
 	case SMUX_TIOCM_UPDATE:
 		spin_lock_irqsave(&cb_data_ptr->lock, flags);
 		++cb_data_ptr->event_tiocm;
@@ -1315,7 +1349,7 @@
 		/* open port for loopback */
 		ret = msm_smux_set_ch_option(SMUX_TEST_LCID,
 				SMUX_CH_OPTION_LOCAL_LOOPBACK,
-				0);
+				SMUX_CH_OPTION_AUTO_REMOTE_TX_STOP);
 		UT_ASSERT_INT(ret, ==, 0);
 
 		ret = msm_smux_open(SMUX_TEST_LCID, &cb_data,
@@ -1568,6 +1602,132 @@
 	return i;
 }
 
+/**
+ * Verify get_rx_buffer callback retry for auto-rx flow control.
+ *
+ * @buf  Buffer for status message
+ * @max  Size of buffer
+ *
+ * @returns Number of bytes written to @buf
+ */
+static int smux_ut_local_get_rx_buff_retry_auto(char *buf, int max)
+{
+	static struct smux_mock_callback cb_data;
+	static int cb_initialized;
+	int i = 0;
+	int failed = 0;
+	int ret;
+	int try;
+	int try_rx_retry_wm;
+
+	i += scnprintf(buf + i, max - i, "Running %s\n", __func__);
+	pr_err("%s", buf);
+
+	if (!cb_initialized)
+		mock_cb_data_init(&cb_data);
+
+	mock_cb_data_reset(&cb_data);
+	smux_byte_loopback = SMUX_TEST_LCID;
+	while (!failed) {
+		/* open port for loopback */
+		ret = msm_smux_set_ch_option(SMUX_TEST_LCID,
+				SMUX_CH_OPTION_LOCAL_LOOPBACK
+				| SMUX_CH_OPTION_AUTO_REMOTE_TX_STOP,
+				0);
+		UT_ASSERT_INT(ret, ==, 0);
+
+		ret = msm_smux_open(SMUX_TEST_LCID, &cb_data,
+				smux_mock_cb, get_rx_buffer_mock);
+		UT_ASSERT_INT(ret, ==, 0);
+		UT_ASSERT_INT(
+				(int)wait_for_completion_timeout(
+					&cb_data.cb_completion, HZ), >, 0);
+		UT_ASSERT_INT(cb_data.cb_count, ==, 1);
+		UT_ASSERT_INT(cb_data.event_connected, ==, 1);
+		mock_cb_data_reset(&cb_data);
+
+		/* Test high rx-retry watermark */
+		get_rx_buffer_mock_fail = 1;
+		try_rx_retry_wm = 0;
+		for (try = 0; try < SMUX_RX_RETRY_MAX_PKTS; ++try) {
+			pr_err("%s: try %d\n", __func__, try);
+			ret = msm_smux_write(SMUX_TEST_LCID, (void *)1,
+						test_array, sizeof(test_array));
+			UT_ASSERT_INT(ret, ==, 0);
+			if (failed)
+				break;
+
+			if (!try_rx_retry_wm &&
+					cb_data.event_rx_retry_high_wm) {
+				/* RX high watermark hit */
+				try_rx_retry_wm = try + 1;
+				break;
+			}
+
+			while (cb_data.event_write_done <= try) {
+				UT_ASSERT_INT(
+					(int)wait_for_completion_timeout(
+						&cb_data.cb_completion, HZ),
+					>, 0);
+				INIT_COMPLETION(cb_data.cb_completion);
+			}
+			if (failed)
+				break;
+		}
+		if (failed)
+			break;
+
+		/* RX retry high watermark should have been set */
+		UT_ASSERT_INT(cb_data.event_rx_retry_high_wm, ==, 1);
+		UT_ASSERT_INT(try_rx_retry_wm, ==, SMUX_RX_WM_HIGH);
+
+		/*
+		 * Disable RX buffer allocation failures and wait for
+		 * SMUX_RX_WM_HIGH packets to complete successfully.
+		 */
+		get_rx_buffer_mock_fail = 0;
+		while (cb_data.event_read_done < SMUX_RX_WM_HIGH) {
+			UT_ASSERT_INT(
+				(int)wait_for_completion_timeout(
+					&cb_data.cb_completion, 2*HZ),
+				>, 0);
+			INIT_COMPLETION(cb_data.cb_completion);
+		}
+		if (failed)
+			break;
+
+		UT_ASSERT_INT(0, ==, cb_data.event_read_failed);
+		UT_ASSERT_INT(SMUX_RX_WM_HIGH, ==,
+				cb_data.event_read_done);
+		UT_ASSERT_INT(cb_data.event_rx_retry_low_wm, ==, 1);
+		mock_cb_data_reset(&cb_data);
+
+		/* close port */
+		ret = msm_smux_close(SMUX_TEST_LCID);
+		UT_ASSERT_INT(ret, ==, 0);
+		UT_ASSERT_INT(
+			(int)wait_for_completion_timeout(
+				&cb_data.cb_completion, HZ),
+			>, 0);
+		UT_ASSERT_INT(cb_data.cb_count, ==, 1);
+		UT_ASSERT_INT(cb_data.event_disconnected, ==, 1);
+		UT_ASSERT_INT(cb_data.event_disconnected_ssr, ==, 0);
+		break;
+	}
+
+	if (!failed) {
+		i += scnprintf(buf + i, max - i, "\tOK\n");
+	} else {
+		pr_err("%s: Failed\n", __func__);
+		i += scnprintf(buf + i, max - i, "\tFailed\n");
+		i += mock_cb_data_print(&cb_data, buf + i, max - i);
+		msm_smux_close(SMUX_TEST_LCID);
+	}
+	smux_byte_loopback = 0;
+	mock_cb_data_reset(&cb_data);
+	return i;
+}
+
 static char debug_buffer[DEBUG_BUFMAX];
 
 static ssize_t debug_read(struct file *file, char __user *buf,
@@ -1631,6 +1791,8 @@
 			smux_ut_local_smuxld_receive_buf);
 	debug_create("ut_local_get_rx_buff_retry", 0444, dent,
 			smux_ut_local_get_rx_buff_retry);
+	debug_create("ut_local_get_rx_buff_retry_auto", 0444, dent,
+			smux_ut_local_get_rx_buff_retry_auto);
 
 	return 0;
 }
diff --git a/drivers/usb/Kconfig b/drivers/usb/Kconfig
index ceefb23..3e15ea7 100644
--- a/drivers/usb/Kconfig
+++ b/drivers/usb/Kconfig
@@ -53,6 +53,7 @@
 # some non-PCI HCDs implement xHCI
 config USB_ARCH_HAS_XHCI
 	boolean
+	default y if ARCH_MSM8974
 	default PCI
 
 menuconfig USB_SUPPORT
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 5dceb41..d97d548 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -2808,6 +2808,7 @@
 int usb_remote_wakeup(struct usb_device *udev)
 {
 	int	status = 0;
+	struct usb_hcd *hcd = bus_to_hcd(udev->bus);
 
 	if (udev->state == USB_STATE_SUSPENDED) {
 		dev_dbg(&udev->dev, "usb %sresume\n", "wakeup-");
@@ -2816,7 +2817,11 @@
 			/* Let the drivers do their thing, then... */
 			usb_autosuspend_device(udev);
 		}
+	} else {
+		dev_dbg(&udev->dev, "usb not suspended\n");
+		clear_bit(HCD_FLAG_WAKEUP_PENDING, &hcd->flags);
 	}
+
 	return status;
 }
 
@@ -3152,7 +3157,9 @@
 	 * value.
 	 */
 	for (i = 0; i < GET_DESCRIPTOR_TRIES; (++i, msleep(100))) {
-		if (USE_NEW_SCHEME(retry_counter) && !(hcd->driver->flags & HCD_USB3)) {
+		if (USE_NEW_SCHEME(retry_counter) &&
+			!(hcd->driver->flags & HCD_USB3) &&
+			!(hcd->driver->flags & HCD_OLD_ENUM)) {
 			struct usb_device_descriptor *buf;
 			int r = 0;
 
@@ -3252,7 +3259,9 @@
 			 *  - read ep0 maxpacket even for high and low speed,
 			 */
 			msleep(10);
-			if (USE_NEW_SCHEME(retry_counter) && !(hcd->driver->flags & HCD_USB3))
+			if (USE_NEW_SCHEME(retry_counter) &&
+				!(hcd->driver->flags & HCD_USB3) &&
+				!(hcd->driver->flags & HCD_OLD_ENUM))
 				break;
   		}
 
diff --git a/drivers/usb/dwc3/dwc3-msm.c b/drivers/usb/dwc3/dwc3-msm.c
index d216f17..05f1a60 100644
--- a/drivers/usb/dwc3/dwc3-msm.c
+++ b/drivers/usb/dwc3/dwc3-msm.c
@@ -147,13 +147,6 @@
 #define USB_PHY_VDD_DIG_VOL_MIN		1045000 /* uV */
 #define USB_PHY_VDD_DIG_VOL_MAX		1320000 /* uV */
 
-enum usb_vdd_value {
-	VDD_NONE = 0,
-	VDD_MIN,
-	VDD_MAX,
-	VDD_VAL_MAX,
-};
-
 static const int vdd_val[VDD_TYPE_MAX][VDD_VAL_MAX] = {
 		{  /* VDD_CX CORNER Voting */
 			[VDD_NONE]	= RPM_VREG_CORNER_NONE,
diff --git a/drivers/usb/dwc3/dwc3_otg.c b/drivers/usb/dwc3/dwc3_otg.c
index 5df030a..23b582d 100644
--- a/drivers/usb/dwc3/dwc3_otg.c
+++ b/drivers/usb/dwc3/dwc3_otg.c
@@ -579,9 +579,11 @@
 		return -ENOMEM;
 	}
 
-	dotg->irq = platform_get_irq(to_platform_device(dwc->dev), 0);
+	/* DWC3 has a separate IRQ line for OTG events (ID/BSV etc.) */
+	dotg->irq = platform_get_irq_byname(to_platform_device(dwc->dev),
+								"otg_irq");
 	if (dotg->irq < 0) {
-		dev_err(dwc->dev, "%s: missing IRQ\n", __func__);
+		dev_err(dwc->dev, "%s: missing OTG IRQ\n", __func__);
 		ret = -ENODEV;
 		goto err1;
 	}
diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig
index 87b307c..c95f82d 100644
--- a/drivers/usb/gadget/Kconfig
+++ b/drivers/usb/gadget/Kconfig
@@ -123,13 +123,7 @@
 #   - discrete ones (including all PCI-only controllers)
 #   - debug/dummy gadget+hcd is last.
 #
-choice
-	prompt "USB Peripheral Controller"
-	help
-	   A USB device uses a controller to talk to its host.
-	   Systems should have only one such upstream link.
-	   Many controller drivers are platform-specific; these
-	   often need board-specific hooks.
+menu "USB Peripheral Controller"
 
 #
 # Integrated controllers
@@ -574,7 +568,7 @@
 # NOTE:  Please keep dummy_hcd LAST so that "real hardware" appears
 # first and will be selected by default.
 
-endchoice
+endmenu
 
 # Selected by UDC drivers that support high-speed operation.
 config USB_GADGET_DUALSPEED
diff --git a/drivers/usb/gadget/android.c b/drivers/usb/gadget/android.c
index b5a7291..a13b5da 100644
--- a/drivers/usb/gadget/android.c
+++ b/drivers/usb/gadget/android.c
@@ -1801,6 +1801,9 @@
 		goto err_dev;
 	}
 
+	if (pdata)
+		composite_driver.usb_core_id = pdata->usb_core_id;
+
 	ret = usb_composite_probe(&android_usb_driver, android_bind);
 	if (ret) {
 		pr_err("%s(): Failed to register android "
diff --git a/drivers/usb/gadget/ci13xxx_msm_hsic.c b/drivers/usb/gadget/ci13xxx_msm_hsic.c
index f353b07..5d5ee00 100644
--- a/drivers/usb/gadget/ci13xxx_msm_hsic.c
+++ b/drivers/usb/gadget/ci13xxx_msm_hsic.c
@@ -635,7 +635,7 @@
 	struct resource *res;
 	struct msm_hsic_per *mhsic;
 	int ret = 0;
-	struct msm_hsic_peripheral_platform_data *pdata;
+	struct ci13xxx_platform_data *pdata;
 
 	dev_dbg(&pdev->dev, "msm-hsic probe\n");
 
@@ -654,7 +654,8 @@
 	the_mhsic = mhsic;
 	platform_set_drvdata(pdev, mhsic);
 	mhsic->dev = &pdev->dev;
-	mhsic->pdata = pdata;
+	mhsic->pdata =
+		(struct msm_hsic_peripheral_platform_data *)pdata->prv_data;
 
 	mhsic->irq = platform_get_irq(pdev, 0);
 	if (mhsic->irq < 0) {
diff --git a/drivers/usb/gadget/ci13xxx_udc.c b/drivers/usb/gadget/ci13xxx_udc.c
index 8cdc2e9..18f0721 100644
--- a/drivers/usb/gadget/ci13xxx_udc.c
+++ b/drivers/usb/gadget/ci13xxx_udc.c
@@ -48,7 +48,6 @@
  * - Handle requests which spawns into several TDs
  * - GET_STATUS(device) - always reports 0
  * - Gadget API (majority of optional features)
- * - Suspend & Remote Wakeup
  */
 #include <linux/delay.h>
 #include <linux/device.h>
@@ -170,6 +169,8 @@
 #define CAP_ENDPTCTRL       (hw_bank.lpm ? 0x0ECUL : 0x080UL)
 #define CAP_LAST            (hw_bank.lpm ? 0x12CUL : 0x0C0UL)
 
+#define REMOTE_WAKEUP_DELAY	msecs_to_jiffies(200)
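+/* Delay between resuming the transceiver and issuing remote wakeup */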
+
 /* maximum number of enpoints: valid only after hw_device_reset() */
 static unsigned hw_ep_max;
 
@@ -1523,6 +1524,24 @@
 	return ret;
 }
 
+static void usb_do_remote_wakeup(struct work_struct *w)
+{
+	struct ci13xxx *udc = _udc;
+	unsigned long flags;
+	bool do_wake;
+
+	/*
+	 * This work cannot be canceled from the interrupt handler, so
+	 * check that the wakeup conditions are still met.
+	 */
+	spin_lock_irqsave(udc->lock, flags);
+	do_wake = udc->suspended && udc->remote_wakeup;
+	spin_unlock_irqrestore(udc->lock, flags);
+
+	if (do_wake)
+		ci13xxx_wakeup(&udc->gadget);
+}
+
 static ssize_t usb_remote_wakeup(struct device *dev,
 		struct device_attribute *attr, const char *buf, size_t count)
 {
@@ -1652,6 +1671,7 @@
 	unsigned i;
 	int ret = 0;
 	unsigned length = mReq->req.length;
+	struct ci13xxx *udc = _udc;
 
 	trace("%p, %p", mEp, mReq);
 
@@ -1728,6 +1748,18 @@
 		mReq->ptr->page[i] =
 			(mReq->req.dma + i * CI13XXX_PAGE_SIZE) & ~TD_RESERVED_MASK;
 
+	/* Remote wakeup: a request queued while the bus is suspended either
+	 * fails (remote wakeup not enabled by the host) or resumes the
+	 * transceiver and schedules the delayed wakeup work.
+	 */
+	if (udc->suspended) {
+		if (!udc->remote_wakeup) {
+			mReq->req.status = -EAGAIN;
+			dev_dbg(mEp->device, "%s: queue failed (suspend) ept #%d\n",
+				__func__, mEp->num);
+			return -EAGAIN;
+		}
+		usb_phy_set_suspend(udc->transceiver, 0);
+		schedule_delayed_work(&udc->rw_work, REMOTE_WAKEUP_DELAY);
+	}
+
 	if (!list_empty(&mEp->qh.queue)) {
 		struct ci13xxx_req *mReqPrev;
 		int n = hw_ep_bit(mEp->num, mEp->dir);
@@ -2207,8 +2239,11 @@
 	trace("%p", udc);
 
 	mEp = (udc->ep0_dir == TX) ? &udc->ep0out : &udc->ep0in;
-	udc->status->context = udc;
-	udc->status->complete = isr_setup_status_complete;
+	if (udc->status) {
+		udc->status->context = udc;
+		udc->status->complete = isr_setup_status_complete;
+	} else
+		return -EINVAL;
 
 	spin_unlock(mEp->lock);
 	retval = usb_ep_queue(&mEp->ep, udc->status, GFP_ATOMIC);
@@ -3299,6 +3334,8 @@
 		void __iomem *regs)
 {
 	struct ci13xxx *udc;
+	struct ci13xxx_platform_data *pdata =
+		(struct ci13xxx_platform_data *)(dev->platform_data);
 	int retval = 0, i;
 
 	trace("%p, %p, %p", dev, regs, driver->name);
@@ -3327,6 +3364,9 @@
 	INIT_LIST_HEAD(&udc->gadget.ep_list);
 	udc->gadget.ep0 = NULL;
 
+	if (pdata)
+		udc->gadget.usb_core_id = pdata->usb_core_id;
+
 	dev_set_name(&udc->gadget.dev, "gadget");
 	udc->gadget.dev.dma_mask = dev->dma_mask;
 	udc->gadget.dev.coherent_dma_mask = dev->coherent_dma_mask;
@@ -3341,6 +3381,8 @@
 		}
 	}
 
+	INIT_DELAYED_WORK(&udc->rw_work, usb_do_remote_wakeup);
+
 	retval = hw_device_init(regs);
 	if (retval < 0)
 		goto put_transceiver;
diff --git a/drivers/usb/gadget/ci13xxx_udc.h b/drivers/usb/gadget/ci13xxx_udc.h
index 4376804..6527b76 100644
--- a/drivers/usb/gadget/ci13xxx_udc.h
+++ b/drivers/usb/gadget/ci13xxx_udc.h
@@ -149,6 +149,7 @@
 	u8                         configured;  /* is device configured */
 	u8                         test_mode;  /* the selected test mode */
 
+	struct delayed_work        rw_work;    /* remote wakeup delayed work */
 	struct usb_gadget_driver  *driver;     /* 3rd party gadget driver */
 	struct ci13xxx_udc_driver *udc_driver; /* device controller driver */
 	int                        vbus_active; /* is VBUS active */
@@ -157,6 +158,11 @@
 	struct usb_phy            *transceiver; /* Transceiver struct */
 };
 
+struct ci13xxx_platform_data {
+	u8 usb_core_id;
+	void *prv_data;
+};
+
 /******************************************************************************
  * REGISTERS
  *****************************************************************************/
diff --git a/drivers/usb/gadget/f_rndis.c b/drivers/usb/gadget/f_rndis.c
index 6e807cb..59ff8d7 100644
--- a/drivers/usb/gadget/f_rndis.c
+++ b/drivers/usb/gadget/f_rndis.c
@@ -449,6 +449,7 @@
 	struct f_rndis			*rndis = req->context;
 	struct usb_composite_dev	*cdev = rndis->port.func.config->cdev;
 	int				status;
+	rndis_init_msg_type		*buf;
 
 	/* received RNDIS command from USB_CDC_SEND_ENCAPSULATED_COMMAND */
 //	spin_lock(&dev->lock);
@@ -456,6 +457,19 @@
 	if (status < 0)
 		ERROR(cdev, "RNDIS command error %d, %d/%d\n",
 			status, req->actual, req->length);
+
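+	/* Peek at REMOTE_NDIS_INITIALIZE_MSG and enable multi-packet
+	 * transfers only when the host's MaxTransferSize exceeds 2048 bytes.
+	 */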
+	buf = (rndis_init_msg_type *)req->buf;
+
+	if (buf->MessageType == REMOTE_NDIS_INITIALIZE_MSG) {
+		if (buf->MaxTransferSize > 2048)
+			rndis->port.multi_pkt_xfer = 1;
+		else
+			rndis->port.multi_pkt_xfer = 0;
+		DBG(cdev, "%s: MaxTransferSize: %d : Multi_pkt_txr: %s\n",
+				__func__, buf->MaxTransferSize,
+				rndis->port.multi_pkt_xfer ? "enabled" :
+							    "disabled");
+	}
 //	spin_unlock(&dev->lock);
 }
 
diff --git a/drivers/usb/gadget/msm72k_udc.c b/drivers/usb/gadget/msm72k_udc.c
index a025d95..3e40552 100644
--- a/drivers/usb/gadget/msm72k_udc.c
+++ b/drivers/usb/gadget/msm72k_udc.c
@@ -119,6 +119,7 @@
 	unsigned long dTD_update_fail_count;
 	unsigned long false_prime_fail_count;
 	unsigned actual_prime_fail_count;
+	unsigned long dTD_workaround_fail_count;
 
 	unsigned wedged:1;
 	/* pointers to DMA transfer list area */
@@ -199,6 +200,7 @@
 	unsigned phy_fail_count;
 	unsigned prime_fail_count;
 	unsigned long dTD_update_fail_count;
+	unsigned long dTD_workaround_fail_count;
 
 	struct usb_gadget		gadget;
 	struct usb_gadget_driver	*driver;
@@ -1110,6 +1112,8 @@
 	struct msm_request *req;
 	unsigned long flags;
 	int req_dequeue = 1;
+	int dtd_update_fail_count_chk = 10;
+	int check_bit = 0;
 	unsigned info;
 
 	/*
@@ -1136,12 +1140,22 @@
 		/* if the transaction is still in-flight, stop here */
 		if (info & INFO_ACTIVE) {
 			if (req_dequeue) {
-				req_dequeue = 0;
 				ui->dTD_update_fail_count++;
 				ept->dTD_update_fail_count++;
-				udelay(10);
+				udelay(1);
+				if (!dtd_update_fail_count_chk--) {
+					req_dequeue = 0;
+					check_bit = 1;
+				}
 				goto dequeue;
 			} else {
+				if (check_bit) {
+					pr_debug("%s: Delay Workaround Failed\n",
+						 __func__);
+					check_bit = 0;
+					ui->dTD_workaround_fail_count++;
+					ept->dTD_workaround_fail_count++;
+				}
 				break;
 			}
 		}
@@ -1965,11 +1979,14 @@
 			continue;
 
 		i += scnprintf(buf + i, PAGE_SIZE - i,
-			"ept%d %s false_prime_count=%lu prime_fail_count=%d dtd_fail_count=%lu\n",
+			"ept%d %s false_prime_count=%lu prime_fail_count=%d "
+					 "dtd_fail_count=%lu "
+					 "dTD_workaround_fail_count=%lu\n",
 			ept->num, (ept->flags & EPT_FLAG_IN) ? "in " : "out",
 			ept->false_prime_fail_count,
 			ept->actual_prime_fail_count,
-			ept->dTD_update_fail_count);
+			ept->dTD_update_fail_count,
+			ept->dTD_workaround_fail_count);
 	}
 
 	i += scnprintf(buf + i, PAGE_SIZE - i,
@@ -1979,6 +1996,10 @@
 	i += scnprintf(buf + i, PAGE_SIZE - i,
 			   "prime_fail count: %d\n", ui->prime_fail_count);
 
+	i += scnprintf(buf + i, PAGE_SIZE - i,
+			   "dtd_workaround_fail count: %lu\n",
+			   ui->dTD_workaround_fail_count);
+
 	spin_unlock_irqrestore(&ui->lock, flags);
 
 	return simple_read_from_buffer(ubuf, count, ppos, buf, i);
diff --git a/drivers/usb/gadget/rndis.c b/drivers/usb/gadget/rndis.c
index 0cb2121..16c4afb 100644
--- a/drivers/usb/gadget/rndis.c
+++ b/drivers/usb/gadget/rndis.c
@@ -585,12 +585,12 @@
 	resp->MinorVersion = cpu_to_le32(RNDIS_MINOR_VERSION);
 	resp->DeviceFlags = cpu_to_le32(RNDIS_DF_CONNECTIONLESS);
 	resp->Medium = cpu_to_le32(RNDIS_MEDIUM_802_3);
-	resp->MaxPacketsPerTransfer = cpu_to_le32(1);
-	resp->MaxTransferSize = cpu_to_le32(
-		  params->dev->mtu
+	resp->MaxPacketsPerTransfer = cpu_to_le32(TX_SKB_HOLD_THRESHOLD);
+	resp->MaxTransferSize = cpu_to_le32(TX_SKB_HOLD_THRESHOLD *
+		(params->dev->mtu
 		+ sizeof(struct ethhdr)
 		+ sizeof(struct rndis_packet_msg_type)
-		+ 22);
+		+ 22));
 	resp->PacketAlignmentFactor = cpu_to_le32(0);
 	resp->AFListOffset = cpu_to_le32(0);
 	resp->AFListSize = cpu_to_le32(0);
diff --git a/drivers/usb/gadget/u_ether.c b/drivers/usb/gadget/u_ether.c
index 62955c2..78ab8b7 100644
--- a/drivers/usb/gadget/u_ether.c
+++ b/drivers/usb/gadget/u_ether.c
@@ -59,6 +59,11 @@
 	spinlock_t		req_lock;	/* guard {rx,tx}_reqs */
 	struct list_head	tx_reqs, rx_reqs;
 	unsigned		tx_qlen;
+/* Minimum number of TX USB requests queued to the UDC */
+#define TX_REQ_THRESHOLD	5
+	int			no_tx_req_used;
+	int			tx_skb_hold_count;
+	u32			tx_req_bufsize;
 
 	struct sk_buff_head	rx_frames;
 
@@ -86,7 +91,7 @@
 
 #ifdef CONFIG_USB_GADGET_DUALSPEED
 
-static unsigned qmult = 5;
+static unsigned qmult = 10;
 module_param(qmult, uint, S_IRUGO|S_IWUSR);
 MODULE_PARM_DESC(qmult, "queue length multiplier at high/super speed");
 
@@ -457,6 +462,11 @@
 {
 	struct sk_buff	*skb = req->context;
 	struct eth_dev	*dev = ep->driver_data;
+	struct net_device *net = dev->net;
+	struct usb_request *new_req;
+	struct usb_ep *in;
+	int length;
+	int retval;
 
 	switch (req->status) {
 	default:
@@ -467,14 +477,73 @@
 	case -ESHUTDOWN:		/* disconnect etc */
 		break;
 	case 0:
-		dev->net->stats.tx_bytes += skb->len;
+		if (!req->zero)
+			dev->net->stats.tx_bytes += req->length-1;
+		else
+			dev->net->stats.tx_bytes += req->length;
 	}
 	dev->net->stats.tx_packets++;
 
 	spin_lock(&dev->req_lock);
-	list_add(&req->list, &dev->tx_reqs);
-	spin_unlock(&dev->req_lock);
-	dev_kfree_skb_any(skb);
+	list_add_tail(&req->list, &dev->tx_reqs);
+
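+	/* In multi-packet mode the request buffer is reused; if the next
+	 * request on the list already holds aggregated data, queue it to
+	 * the IN endpoint from here.
+	 */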
+	if (dev->port_usb->multi_pkt_xfer) {
+		dev->no_tx_req_used--;
+		req->length = 0;
+		in = dev->port_usb->in_ep;
+
+		if (!list_empty(&dev->tx_reqs)) {
+			new_req = container_of(dev->tx_reqs.next,
+					struct usb_request, list);
+			list_del(&new_req->list);
+			spin_unlock(&dev->req_lock);
+			if (new_req->length > 0) {
+				length = new_req->length;
+
+				/* NCM requires no zlp if transfer is
+				 * dwNtbInMaxSize */
+				if (dev->port_usb->is_fixed &&
+					length == dev->port_usb->fixed_in_len &&
+					(length % in->maxpacket) == 0)
+					new_req->zero = 0;
+				else
+					new_req->zero = 1;
+
+				/* use zlp framing on tx for strict CDC-Ether
+				 * conformance, though any robust network rx
+				 * path ignores extra padding. and some hardware
+				 * doesn't like to write zlps.
+				 */
+				if (new_req->zero && !dev->zlp &&
+						(length % in->maxpacket) == 0) {
+					new_req->zero = 0;
+					length++;
+				}
+
+				new_req->length = length;
+				retval = usb_ep_queue(in, new_req, GFP_ATOMIC);
+				switch (retval) {
+				default:
+					DBG(dev, "tx queue err %d\n", retval);
+					break;
+				case 0:
+					spin_lock(&dev->req_lock);
+					dev->no_tx_req_used++;
+					spin_unlock(&dev->req_lock);
+					net->trans_start = jiffies;
+				}
+			} else {
+				spin_lock(&dev->req_lock);
+				list_add(&new_req->list, &dev->tx_reqs);
+				spin_unlock(&dev->req_lock);
+			}
+		} else {
+			spin_unlock(&dev->req_lock);
+		}
+	} else {
+		spin_unlock(&dev->req_lock);
+		dev_kfree_skb_any(skb);
+	}
 
 	if (netif_carrier_ok(dev->net))
 		netif_wake_queue(dev->net);
@@ -485,6 +554,26 @@
 	return cdc_filter & USB_CDC_PACKET_TYPE_PROMISCUOUS;
 }
 
+static void alloc_tx_buffer(struct eth_dev *dev)
+{
+	struct list_head	*act;
+	struct usb_request	*req;
+
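+	/* Each request buffer must be able to hold TX_SKB_HOLD_THRESHOLD
+	 * aggregated frames plus their RNDIS packet headers.
+	 */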
+	dev->tx_req_bufsize = (TX_SKB_HOLD_THRESHOLD *
+				(dev->net->mtu
+				+ sizeof(struct ethhdr)
+				/* size of rndis_packet_msg_type */
+				+ 44
+				+ 22));
+
+	list_for_each(act, &dev->tx_reqs) {
+		req = container_of(act, struct usb_request, list);
+		if (!req->buf)
+			req->buf = kmalloc(dev->tx_req_bufsize,
+						GFP_ATOMIC);
+	}
+}
+
 static netdev_tx_t eth_start_xmit(struct sk_buff *skb,
 					struct net_device *net)
 {
@@ -511,6 +600,10 @@
 		return NETDEV_TX_OK;
 	}
 
+	/* Allocate memory for tx_reqs to support multi-packet transfer */
+	if (dev->port_usb->multi_pkt_xfer && !dev->tx_req_bufsize)
+		alloc_tx_buffer(dev);
+
 	/* apply outgoing CDC or RNDIS filters */
 	if (!is_promisc(cdc_filter)) {
 		u8		*dest = skb->data;
@@ -565,11 +658,39 @@
 		spin_unlock_irqrestore(&dev->lock, flags);
 		if (!skb)
 			goto drop;
-
-		length = skb->len;
 	}
-	req->buf = skb->data;
-	req->context = skb;
+
+	spin_lock_irqsave(&dev->req_lock, flags);
+	dev->tx_skb_hold_count++;
+	spin_unlock_irqrestore(&dev->req_lock, flags);
+
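+	/* Multi-packet mode: copy the skb into the request buffer and, if
+	 * fewer than TX_SKB_HOLD_THRESHOLD frames are batched while enough
+	 * requests are already queued to the UDC, hold the request back for
+	 * further aggregation.
+	 */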
+	if (dev->port_usb->multi_pkt_xfer) {
+		memcpy(req->buf + req->length, skb->data, skb->len);
+		req->length = req->length + skb->len;
+		length = req->length;
+		dev_kfree_skb_any(skb);
+
+		spin_lock_irqsave(&dev->req_lock, flags);
+		if (dev->tx_skb_hold_count < TX_SKB_HOLD_THRESHOLD) {
+			if (dev->no_tx_req_used > TX_REQ_THRESHOLD) {
+				list_add(&req->list, &dev->tx_reqs);
+				spin_unlock_irqrestore(&dev->req_lock, flags);
+				goto success;
+			}
+		}
+
+		dev->no_tx_req_used++;
+		spin_unlock_irqrestore(&dev->req_lock, flags);
+
+		spin_lock_irqsave(&dev->lock, flags);
+		dev->tx_skb_hold_count = 0;
+		spin_unlock_irqrestore(&dev->lock, flags);
+	} else {
+		length = skb->len;
+		req->buf = skb->data;
+		req->context = skb;
+	}
+
 	req->complete = tx_complete;
 
 	/* NCM requires no zlp if transfer is dwNtbInMaxSize */
@@ -584,8 +705,10 @@
 	 * though any robust network rx path ignores extra padding.
 	 * and some hardware doesn't like to write zlps.
 	 */
-	if (req->zero && !dev->zlp && (length % in->maxpacket) == 0)
+	if (req->zero && !dev->zlp && (length % in->maxpacket) == 0) {
+		req->zero = 0;
 		length++;
+	}
 
 	req->length = length;
 
@@ -593,7 +716,7 @@
 	if (gadget_is_dualspeed(dev->gadget) &&
 			 (dev->gadget->speed == USB_SPEED_HIGH)) {
 		dev->tx_qlen++;
-		if (dev->tx_qlen == qmult) {
+		if (dev->tx_qlen == (qmult/2)) {
 			req->no_interrupt = 0;
 			dev->tx_qlen = 0;
 		} else {
@@ -613,7 +736,8 @@
 	}
 
 	if (retval) {
-		dev_kfree_skb_any(skb);
+		if (!dev->port_usb->multi_pkt_xfer)
+			dev_kfree_skb_any(skb);
 drop:
 		dev->net->stats.tx_dropped++;
 		spin_lock_irqsave(&dev->req_lock, flags);
@@ -622,6 +746,7 @@
 		list_add(&req->list, &dev->tx_reqs);
 		spin_unlock_irqrestore(&dev->req_lock, flags);
 	}
+success:
 	return NETDEV_TX_OK;
 }
 
@@ -924,6 +1049,9 @@
 		dev->wrap = link->wrap;
 
 		spin_lock(&dev->lock);
+		dev->tx_skb_hold_count = 0;
+		dev->no_tx_req_used = 0;
+		dev->tx_req_bufsize = 0;
 		dev->port_usb = link;
 		link->ioport = dev;
 		if (netif_running(dev->net)) {
@@ -989,6 +1117,8 @@
 		list_del(&req->list);
 
 		spin_unlock(&dev->req_lock);
+		if (link->multi_pkt_xfer)
+			kfree(req->buf);
 		usb_ep_free_request(link->in_ep, req);
 		spin_lock(&dev->req_lock);
 	}
diff --git a/drivers/usb/gadget/u_ether.h b/drivers/usb/gadget/u_ether.h
index 37431f5..faa9a3b 100644
--- a/drivers/usb/gadget/u_ether.h
+++ b/drivers/usb/gadget/u_ether.h
@@ -53,6 +53,9 @@
 	bool				is_fixed;
 	u32				fixed_out_len;
 	u32				fixed_in_len;
+/* Maximum number of SKBs aggregated into one multi-packet RNDIS transfer */
+#define TX_SKB_HOLD_THRESHOLD		3
+	bool				multi_pkt_xfer;
 	struct sk_buff			*(*wrap)(struct gether *port,
 						struct sk_buff *skb);
 	int				(*unwrap)(struct gether *port,
diff --git a/drivers/usb/gadget/u_sdio.c b/drivers/usb/gadget/u_sdio.c
index 8c4b4c7..5e9b0ec 100644
--- a/drivers/usb/gadget/u_sdio.c
+++ b/drivers/usb/gadget/u_sdio.c
@@ -1140,18 +1140,6 @@
 			goto free_sdio_ports;
 		}
 
-#ifdef DEBUG
-		/* REVISIT: create one file per port
-		 * or do not create any file
-		 */
-		if (i == 0) {
-			ret = device_create_file(&g->dev, &dev_attr_input);
-			if (ret)
-				pr_err("%s: unable to create device file\n",
-						__func__);
-		}
-#endif
-
 	}
 
 	gsdio_debugfs_init();
diff --git a/drivers/usb/gadget/udc-core.c b/drivers/usb/gadget/udc-core.c
index e5e44f8..c16ff97 100644
--- a/drivers/usb/gadget/udc-core.c
+++ b/drivers/usb/gadget/udc-core.c
@@ -322,8 +322,9 @@
 
 	mutex_lock(&udc_lock);
 	list_for_each_entry(udc, &udc_list, list) {
-		/* For now we take the first one */
-		if (!udc->driver)
+		/* Match according to usb_core_id */
+		if (!udc->driver && udc->gadget &&
+		    udc->gadget->usb_core_id == driver->usb_core_id)
 			goto found;
 	}
 
diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c
index 6a6f6e5..7309438 100644
--- a/drivers/usb/host/ehci-hcd.c
+++ b/drivers/usb/host/ehci-hcd.c
@@ -977,6 +977,9 @@
 	/* PCI errors [4.15.2.4] */
 	if (unlikely ((status & STS_FATAL) != 0)) {
 		ehci_err(ehci, "fatal error\n");
+		if (hcd->driver->dump_regs)
+			hcd->driver->dump_regs(hcd);
+		panic("System error\n");
 		dbg_cmd(ehci, "fatal", cmd);
 		dbg_status(ehci, "fatal", status);
 		ehci_halt(ehci);
diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c
index caf86ca..fff9465 100644
--- a/drivers/usb/host/ehci-hub.c
+++ b/drivers/usb/host/ehci-hub.c
@@ -380,6 +380,9 @@
 	ehci_writel(ehci, ehci->periodic_dma, &ehci->regs->frame_list);
 	ehci_writel(ehci, (u32) ehci->async->qh_dma, &ehci->regs->async_next);
 
+	/* CMD_RUN will be set after PORT_RESUME gets cleared */
+	if (ehci->resume_sof_bug)
+		ehci->command &= ~CMD_RUN;
 	/* restore CMD_RUN, framelist size, and irq threshold */
 	ehci_writel(ehci, ehci->command, &ehci->regs->command);
 	ehci->rh_state = EHCI_RH_RUNNING;
@@ -422,6 +425,17 @@
 		ehci_writel(ehci, temp, &ehci->regs->port_status [i]);
 	}
 
+	if (ehci->resume_sof_bug && resume_needed) {
+		/* root hub has only one port.
+		 * PORT_RESUME gets cleared automatically. */
+		handshake(ehci, &ehci->regs->port_status[0], PORT_RESUME, 0,
+				20000);
+		ehci_writel(ehci, ehci_readl(ehci,
+				&ehci->regs->command) | CMD_RUN,
+				&ehci->regs->command);
+		goto skip_clear_resume;
+	}
+
 	/* msleep for 20ms only if code is trying to resume port */
 	if (resume_needed) {
 		spin_unlock_irq(&ehci->lock);
@@ -438,6 +452,8 @@
 			ehci_vdbg (ehci, "resumed port %d\n", i + 1);
 		}
 	}
+
+skip_clear_resume:
 	(void) ehci_readl(ehci, &ehci->regs->command);
 
 	/* maybe re-activate the schedule(s) */
@@ -823,7 +839,7 @@
 	u32 __iomem	*status_reg = &ehci->regs->port_status[
 				(wIndex & 0xff) - 1];
 	u32 __iomem	*hostpc_reg = NULL;
-	u32		temp, temp1, status;
+	u32		temp, temp1, status, cmd = 0;
 	unsigned long	flags;
 	int		retval = 0;
 	unsigned	selector;
@@ -1202,7 +1218,32 @@
 				ehci->reset_done [wIndex] = jiffies
 						+ msecs_to_jiffies (50);
 			}
+
+			if (ehci->reset_sof_bug && (temp & PORT_RESET)) {
+				cmd = ehci_readl(ehci, &ehci->regs->command);
+				cmd &= ~CMD_RUN;
+				ehci_writel(ehci, cmd, &ehci->regs->command);
+				if (handshake(ehci, &ehci->regs->status,
+						STS_HALT, STS_HALT, 16 * 125))
+					ehci_info(ehci,
+						"controller halt failed\n");
+			}
 			ehci_writel(ehci, temp, status_reg);
+			if (ehci->reset_sof_bug && (temp & PORT_RESET)
+				&& hcd->driver->enable_ulpi_control) {
+				hcd->driver->enable_ulpi_control(hcd,
+						PORT_RESET);
+				spin_unlock_irqrestore(&ehci->lock, flags);
+				usleep_range(50000, 55000);
+				if (handshake(ehci, status_reg,
+						PORT_RESET, 0, 10 * 1000))
+					ehci_info(ehci,
+						"failed to clear reset\n");
+				spin_lock_irqsave(&ehci->lock, flags);
+				hcd->driver->disable_ulpi_control(hcd);
+				cmd |= CMD_RUN;
+				ehci_writel(ehci, cmd, &ehci->regs->command);
+			}
 			break;
 
 		/* For downstream facing ports (these):  one hub port is put
diff --git a/drivers/usb/host/ehci-msm-hsic.c b/drivers/usb/host/ehci-msm-hsic.c
index a6b7dee..e49e2a0 100644
--- a/drivers/usb/host/ehci-msm-hsic.c
+++ b/drivers/usb/host/ehci-msm-hsic.c
@@ -45,6 +45,8 @@
 #include <mach/rpm-regulator.h>
 
 #define MSM_USB_BASE (hcd->regs)
+#define USB_REG_START_OFFSET 0x90
+#define USB_REG_END_OFFSET 0x250
 
 struct msm_hsic_hcd {
 	struct ehci_hcd		ehci;
@@ -68,6 +70,8 @@
 	enum usb_vdd_type	vdd_type;
 };
 
+struct msm_hsic_hcd *__mehci;
+
 static bool debug_bus_voting_enabled = true;
 
 static unsigned int enable_dbg_log = 1;
@@ -256,6 +260,22 @@
 	return container_of((void *) mehci, struct usb_hcd, hcd_priv);
 }
 
+static void dump_hsic_regs(struct usb_hcd *hcd)
+{
+	int i;
+	struct msm_hsic_hcd *mehci = hcd_to_hsic(hcd);
+
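+	/* Dump offsets 0x90-0x250 of the controller register space, four
+	 * words per line, unless the controller is in LPM.
+	 */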
+	if (atomic_read(&mehci->in_lpm))
+		return;
+
+	for (i = USB_REG_START_OFFSET; i <= USB_REG_END_OFFSET; i += 0x10)
+		pr_info("%p: %08x\t%08x\t%08x\t%08x\n", hcd->regs + i,
+				readl_relaxed(hcd->regs + i),
+				readl_relaxed(hcd->regs + i + 4),
+				readl_relaxed(hcd->regs + i + 8),
+				readl_relaxed(hcd->regs + i + 0xc));
+}
+
 #define ULPI_IO_TIMEOUT_USEC	(10 * 1000)
 
 #define USB_PHY_VDD_DIG_VOL_NONE	0 /*uV */
@@ -328,6 +348,29 @@
 
 }
 
+static int ulpi_read(struct msm_hsic_hcd *mehci, u32 reg)
+{
+	struct usb_hcd *hcd = hsic_to_hcd(mehci);
+	unsigned long timeout;
+
+	/* initiate read operation */
+	writel_relaxed(ULPI_RUN | ULPI_READ | ULPI_ADDR(reg),
+	       USB_ULPI_VIEWPORT);
+
+	/* wait for completion */
+	timeout = jiffies + usecs_to_jiffies(ULPI_IO_TIMEOUT_USEC);
+	while (readl_relaxed(USB_ULPI_VIEWPORT) & ULPI_RUN) {
+		if (time_after(jiffies, timeout)) {
+			dev_err(mehci->dev, "ulpi_read: timeout %08x\n",
+				readl_relaxed(USB_ULPI_VIEWPORT));
+			return -ETIMEDOUT;
+		}
+		udelay(1);
+	}
+
+	return ULPI_DATA_READ(readl_relaxed(USB_ULPI_VIEWPORT));
+}
+
 static int ulpi_write(struct msm_hsic_hcd *mehci, u32 val, u32 reg)
 {
 	struct usb_hcd *hcd = hsic_to_hcd(mehci);
@@ -354,6 +397,37 @@
 	return 0;
 }
 
+#define HSIC_DBG1		0X38
+#define ULPI_MANUAL_ENABLE	BIT(4)
+#define ULPI_LINESTATE_DATA	BIT(5)
+#define ULPI_LINESTATE_STROBE	BIT(6)
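+/*
+ * During port reset the PHY line state is driven manually through the
+ * ULPI HSIC_DBG1 register; ehci_msm_disable_ulpi_control restores
+ * automatic control.
+ */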
+static void ehci_msm_enable_ulpi_control(struct usb_hcd *hcd, u32 linestate)
+{
+	struct msm_hsic_hcd *mehci = hcd_to_hsic(hcd);
+	int val;
+
+	switch (linestate) {
+	case PORT_RESET:
+		val = ulpi_read(mehci, HSIC_DBG1);
+		val |= ULPI_MANUAL_ENABLE;
+		val &= ~(ULPI_LINESTATE_DATA | ULPI_LINESTATE_STROBE);
+		ulpi_write(mehci, val, HSIC_DBG1);
+		break;
+	default:
+		pr_info("%s: Unknown linestate:%0x\n", __func__, linestate);
+	}
+}
+
+static void ehci_msm_disable_ulpi_control(struct usb_hcd *hcd)
+{
+	struct msm_hsic_hcd *mehci = hcd_to_hsic(hcd);
+	int val;
+
+	val = ulpi_read(mehci, HSIC_DBG1);
+	val &= ~ULPI_MANUAL_ENABLE;
+	ulpi_write(mehci, val, HSIC_DBG1);
+}
+
 static int msm_hsic_config_gpios(struct msm_hsic_hcd *mehci, int gpio_en)
 {
 	int rc = 0;
@@ -406,50 +480,28 @@
 	return rc;
 }
 
-static int msm_hsic_phy_clk_reset(struct msm_hsic_hcd *mehci)
+static void msm_hsic_clk_reset(struct msm_hsic_hcd *mehci)
 {
 	int ret;
 
-	clk_prepare_enable(mehci->alt_core_clk);
-
 	ret = clk_reset(mehci->core_clk, CLK_RESET_ASSERT);
 	if (ret) {
-		clk_disable_unprepare(mehci->alt_core_clk);
-		dev_err(mehci->dev, "usb phy clk assert failed\n");
-		return ret;
+		dev_err(mehci->dev, "hsic clk assert failed:%d\n", ret);
+		return;
 	}
-	usleep_range(10000, 12000);
-	clk_disable_unprepare(mehci->alt_core_clk);
+	clk_disable(mehci->core_clk);
 
 	ret = clk_reset(mehci->core_clk, CLK_RESET_DEASSERT);
 	if (ret)
-		dev_err(mehci->dev, "usb phy clk deassert failed\n");
+		dev_err(mehci->dev, "hsic clk deassert failed:%d\n", ret);
 
-	return ret;
+	usleep_range(10000, 12000);
+
+	clk_enable(mehci->core_clk);
 }
 
-static int msm_hsic_phy_reset(struct msm_hsic_hcd *mehci)
-{
-	struct usb_hcd *hcd = hsic_to_hcd(mehci);
-	u32 val;
-	int ret;
-
-	ret = msm_hsic_phy_clk_reset(mehci);
-	if (ret)
-		return ret;
-
-	val = readl_relaxed(USB_PORTSC) & ~PORTSC_PTS_MASK;
-	writel_relaxed(val | PORTSC_PTS_ULPI, USB_PORTSC);
-
-	/* Ensure that RESET operation is completed before turning off clock */
-	mb();
-	dev_dbg(mehci->dev, "phy_reset: success\n");
-
-	return 0;
-}
-
-#define HSIC_GPIO150_PAD_CTL   (MSM_TLMM_BASE+0x20C0)
-#define HSIC_GPIO151_PAD_CTL   (MSM_TLMM_BASE+0x20C4)
+#define HSIC_STROBE_GPIO_PAD_CTL	(MSM_TLMM_BASE+0x20C0)
+#define HSIC_DATA_GPIO_PAD_CTL		(MSM_TLMM_BASE+0x20C4)
 #define HSIC_CAL_PAD_CTL       (MSM_TLMM_BASE+0x20C8)
 #define HSIC_LV_MODE		0x04
 #define HSIC_PAD_CALIBRATION	0xA8
@@ -458,33 +510,15 @@
 static int msm_hsic_reset(struct msm_hsic_hcd *mehci)
 {
 	struct usb_hcd *hcd = hsic_to_hcd(mehci);
-	int cnt = 0;
 	int ret;
 	struct msm_hsic_host_platform_data *pdata = mehci->dev->platform_data;
 
-	ret = msm_hsic_phy_reset(mehci);
-	if (ret) {
-		dev_err(mehci->dev, "phy_reset failed\n");
-		return ret;
-	}
+	msm_hsic_clk_reset(mehci);
 
-	writel_relaxed(USBCMD_RESET, USB_USBCMD);
-	while (cnt < LINK_RESET_TIMEOUT_USEC) {
-		if (!(readl_relaxed(USB_USBCMD) & USBCMD_RESET))
-			break;
-		udelay(1);
-		cnt++;
-	}
-	if (cnt >= LINK_RESET_TIMEOUT_USEC)
-		return -ETIMEDOUT;
-
-	/* Reset PORTSC and select ULPI phy */
+	/* select ulpi phy */
 	writel_relaxed(0x80000000, USB_PORTSC);
 
-	/* TODO: Need to confirm if HSIC PHY also requires delay after RESET */
-	msleep(100);
-
-	/* HSIC PHY Initialization */
+	mb();
 
 	/* HSIC init sequence when HSIC signals (Strobe/Data) are
 	routed via GPIOs */
@@ -493,6 +527,8 @@
 		/* Enable LV_MODE in HSIC_CAL_PAD_CTL register */
 		writel_relaxed(HSIC_LV_MODE, HSIC_CAL_PAD_CTL);
 
+		mb();
+
 		/*set periodic calibration interval to ~2.048sec in
 		  HSIC_IO_CAL_REG */
 		ulpi_write(mehci, 0xFF, 0x33);
@@ -500,16 +536,18 @@
 		/* Enable periodic IO calibration in HSIC_CFG register */
 		ulpi_write(mehci, HSIC_PAD_CALIBRATION, 0x30);
 
-		/* Configure GPIO 150/151 pins for HSIC functionality mode */
+		/* Configure GPIO pins for HSIC functionality mode */
 		ret = msm_hsic_config_gpios(mehci, 1);
 		if (ret) {
 			dev_err(mehci->dev, " gpio configuarion failed\n");
 			return ret;
 		}
-		/* Set LV_MODE=0x1 and DCC=0x2 in HSIC_GPIO150/151_PAD_CTL
-		   register */
-		writel_relaxed(HSIC_GPIO_PAD_VAL, HSIC_GPIO150_PAD_CTL);
-		writel_relaxed(HSIC_GPIO_PAD_VAL, HSIC_GPIO151_PAD_CTL);
+		/* Set LV_MODE=0x1 and DCC=0x2 in HSIC_GPIO PAD_CTL register */
+		writel_relaxed(HSIC_GPIO_PAD_VAL, HSIC_STROBE_GPIO_PAD_CTL);
+		writel_relaxed(HSIC_GPIO_PAD_VAL, HSIC_DATA_GPIO_PAD_CTL);
+
+		mb();
+
 		/* Enable HSIC mode in HSIC_CFG register */
 		ulpi_write(mehci, 0x01, 0x31);
 	} else {
@@ -712,13 +750,6 @@
 
 skip_phy_resume:
 
-	if (!(readl_relaxed(USB_USBCMD) & CMD_RUN) &&
-			(readl_relaxed(USB_PORTSC) & PORT_SUSPEND)) {
-		writel_relaxed(readl_relaxed(USB_USBCMD) | CMD_RUN ,
-				USB_USBCMD);
-		dbg_log_event(NULL, "Set RS", readl_relaxed(USB_USBCMD));
-	}
-
 	usb_hcd_resume_root_hub(hcd);
 
 	atomic_set(&mehci->in_lpm, 0);
@@ -824,7 +855,7 @@
 	 * generic hardware linkage
 	 */
 	.irq			= msm_hsic_irq,
-	.flags			= HCD_USB2 | HCD_MEMORY,
+	.flags			= HCD_USB2 | HCD_MEMORY | HCD_OLD_ENUM,
 
 	.reset			= ehci_hsic_reset,
 	.start			= ehci_run,
@@ -861,6 +892,10 @@
 	.bus_resume		= ehci_hsic_bus_resume,
 
 	.log_urb_complete	= dbg_log_event,
+	.dump_regs		= dump_hsic_regs,
+
+	.enable_ulpi_control	= ehci_msm_enable_ulpi_control,
+	.disable_ulpi_control	= ehci_msm_disable_ulpi_control,
 };
 
 static int msm_hsic_init_clocks(struct msm_hsic_hcd *mehci, u32 init)
@@ -1223,6 +1258,9 @@
 	mehci->dev = &pdev->dev;
 
 	mehci->ehci.susp_sof_bug = 1;
+	mehci->ehci.reset_sof_bug = 1;
+
+	mehci->ehci.resume_sof_bug = 1;
 
 	mehci->ehci.max_log2_irq_thresh = 6;
 
@@ -1316,6 +1354,8 @@
 		}
 	}
 
+	__mehci = mehci;
+
 	/*
 	 * This pdev->dev is assigned parent of root-hub by USB core,
 	 * hence, runtime framework automatically calls this driver's
diff --git a/drivers/usb/host/ehci.h b/drivers/usb/host/ehci.h
index 6afb70b..a0f995c 100644
--- a/drivers/usb/host/ehci.h
+++ b/drivers/usb/host/ehci.h
@@ -152,6 +152,8 @@
 	unsigned		has_synopsys_hc_bug:1; /* Synopsys HC */
 	unsigned		frame_index_bug:1; /* MosChip (AKA NetMos) */
 	unsigned		susp_sof_bug:1; /*Chip Idea HC*/
+	unsigned		resume_sof_bug:1;/*Chip Idea HC*/
+	unsigned		reset_sof_bug:1; /*Chip Idea HC*/
 
 	/* required for usb32 quirk */
 	#define OHCI_CTRL_HCFS          (3 << 6)
diff --git a/drivers/usb/misc/diag_bridge.c b/drivers/usb/misc/diag_bridge.c
index 8b762a2..6d5544a 100644
--- a/drivers/usb/misc/diag_bridge.c
+++ b/drivers/usb/misc/diag_bridge.c
@@ -255,6 +255,7 @@
 	pipe = usb_sndbulkpipe(dev->udev, dev->out_epAddr);
 	usb_fill_bulk_urb(urb, dev->udev, pipe, data, size,
 				diag_bridge_write_cb, dev);
+	urb->transfer_flags |= URB_ZERO_PACKET;
 	usb_anchor_urb(urb, &dev->submitted);
 	dev->pending_writes++;
 
diff --git a/drivers/usb/misc/mdm_data_bridge.c b/drivers/usb/misc/mdm_data_bridge.c
index db2f40a..1c9de07 100644
--- a/drivers/usb/misc/mdm_data_bridge.c
+++ b/drivers/usb/misc/mdm_data_bridge.c
@@ -497,6 +497,8 @@
 	usb_fill_bulk_urb(txurb, dev->udev, dev->bulk_out,
 			skb->data, skb->len, data_bridge_write_cb, skb);
 
+	txurb->transfer_flags |= URB_ZERO_PACKET;
+
 	if (test_bit(SUSPENDED, &dev->flags)) {
 		usb_anchor_urb(txurb, &dev->delayed);
 		goto free_urb;
diff --git a/drivers/usb/otg/msm_otg.c b/drivers/usb/otg/msm_otg.c
index dedad53..bb6bb2c 100644
--- a/drivers/usb/otg/msm_otg.c
+++ b/drivers/usb/otg/msm_otg.c
@@ -74,7 +74,7 @@
 static struct regulator *hsusb_1p8;
 static struct regulator *hsusb_vddcx;
 static struct regulator *vbus_otg;
-static struct regulator *mhl_analog_switch;
+static struct regulator *mhl_usb_hs_switch;
 static struct power_supply *psy;
 
 static bool aca_id_turned_on;
@@ -250,16 +250,16 @@
 	if (!pdata->mhl_enable)
 		return;
 
-	if (!mhl_analog_switch) {
-		pr_err("%s: mhl_analog_switch is NULL.\n", __func__);
+	if (!mhl_usb_hs_switch) {
+		pr_err("%s: mhl_usb_hs_switch is NULL.\n", __func__);
 		return;
 	}
 
 	if (on) {
-		if (regulator_enable(mhl_analog_switch))
-			pr_err("unable to enable mhl_analog_switch\n");
+		if (regulator_enable(mhl_usb_hs_switch))
+			pr_err("unable to enable mhl_usb_hs_switch\n");
 	} else {
-		regulator_disable(mhl_analog_switch);
+		regulator_disable(mhl_usb_hs_switch);
 	}
 }
 
@@ -670,6 +670,9 @@
 	if (aca_enabled())
 		return 0;
 
+	if (atomic_read(&motg->in_lpm) == suspend)
+		return 0;
+
 	if (suspend) {
 		switch (phy->state) {
 		case OTG_STATE_A_WAIT_BCON:
@@ -969,6 +972,21 @@
 }
 #endif
 
+static int msm_otg_notify_host_mode(struct msm_otg *motg, bool host_mode)
+{
+	if (!psy)
+		goto psy_not_supported;
+
+	if (host_mode)
+		power_supply_set_scope(psy, POWER_SUPPLY_SCOPE_SYSTEM);
+	else
+		power_supply_set_scope(psy, POWER_SUPPLY_SCOPE_DEVICE);
+
+	return 0;
+
+psy_not_supported:
+	dev_dbg(motg->phy.dev, "Power Supply doesn't support USB charger\n");
+	return -ENXIO;
+}
+
 static int msm_otg_notify_chg_type(struct msm_otg *motg)
 {
 	static int charger_type;
@@ -983,7 +1001,8 @@
 		charger_type = POWER_SUPPLY_TYPE_USB;
 	else if (motg->chg_type == USB_CDP_CHARGER)
 		charger_type = POWER_SUPPLY_TYPE_USB_CDP;
-	else if (motg->chg_type == USB_DCP_CHARGER)
+	else if (motg->chg_type == USB_DCP_CHARGER ||
+			motg->chg_type == USB_PROPRIETARY_CHARGER)
 		charger_type = POWER_SUPPLY_TYPE_USB_DCP;
 	else if ((motg->chg_type == USB_ACA_DOCK_CHARGER ||
 		motg->chg_type == USB_ACA_A_CHARGER ||
@@ -1217,7 +1236,7 @@
 	 * current from the source.
 	 */
 	if (on) {
-		pm8921_disable_source_current(on);
+		msm_otg_notify_host_mode(motg, on);
 		ret = regulator_enable(vbus_otg);
 		if (ret) {
 			pr_err("unable to enable vbus_otg\n");
@@ -1230,7 +1249,7 @@
 			pr_err("unable to disable vbus_otg\n");
 			return;
 		}
-		pm8921_disable_source_current(on);
+		msm_otg_notify_host_mode(motg, on);
 		vbus_is_on = false;
 	}
 }
@@ -1609,9 +1628,6 @@
 		ulpi_write(phy, chg_det, 0x34);
 		break;
 	case SNPS_28NM_INTEGRATED_PHY:
-		/* Turn off VDP_SRC */
-		ulpi_write(phy, 0x3, 0x86);
-		msleep(20);
 		/*
 		 * Configure DM as current source, DP as current sink
 		 * and enable battery charging comparators.
@@ -1639,6 +1655,9 @@
 	case SNPS_28NM_INTEGRATED_PHY:
 		chg_det = ulpi_read(phy, 0x87);
 		ret = chg_det & 1;
+		/* Turn off VDP_SRC */
+		ulpi_write(phy, 0x3, 0x86);
+		msleep(20);
 		break;
 	default:
 		break;
@@ -1807,6 +1826,7 @@
 	case USB_ACA_B_CHARGER:		return "USB_ACA_B_CHARGER";
 	case USB_ACA_C_CHARGER:		return "USB_ACA_C_CHARGER";
 	case USB_ACA_DOCK_CHARGER:	return "USB_ACA_DOCK_CHARGER";
+	case USB_PROPRIETARY_CHARGER:	return "USB_PROPRIETARY_CHARGER";
 	default:			return "INVALID_CHARGER";
 	}
 }
@@ -1820,6 +1840,7 @@
 	struct msm_otg *motg = container_of(w, struct msm_otg, chg_work.work);
 	struct usb_phy *phy = &motg->phy;
 	bool is_dcd = false, tmout, vout, is_aca;
+	u32 line_state, dm_vlgc;
 	unsigned long delay;
 
 	dev_dbg(phy->dev, "chg detection work\n");
@@ -1862,24 +1883,37 @@
 		break;
 	case USB_CHG_STATE_DCD_DONE:
 		vout = msm_chg_check_primary_det(motg);
-		if (vout) {
+		line_state = readl_relaxed(USB_PORTSC) & PORTSC_LS;
+		dm_vlgc = line_state & PORTSC_LS_DM;
+		if (vout && !dm_vlgc) { /* VDAT_REF < DM < VLGC */
 			if (test_bit(ID_A, &motg->inputs)) {
 				motg->chg_type = USB_ACA_DOCK_CHARGER;
 				motg->chg_state = USB_CHG_STATE_DETECTED;
 				delay = 0;
 				break;
 			}
-			msm_chg_enable_secondary_det(motg);
-			delay = MSM_CHG_SECONDARY_DET_TIME;
-			motg->chg_state = USB_CHG_STATE_PRIMARY_DONE;
-		} else {
+			if (line_state) { /* DP > VLGC */
+				motg->chg_type = USB_PROPRIETARY_CHARGER;
+				motg->chg_state = USB_CHG_STATE_DETECTED;
+				delay = 0;
+			} else {
+				msm_chg_enable_secondary_det(motg);
+				delay = MSM_CHG_SECONDARY_DET_TIME;
+				motg->chg_state = USB_CHG_STATE_PRIMARY_DONE;
+			}
+		} else { /* DM < VDAT_REF || DM > VLGC */
 			if (test_bit(ID_A, &motg->inputs)) {
 				motg->chg_type = USB_ACA_A_CHARGER;
 				motg->chg_state = USB_CHG_STATE_DETECTED;
 				delay = 0;
 				break;
 			}
-			motg->chg_type = USB_SDP_CHARGER;
+
+			if (line_state) /* DP > VLGC and/or DM > VLGC */
+				motg->chg_type = USB_PROPRIETARY_CHARGER;
+			else
+				motg->chg_type = USB_SDP_CHARGER;
+
 			motg->chg_state = USB_CHG_STATE_DETECTED;
 			delay = 0;
 		}
@@ -2032,6 +2066,8 @@
 				case USB_DCP_CHARGER:
 					/* Enable VDP_SRC */
 					ulpi_write(otg->phy, 0x2, 0x85);
+					/* fall through */
+				case USB_PROPRIETARY_CHARGER:
 					msm_otg_notify_charger(motg,
 							IDEV_CHG_MAX);
 					pm_runtime_put_noidle(otg->phy->dev);
@@ -2703,6 +2739,11 @@
 {
 	static bool init;
 	struct msm_otg *motg = the_msm_otg;
+	struct usb_otg *otg = motg->phy.otg;
+
+	/* In A Host Mode, ignore received BSV interrupts */
+	if (otg->phy->state >= OTG_STATE_A_IDLE)
+		return;
 
 	if (online) {
 		pr_debug("PMIC: BSV set\n");
@@ -3337,10 +3378,10 @@
 	}
 
 	if (pdata->mhl_enable) {
-		mhl_analog_switch = devm_regulator_get(motg->phy.dev,
-							"mhl_ext_3p3v");
-		if (IS_ERR(mhl_analog_switch)) {
-			dev_err(&pdev->dev, "Unable to get mhl_analog_switch\n");
+		mhl_usb_hs_switch = devm_regulator_get(motg->phy.dev,
+							"mhl_usb_hs_switch");
+		if (IS_ERR(mhl_usb_hs_switch)) {
+			dev_err(&pdev->dev, "Unable to get mhl_usb_hs_switch\n");
 			goto free_ldo_init;
 		}
 	}
diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c
index 1f6d915..a749a6d 100644
--- a/drivers/usb/serial/qcserial.c
+++ b/drivers/usb/serial/qcserial.c
@@ -284,6 +284,8 @@
 	.write		     = usb_wwan_write,
 	.write_room	     = usb_wwan_write_room,
 	.chars_in_buffer     = usb_wwan_chars_in_buffer,
+	.throttle            = usb_wwan_throttle,
+	.unthrottle          = usb_wwan_unthrottle,
 	.attach		     = usb_wwan_startup,
 	.disconnect	     = usb_wwan_disconnect,
 	.release	     = qc_release,
diff --git a/drivers/usb/serial/usb-wwan.h b/drivers/usb/serial/usb-wwan.h
index 9811a82..98b399f 100644
--- a/drivers/usb/serial/usb-wwan.h
+++ b/drivers/usb/serial/usb-wwan.h
@@ -24,6 +24,8 @@
 extern int usb_wwan_write(struct tty_struct *tty, struct usb_serial_port *port,
 			  const unsigned char *buf, int count);
 extern int usb_wwan_chars_in_buffer(struct tty_struct *tty);
+extern void usb_wwan_throttle(struct tty_struct *tty);
+extern void usb_wwan_unthrottle(struct tty_struct *tty);
 #ifdef CONFIG_PM
 extern int usb_wwan_suspend(struct usb_serial *serial, pm_message_t message);
 extern int usb_wwan_resume(struct usb_serial *serial);
@@ -33,7 +35,7 @@
 
 #define N_IN_URB 5
 #define N_OUT_URB 5
-#define IN_BUFLEN 65536
+#define IN_BUFLEN 16384
 #define OUT_BUFLEN 65536
 
 struct usb_wwan_intf_private {
@@ -55,6 +57,10 @@
 	int opened;
 	struct usb_anchor submitted;
 	struct usb_anchor delayed;
+	struct list_head in_urb_list;
+	spinlock_t in_lock;
+	ssize_t n_read;
+	struct work_struct in_work;
 
 	/* Settings for the port */
 	int rts_state;		/* Handshaking pins (outputs) */
diff --git a/drivers/usb/serial/usb_wwan.c b/drivers/usb/serial/usb_wwan.c
index 0c58554..bf30c0b 100644
--- a/drivers/usb/serial/usb_wwan.c
+++ b/drivers/usb/serial/usb_wwan.c
@@ -279,15 +279,78 @@
 }
 EXPORT_SYMBOL(usb_wwan_write);
 
+static void usb_wwan_in_work(struct work_struct *w)
+{
+	struct usb_wwan_port_private *portdata =
+		container_of(w, struct usb_wwan_port_private, in_work);
+	struct list_head *q = &portdata->in_urb_list;
+	struct urb *urb;
+	unsigned char *data;
+	struct tty_struct *tty;
+	struct usb_serial_port *port;
+	int err;
+	ssize_t len;
+	ssize_t count;
+	unsigned long flags;
+
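+	/* Read URBs queued by the completion handler are drained here: push
+	 * their data to the TTY, honoring throttling and partial flips,
+	 * then resubmit each URB.
+	 */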
+	spin_lock_irqsave(&portdata->in_lock, flags);
+	while (!list_empty(q)) {
+		urb = list_first_entry(q, struct urb, urb_list);
+		port = urb->context;
+		if (port->throttle_req || port->throttled)
+			break;
+
+		tty = tty_port_tty_get(&port->port);
+		if (!tty)
+			continue;
+
+		list_del_init(&urb->urb_list);
+
+		spin_unlock_irqrestore(&portdata->in_lock, flags);
+
+		len = urb->actual_length - portdata->n_read;
+		data = urb->transfer_buffer + portdata->n_read;
+		count = tty_insert_flip_string(tty, data, len);
+		tty_flip_buffer_push(tty);
+		tty_kref_put(tty);
+
+		if (count < len) {
+			dbg("%s: len:%zd count:%zd n_read:%zd\n", __func__,
+					len, count, portdata->n_read);
+			portdata->n_read += count;
+			port->throttled = true;
+
+			/* add request back to list */
+			spin_lock_irqsave(&portdata->in_lock, flags);
+			list_add(&urb->urb_list, q);
+			spin_unlock_irqrestore(&portdata->in_lock, flags);
+			return;
+		}
+		portdata->n_read = 0;
+
+		usb_anchor_urb(urb, &portdata->submitted);
+		err = usb_submit_urb(urb, GFP_ATOMIC);
+		if (err) {
+			usb_unanchor_urb(urb);
+			if (err != -EPERM)
+				pr_err("%s: submit read urb failed:%d",
+						__func__, err);
+		}
+
+		usb_mark_last_busy(port->serial->dev);
+		spin_lock_irqsave(&portdata->in_lock, flags);
+	}
+	spin_unlock_irqrestore(&portdata->in_lock, flags);
+}
+
 static void usb_wwan_indat_callback(struct urb *urb)
 {
 	int err;
 	int endpoint;
 	struct usb_wwan_port_private *portdata;
 	struct usb_serial_port *port;
-	struct tty_struct *tty;
-	unsigned char *data = urb->transfer_buffer;
 	int status = urb->status;
+	unsigned long flags;
 
 	dbg("%s: %p", __func__, urb);
 
@@ -295,38 +358,30 @@
 	port = urb->context;
 	portdata = usb_get_serial_port_data(port);
 
-	if (status) {
-		dbg("%s: nonzero status: %d on endpoint %02x.",
-		    __func__, status, endpoint);
-	} else {
-		tty = tty_port_tty_get(&port->port);
-		if (tty) {
-			if (urb->actual_length) {
-				tty_insert_flip_string(tty, data,
-						urb->actual_length);
-				tty_flip_buffer_push(tty);
-			} else
-				dbg("%s: empty read urb received", __func__);
-			tty_kref_put(tty);
-		}
+	usb_mark_last_busy(port->serial->dev);
 
-		/* Resubmit urb so we continue receiving */
-		if (status != -ESHUTDOWN) {
-			usb_anchor_urb(urb, &portdata->submitted);
-			err = usb_submit_urb(urb, GFP_ATOMIC);
-			if (err) {
-				usb_unanchor_urb(urb);
-				if (err != -EPERM) {
-					printk(KERN_ERR "%s: resubmit read urb failed. "
-						"(%d)", __func__, err);
-					/* busy also in error unless we are killed */
-					usb_mark_last_busy(port->serial->dev);
-				}
-			} else {
-				usb_mark_last_busy(port->serial->dev);
-			}
-		}
+	if (!status && urb->actual_length) {
+		spin_lock_irqsave(&portdata->in_lock, flags);
+		list_add_tail(&urb->urb_list, &portdata->in_urb_list);
+		spin_unlock_irqrestore(&portdata->in_lock, flags);
 
+		schedule_work(&portdata->in_work);
+
+		return;
+	}
+
+	dbg("%s: nonzero status: %d on endpoint %02x.",
+		__func__, status, endpoint);
+
+	if (status != -ESHUTDOWN) {
+		usb_anchor_urb(urb, &portdata->submitted);
+		err = usb_submit_urb(urb, GFP_ATOMIC);
+		if (err) {
+			usb_unanchor_urb(urb);
+			if (err != -EPERM)
+				pr_err("%s: submit read urb failed:%d",
+						__func__, err);
+		}
 	}
 }
 
@@ -401,6 +456,31 @@
 }
 EXPORT_SYMBOL(usb_wwan_chars_in_buffer);
 
+void usb_wwan_throttle(struct tty_struct *tty)
+{
+	struct usb_serial_port *port = tty->driver_data;
+
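+	/* usb_wwan_in_work checks this flag and stops pushing data to the
+	 * TTY until unthrottle clears it.
+	 */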
+	port->throttle_req = true;
+
+	dbg("%s:\n", __func__);
+}
+EXPORT_SYMBOL(usb_wwan_throttle);
+
+void usb_wwan_unthrottle(struct tty_struct *tty)
+{
+	struct usb_serial_port *port = tty->driver_data;
+	struct usb_wwan_port_private *portdata;
+
+	portdata = usb_get_serial_port_data(port);
+
+	dbg("%s:\n", __func__);
+	port->throttle_req = false;
+	port->throttled = false;
+
+	schedule_work(&portdata->in_work);
+}
+EXPORT_SYMBOL(usb_wwan_unthrottle);
+
 int usb_wwan_open(struct tty_struct *tty, struct usb_serial_port *port)
 {
 	struct usb_wwan_port_private *portdata;
@@ -560,6 +640,9 @@
 		}
 		init_usb_anchor(&portdata->delayed);
 		init_usb_anchor(&portdata->submitted);
+		INIT_WORK(&portdata->in_work, usb_wwan_in_work);
+		INIT_LIST_HEAD(&portdata->in_urb_list);
+		spin_lock_init(&portdata->in_lock);
 
 		for (j = 0; j < N_IN_URB; j++) {
 			buffer = kmalloc(IN_BUFLEN, GFP_KERNEL);
@@ -624,14 +707,25 @@
 	int i, j;
 	struct usb_serial_port *port;
 	struct usb_wwan_port_private *portdata;
-
-	dbg("%s", __func__);
+	struct urb *urb;
+	struct list_head *q;
+	unsigned long flags;
 
 	/* Now free them */
 	for (i = 0; i < serial->num_ports; ++i) {
 		port = serial->port[i];
 		portdata = usb_get_serial_port_data(port);
 
+		cancel_work_sync(&portdata->in_work);
+		/* TBD: do we really need this */
+		spin_lock_irqsave(&portdata->in_lock, flags);
+		q = &portdata->in_urb_list;
+		while (!list_empty(q)) {
+			urb = list_first_entry(q, struct urb, urb_list);
+			list_del_init(&urb->urb_list);
+		}
+		spin_unlock_irqrestore(&portdata->in_lock, flags);
+
 		for (j = 0; j < N_IN_URB; j++) {
 			usb_free_urb(portdata->in_urbs[j]);
 			kfree(portdata->in_buffer[j]);
diff --git a/drivers/video/msm/hdmi_msm.c b/drivers/video/msm/hdmi_msm.c
index 03243ac..2526d76 100644
--- a/drivers/video/msm/hdmi_msm.c
+++ b/drivers/video/msm/hdmi_msm.c
@@ -4187,39 +4187,6 @@
 }
 #endif
 
-static void hdmi_msm_hpd_read_work(struct work_struct *work)
-{
-	uint32 hpd_ctrl;
-
-	clk_prepare_enable(hdmi_msm_state->hdmi_app_clk);
-	hdmi_msm_state->pd->core_power(1, 1);
-	hdmi_msm_state->pd->enable_5v(1);
-	hdmi_msm_set_mode(FALSE);
-	hdmi_msm_init_phy(external_common_state->video_resolution);
-	/* HDMI_USEC_REFTIMER[0x0208] */
-	HDMI_OUTP(0x0208, 0x0001001B);
-	hpd_ctrl = (HDMI_INP(0x0258) & ~0xFFF) | 0xFFF;
-
-	/* Toggle HPD circuit to trigger HPD sense */
-	HDMI_OUTP(0x0258, ~(1 << 28) & hpd_ctrl);
-	HDMI_OUTP(0x0258, (1 << 28) | hpd_ctrl);
-
-	hdmi_msm_set_mode(TRUE);
-	msleep(1000);
-	external_common_state->hpd_state = (HDMI_INP(0x0250) & 0x2) >> 1;
-	if (external_common_state->hpd_state) {
-		hdmi_msm_read_edid();
-		DEV_DBG("%s: sense CONNECTED: send ONLINE\n", __func__);
-		kobject_uevent(external_common_state->uevent_kobj,
-			KOBJ_ONLINE);
-	}
-	hdmi_msm_hpd_off();
-	hdmi_msm_set_mode(FALSE);
-	hdmi_msm_state->pd->core_power(0, 1);
-	hdmi_msm_state->pd->enable_5v(0);
-	clk_disable_unprepare(hdmi_msm_state->hdmi_app_clk);
-}
-
 static void hdmi_msm_hpd_off(void)
 {
 	int rc = 0;
@@ -4583,8 +4550,6 @@
 #endif
 	}
 
-	queue_work(hdmi_work_queue, &hdmi_msm_state->hpd_read_work);
-
 	/* Initialize hdmi node and register with switch driver */
 	if (hdmi_prim_display)
 		external_common_state->sdev.name = "hdmi_as_primary";
@@ -4754,7 +4719,6 @@
 	hdmi_common_init_panel_info(&hdmi_msm_panel_data.panel_info);
 	init_completion(&hdmi_msm_state->ddc_sw_done);
 	INIT_WORK(&hdmi_msm_state->hpd_state_work, hdmi_msm_hpd_state_work);
-	INIT_WORK(&hdmi_msm_state->hpd_read_work, hdmi_msm_hpd_read_work);
 #ifdef CONFIG_FB_MSM_HDMI_MSM_PANEL_HDCP_SUPPORT
 	init_completion(&hdmi_msm_state->hdcp_success_done);
 	INIT_WORK(&hdmi_msm_state->hdcp_reauth_work, hdmi_msm_hdcp_reauth_work);
diff --git a/drivers/video/msm/hdmi_msm.h b/drivers/video/msm/hdmi_msm.h
index 5195f2c..06ebb06 100644
--- a/drivers/video/msm/hdmi_msm.h
+++ b/drivers/video/msm/hdmi_msm.h
@@ -61,7 +61,7 @@
 	boolean hpd_cable_chg_detected;
 	boolean full_auth_done;
 	boolean hpd_during_auth;
-	struct work_struct hpd_state_work, hpd_read_work;
+	struct work_struct hpd_state_work;
 	struct timer_list hpd_state_timer;
 	struct completion ddc_sw_done;
 
diff --git a/drivers/video/msm/mdp.c b/drivers/video/msm/mdp.c
index cad6e02..ea9e5ab 100644
--- a/drivers/video/msm/mdp.c
+++ b/drivers/video/msm/mdp.c
@@ -51,7 +51,7 @@
 int mdp_rev;
 
 static struct platform_device *mdp_init_pdev;
-static struct regulator *footswitch;
+static struct regulator *footswitch, *hdmi_pll_fs;
 static unsigned int mdp_footswitch_on;
 
 struct completion mdp_ppp_comp;
@@ -847,6 +847,7 @@
 	MDP_OUTP(base + 0x0018, INTR_HIST_DONE | INTR_HIST_RESET_SEQ_DONE);
 	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
 
+	mgmt->hist = NULL;
 	complete(&mgmt->mdp_hist_comp);
 	mdp_disable_irq(mgmt->irq_term);
 	return 0;
@@ -926,7 +927,6 @@
 	mgmt->frame_cnt = req->frame_cnt;
 	mgmt->bit_mask = req->bit_mask;
 	mgmt->num_bins = req->num_bins;
-	mgmt->hist = NULL;
 
 	ret = mdp_histogram_enable(mgmt);
 
@@ -963,6 +963,7 @@
 
 	if (!mfd->panel_power_on) {
 		mgmt->mdp_is_hist_data = FALSE;
+		mgmt->hist = NULL;
 		complete(&mgmt->mdp_hist_comp);
 		ret = -EINVAL;
 		goto error_lock;
@@ -2123,10 +2124,27 @@
 	}
 	disable_irq(mdp_irq);
 
+	hdmi_pll_fs = regulator_get(&pdev->dev, "hdmi_pll_fs");
+	if (IS_ERR(hdmi_pll_fs)) {
+		hdmi_pll_fs = NULL;
+	} else {
+		if (mdp_rev != MDP_REV_44) {
+			ret = regulator_set_voltage(hdmi_pll_fs, 1800000,
+				1800000);
+			if (ret) {
+				pr_err("set_voltage failed for hdmi_pll_fs, ret=%d\n",
+					ret);
+			}
+		}
+	}
+
 	footswitch = regulator_get(&pdev->dev, "vdd");
-	if (IS_ERR(footswitch))
+	if (IS_ERR(footswitch)) {
 		footswitch = NULL;
-	else {
+	} else {
+		if (hdmi_pll_fs)
+			regulator_enable(hdmi_pll_fs);
+
 		regulator_enable(footswitch);
 		mdp_footswitch_on = 1;
 
@@ -2135,6 +2153,8 @@
 			msleep(20);
 			regulator_enable(footswitch);
 		}
+		if (hdmi_pll_fs)
+			regulator_disable(hdmi_pll_fs);
 	}
 
 	mdp_clk = clk_get(&pdev->dev, "core_clk");
@@ -2626,6 +2646,9 @@
 		return;
 	}
 
+	if (hdmi_pll_fs)
+		regulator_enable(hdmi_pll_fs);
+
 	if (on && !mdp_footswitch_on) {
 		pr_debug("Enable MDP FS\n");
 		regulator_enable(footswitch);
@@ -2636,6 +2659,9 @@
 		mdp_footswitch_on = 0;
 	}
 
+	if (hdmi_pll_fs)
+		regulator_disable(hdmi_pll_fs);
+
 	mutex_unlock(&mdp_suspend_mutex);
 }
 
diff --git a/drivers/video/msm/mdp.h b/drivers/video/msm/mdp.h
index b232e0a..e60b24e 100644
--- a/drivers/video/msm/mdp.h
+++ b/drivers/video/msm/mdp.h
@@ -74,8 +74,7 @@
 
 struct mdp_buf_type {
 	struct ion_handle *ihdl;
-	u32 write_addr;
-	u32 read_addr;
+	u32 phys_addr;
 	u32 size;
 };
 
diff --git a/drivers/video/msm/mdp4.h b/drivers/video/msm/mdp4.h
index 59404d0..1557eed 100644
--- a/drivers/video/msm/mdp4.h
+++ b/drivers/video/msm/mdp4.h
@@ -337,8 +337,7 @@
 	uint32 element1; /* 0 = C0, 1 = C1, 2 = C2, 3 = C3 */
 	uint32 element0; /* 0 = C0, 1 = C1, 2 = C2, 3 = C3 */
 	struct completion comp;
-	ulong ov_blt_addr; /* blt mode addr */
-	ulong dma_blt_addr; /* blt mode addr */
+	ulong blt_addr; /* blt mode addr */
 	ulong blt_base;
 	ulong blt_offset;
 	uint32 blt_cnt;
diff --git a/drivers/video/msm/mdp4_overlay.c b/drivers/video/msm/mdp4_overlay.c
index 287b564..ad37d2f 100644
--- a/drivers/video/msm/mdp4_overlay.c
+++ b/drivers/video/msm/mdp4_overlay.c
@@ -116,17 +116,17 @@
 	if (!display_iclient)
 		return -EINVAL;
 
-	*srcp_ihdl = ion_import_fd(display_iclient, mem_id);
+	*srcp_ihdl = ion_import_dma_buf(display_iclient, mem_id);
 	if (IS_ERR_OR_NULL(*srcp_ihdl)) {
-		pr_err("ion_import_fd() failed\n");
+		pr_err("ion_import_dma_buf() failed\n");
 		return PTR_ERR(*srcp_ihdl);
 	}
-	pr_debug("%s(): ion_hdl %p, ion_buf %p\n", __func__, *srcp_ihdl,
-		ion_share(display_iclient, *srcp_ihdl));
+	pr_debug("%s(): ion_hdl %p, ion_buf %d\n", __func__, *srcp_ihdl,
+		ion_share_dma_buf(display_iclient, *srcp_ihdl));
 	pr_debug("mixer %u, pipe %u, plane %u\n", pipe->mixer_num,
 		pipe->pipe_ndx, plane);
 	if (ion_map_iommu(display_iclient, *srcp_ihdl,
-		DISPLAY_READ_DOMAIN, GEN_POOL, SZ_4K, 0, start,
+		DISPLAY_DOMAIN, GEN_POOL, SZ_4K, 0, start,
 		len, 0, ION_IOMMU_UNMAP_DELAYED)) {
 		ion_free(display_iclient, *srcp_ihdl);
 		pr_err("ion_map_iommu() failed\n");
@@ -140,7 +140,7 @@
 		if (iom_pipe_info->prev_ihdl[plane]) {
 			ion_unmap_iommu(display_iclient,
 				iom_pipe_info->prev_ihdl[plane],
-				DISPLAY_READ_DOMAIN, GEN_POOL);
+				DISPLAY_DOMAIN, GEN_POOL);
 			ion_free(display_iclient,
 				iom_pipe_info->prev_ihdl[plane]);
 			pr_debug("Previous: mixer %u, pipe %u, plane %u, "
@@ -175,7 +175,7 @@
 					iom_pipe_info->prev_ihdl[i]);
 				ion_unmap_iommu(display_iclient,
 					iom_pipe_info->prev_ihdl[i],
-					DISPLAY_READ_DOMAIN, GEN_POOL);
+					DISPLAY_DOMAIN, GEN_POOL);
 				ion_free(display_iclient,
 					iom_pipe_info->prev_ihdl[i]);
 				iom_pipe_info->prev_ihdl[i] = NULL;
@@ -191,7 +191,7 @@
 						iom_pipe_info->ihdl[i]);
 					ion_unmap_iommu(display_iclient,
 						iom_pipe_info->ihdl[i],
-						DISPLAY_READ_DOMAIN, GEN_POOL);
+						DISPLAY_DOMAIN, GEN_POOL);
 					ion_free(display_iclient,
 						iom_pipe_info->ihdl[i]);
 					iom_pipe_info->ihdl[i] = NULL;
@@ -346,7 +346,7 @@
 
 	MDP_OUTP(MDP_BASE + 0xb0004,
 			(pipe->src_height << 16 | pipe->src_width));
-	if (pipe->dma_blt_addr) {
+	if (pipe->blt_addr) {
 		uint32 off, bpp;
 #ifdef BLT_RGB565
 		bpp = 2; /* overlay ouput is RGB565 */
@@ -356,7 +356,7 @@
 		off = 0;
 		if (pipe->ov_cnt & 0x01)
 			off = pipe->src_height * pipe->src_width * bpp;
-		MDP_OUTP(MDP_BASE + 0xb0008, pipe->dma_blt_addr + off);
+		MDP_OUTP(MDP_BASE + 0xb0008, pipe->blt_addr + off);
 		/* RGB888, output of overlay blending */
 		MDP_OUTP(MDP_BASE + 0xb000c, pipe->src_width * bpp);
 	} else {
@@ -427,7 +427,7 @@
 	/* dma_p source */
 	MDP_OUTP(MDP_BASE + 0x90004,
 			(pipe->src_height << 16 | pipe->src_width));
-	if (pipe->dma_blt_addr) {
+	if (pipe->blt_addr) {
 #ifdef BLT_RGB565
 		bpp = 2; /* overlay ouput is RGB565 */
 #else
@@ -436,7 +436,7 @@
 		off = 0;
 		if (pipe->dmap_cnt & 0x01)
 			off = pipe->src_height * pipe->src_width * bpp;
-		MDP_OUTP(MDP_BASE + 0x90008, pipe->dma_blt_addr + off);
+		MDP_OUTP(MDP_BASE + 0x90008, pipe->blt_addr + off);
 		/* RGB888, output of overlay blending */
 		MDP_OUTP(MDP_BASE + 0x9000c, pipe->src_width * bpp);
 	} else {
@@ -585,14 +585,14 @@
 
 	mdp4_scale_setup(pipe);
 
+	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
+
 	/* Ensure proper covert matrix loaded when color space swaps */
 	curr = inpdw(rgb_base + 0x0058);
 	/* Don't touch bits you don't want to configure*/
 	mask = 0xFFFEFFFF;
 	pipe->op_mode = (pipe->op_mode & mask) | (curr & ~mask);
 
-	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
-
 	outpdw(rgb_base + 0x0000, src_size);	/* MDP_RGB_SRC_SIZE */
 	outpdw(rgb_base + 0x0004, src_xy);	/* MDP_RGB_SRC_XY */
 	outpdw(rgb_base + 0x0008, dst_size);	/* MDP_RGB_DST_SIZE */
@@ -1321,7 +1321,7 @@
 	/*
 	 * BLT support both primary and external external
 	 */
-	if (pipe->ov_blt_addr) {
+	if (pipe->blt_addr) {
 		int off, bpp;
 #ifdef BLT_RGB565
 		bpp = 2;  /* overlay ouput is RGB565 */
@@ -1338,10 +1338,10 @@
 			if (pipe->ov_cnt & 0x01)
 				off = pipe->src_height * pipe->src_width * bpp;
 
-			outpdw(overlay_base + 0x000c, pipe->ov_blt_addr + off);
+			outpdw(overlay_base + 0x000c, pipe->blt_addr + off);
 			/* overlay ouput is RGB888 */
 			outpdw(overlay_base + 0x0010, pipe->src_width * bpp);
-			outpdw(overlay_base + 0x001c, pipe->ov_blt_addr + off);
+			outpdw(overlay_base + 0x001c, pipe->blt_addr + off);
 			/* MDDI - BLT + on demand */
 			outpdw(overlay_base + 0x0004, 0x08);
 
@@ -1361,19 +1361,19 @@
 							pipe->src_width * bpp;
 
 				outpdw(overlay_base + 0x000c,
-						pipe->ov_blt_addr + off);
+						pipe->blt_addr + off);
 				/* overlay ouput is RGB888 */
 				outpdw(overlay_base + 0x0010,
 					((pipe->src_width << 16) |
 					 pipe->src_width));
 				outpdw(overlay_base + 0x001c,
-						pipe->ov_blt_addr + off);
+						pipe->blt_addr + off);
 				off = pipe->src_height * pipe->src_width;
 				/* align chroma to 2k address */
 				off = (off + 2047) & ~2047;
 				/* UV plane adress */
 				outpdw(overlay_base + 0x0020,
-						pipe->ov_blt_addr + off);
+						pipe->blt_addr + off);
 				/* MDDI - BLT + on demand */
 				outpdw(overlay_base + 0x0004, 0x08);
 				/* pseudo planar + writeback */
@@ -3086,7 +3086,13 @@
 		mdp4_overlay_rgb_setup(pipe);	/* rgb pipe */
 	}
 
-	mdp4_overlay_reg_flush(pipe, 1);
+	if (pipe->mixer_num != MDP4_MIXER2) {
+		if ((ctrl->panel_mode & MDP4_PANEL_DTV) ||
+			(ctrl->panel_mode & MDP4_PANEL_LCDC) ||
+			(ctrl->panel_mode & MDP4_PANEL_DSI_VIDEO))
+			mdp4_overlay_reg_flush(pipe, 1);
+	}
+
 	mdp4_mixer_stage_up(pipe);
 
 	if (pipe->mixer_num == MDP4_MIXER2) {
@@ -3180,25 +3186,25 @@
 	char *name;
 	int  domain;
 } msm_iommu_ctx_names[] = {
-	/* Display read*/
+	/* Display */
 	{
 		.name = "mdp_port0_cb0",
-		.domain = DISPLAY_READ_DOMAIN,
+		.domain = DISPLAY_DOMAIN,
 	},
-	/* Display read*/
+	/* Display */
 	{
 		.name = "mdp_port0_cb1",
-		.domain = DISPLAY_WRITE_DOMAIN,
+		.domain = DISPLAY_DOMAIN,
 	},
-	/* Display write */
+	/* Display */
 	{
 		.name = "mdp_port1_cb0",
-		.domain = DISPLAY_READ_DOMAIN,
+		.domain = DISPLAY_DOMAIN,
 	},
-	/* Display write */
+	/* Display */
 	{
 		.name = "mdp_port1_cb1",
-		.domain = DISPLAY_WRITE_DOMAIN,
+		.domain = DISPLAY_DOMAIN,
 	},
 };
 
diff --git a/drivers/video/msm/mdp4_overlay_dsi_cmd.c b/drivers/video/msm/mdp4_overlay_dsi_cmd.c
index 0d8fea7..8ebf8a0 100644
--- a/drivers/video/msm/mdp4_overlay_dsi_cmd.c
+++ b/drivers/video/msm/mdp4_overlay_dsi_cmd.c
@@ -162,8 +162,7 @@
 		dsi_pipe = pipe; /* keep it */
 
 		mdp4_init_writeback_buf(mfd, MDP4_MIXER0);
-		pipe->ov_blt_addr = 0;
-		pipe->dma_blt_addr = 0;
+		pipe->blt_addr = 0;
 
 	} else {
 		pipe = dsi_pipe;
@@ -322,25 +321,24 @@
 {
 	unsigned long flag;
 
-	pr_debug("%s: blt_end=%d ov_blt_addr=%x pid=%d\n",
-	__func__, dsi_pipe->blt_end, (int)dsi_pipe->ov_blt_addr, current->pid);
+	pr_debug("%s: blt_end=%d blt_addr=%x pid=%d\n",
+	__func__, dsi_pipe->blt_end, (int)dsi_pipe->blt_addr, current->pid);
 
 	mdp4_allocate_writeback_buf(mfd, MDP4_MIXER0);
 
-	if (mfd->ov0_wb_buf->write_addr == 0) {
+	if (mfd->ov0_wb_buf->phys_addr == 0) {
 		pr_info("%s: no blt_base assigned\n", __func__);
 		return -EBUSY;
 	}
 
-	if (dsi_pipe->ov_blt_addr == 0) {
+	if (dsi_pipe->blt_addr == 0) {
 		mdp4_dsi_cmd_dma_busy_wait(mfd);
 		spin_lock_irqsave(&mdp_spin_lock, flag);
 		dsi_pipe->blt_end = 0;
 		dsi_pipe->blt_cnt = 0;
 		dsi_pipe->ov_cnt = 0;
 		dsi_pipe->dmap_cnt = 0;
-		dsi_pipe->ov_blt_addr = mfd->ov0_wb_buf->write_addr;
-		dsi_pipe->dma_blt_addr = mfd->ov0_wb_buf->read_addr;
+		dsi_pipe->blt_addr = mfd->ov0_wb_buf->phys_addr;
 		mdp4_stat.blt_dsi_cmd++;
 		spin_unlock_irqrestore(&mdp_spin_lock, flag);
 		return 0;
@@ -354,10 +352,10 @@
 	unsigned long flag;
 
 
-	pr_debug("%s: blt_end=%d ov_blt_addr=%x\n",
-		 __func__, dsi_pipe->blt_end, (int)dsi_pipe->ov_blt_addr);
+	pr_debug("%s: blt_end=%d blt_addr=%x\n",
+		 __func__, dsi_pipe->blt_end, (int)dsi_pipe->blt_addr);
 
-	if ((dsi_pipe->blt_end == 0) && dsi_pipe->ov_blt_addr) {
+	if ((dsi_pipe->blt_end == 0) && dsi_pipe->blt_addr) {
 		spin_lock_irqsave(&mdp_spin_lock, flag);
 		dsi_pipe->blt_end = 1;	/* mark as end */
 		spin_unlock_irqrestore(&mdp_spin_lock, flag);
@@ -395,7 +393,7 @@
 	char *overlay_base;
 
 
-	if (pipe->ov_blt_addr == 0)
+	if (pipe->blt_addr == 0)
 		return;
 
 
@@ -407,7 +405,7 @@
 	off = 0;
 	if (pipe->dmap_cnt & 0x01)
 		off = pipe->src_height * pipe->src_width * bpp;
-	addr = pipe->dma_blt_addr + off;
+	addr = pipe->blt_addr + off;
 
 	/* dmap */
 	MDP_OUTP(MDP_BASE + 0x90008, addr);
@@ -415,7 +413,7 @@
 	off = 0;
 	if (pipe->ov_cnt & 0x01)
 		off = pipe->src_height * pipe->src_width * bpp;
-	addr2 = pipe->ov_blt_addr + off;
+	addr2 = pipe->blt_addr + off;
 	/* overlay 0 */
 	overlay_base = MDP_BASE + MDP4_OVERLAYPROC0_BASE;/* 0x10000 */
 	outpdw(overlay_base + 0x000c, addr2);
@@ -443,8 +441,7 @@
 		spin_unlock(&mdp_spin_lock);
 		if (dsi_pipe->blt_end) {
 			dsi_pipe->blt_end = 0;
-			dsi_pipe->dma_blt_addr = 0;
-			dsi_pipe->ov_blt_addr = 0;
+			dsi_pipe->blt_addr = 0;
 			pr_debug("%s: END, ov_cnt=%d dmap_cnt=%d\n",
 				__func__, dsi_pipe->ov_cnt, dsi_pipe->dmap_cnt);
 			mdp_intr_mask &= ~INTR_DMA_P_DONE;
@@ -482,7 +479,7 @@
 {
 	int diff;
 
-	if (dsi_pipe->ov_blt_addr == 0) {
+	if (dsi_pipe->blt_addr == 0) {
 		mdp_pipe_ctrl(MDP_OVERLAY0_BLOCK, MDP_BLOCK_POWER_OFF, TRUE);
 		spin_lock(&mdp_spin_lock);
 		dma->busy = FALSE;
@@ -542,7 +539,7 @@
 		mipi_dsi_mdp_busy_wait(dsi_mfd);
 		mdp4_overlay_update_dsi_cmd(dsi_mfd);
 
-		if (dsi_pipe->ov_blt_addr)
+		if (dsi_pipe->blt_addr)
 			mdp4_dsi_blt_dmap_busy_wait(dsi_mfd);
 		mdp4_dsi_cmd_overlay_kickoff(dsi_mfd, dsi_pipe);
 	}
@@ -625,17 +622,17 @@
 	 * to be called before kickoff.
 	 * vice versa for blt disabled.
 	 */
-	if (dsi_pipe->ov_blt_addr && dsi_pipe->blt_cnt == 0)
+	if (dsi_pipe->blt_addr && dsi_pipe->blt_cnt == 0)
 		mdp4_overlay_update_dsi_cmd(mfd); /* first time */
-	else if (dsi_pipe->ov_blt_addr == 0  && dsi_pipe->blt_cnt) {
+	else if (dsi_pipe->blt_addr == 0  && dsi_pipe->blt_cnt) {
 		mdp4_overlay_update_dsi_cmd(mfd); /* last time */
 		dsi_pipe->blt_cnt = 0;
 	}
 
-	pr_debug("%s: ov_blt_addr=%d blt_cnt=%d\n",
-		__func__, (int)dsi_pipe->ov_blt_addr, dsi_pipe->blt_cnt);
+	pr_debug("%s: blt_addr=%d blt_cnt=%d\n",
+		__func__, (int)dsi_pipe->blt_addr, dsi_pipe->blt_cnt);
 
-	if (dsi_pipe->ov_blt_addr)
+	if (dsi_pipe->blt_addr)
 		mdp4_dsi_blt_dmap_busy_wait(dsi_mfd);
 
 	mdp4_dsi_cmd_overlay_kickoff(mfd, pipe);
@@ -655,13 +652,13 @@
 {
 	unsigned long flag;
 
-
+	mdp4_iommu_attach();
 	/* change mdp clk */
 	mdp4_set_perf_level();
 
 	mipi_dsi_mdp_busy_wait(mfd);
 
-	if (dsi_pipe->ov_blt_addr == 0)
+	if (dsi_pipe->blt_addr == 0)
 		mipi_dsi_cmd_mdp_start();
 
 	mdp4_overlay_dsi_state_set(ST_DSI_PLAYING);
@@ -669,7 +666,7 @@
 	spin_lock_irqsave(&mdp_spin_lock, flag);
 	mdp_enable_irq(MDP_OVERLAY0_TERM);
 	mfd->dma->busy = TRUE;
-	if (dsi_pipe->ov_blt_addr)
+	if (dsi_pipe->blt_addr)
 		mfd->dma->dmap_busy = TRUE;
 	/* start OVERLAY pipe */
 	spin_unlock_irqrestore(&mdp_spin_lock, flag);
@@ -703,12 +700,11 @@
 	if (mfd && mfd->panel_power_on) {
 		mdp4_dsi_cmd_dma_busy_wait(mfd);
 
-		if (dsi_pipe && dsi_pipe->ov_blt_addr)
+		if (dsi_pipe && dsi_pipe->blt_addr)
 			mdp4_dsi_blt_dmap_busy_wait(mfd);
 
 		mdp4_overlay_update_dsi_cmd(mfd);
 
-		mdp4_iommu_attach();
 		mdp4_dsi_cmd_kickoff_ui(mfd, dsi_pipe);
 		mdp4_iommu_unmap(dsi_pipe);
 	/* signal if pan function is waiting for the update completion */
diff --git a/drivers/video/msm/mdp4_overlay_dsi_video.c b/drivers/video/msm/mdp4_overlay_dsi_video.c
index 478a8ce..3cdd72e 100644
--- a/drivers/video/msm/mdp4_overlay_dsi_video.c
+++ b/drivers/video/msm/mdp4_overlay_dsi_video.c
@@ -152,8 +152,7 @@
 		init_completion(&dsi_video_comp);
 
 		mdp4_init_writeback_buf(mfd, MDP4_MIXER0);
-		pipe->ov_blt_addr = 0;
-		pipe->dma_blt_addr = 0;
+		pipe->blt_addr = 0;
 
 	} else {
 		pipe = dsi_pipe;
@@ -282,6 +281,7 @@
 
 	ret = panel_next_on(pdev);
 	if (ret == 0) {
+		mdp_pipe_ctrl(MDP_OVERLAY0_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
 		if (display_on != NULL) {
 			msleep(50);
 			display_on(pdev);
@@ -416,7 +416,7 @@
 	char *overlay_base;
 
 
-	if (pipe->ov_blt_addr == 0)
+	if (pipe->blt_addr == 0)
 		return;
 
 
@@ -428,7 +428,7 @@
 	off = 0;
 	if (pipe->ov_cnt & 0x01)
 		off = pipe->src_height * pipe->src_width * bpp;
-	addr = pipe->ov_blt_addr + off;
+	addr = pipe->blt_addr + off;
 
 	/* overlay 0 */
 	overlay_base = MDP_BASE + MDP4_OVERLAYPROC0_BASE;/* 0x10000 */
@@ -441,7 +441,7 @@
 	uint32 off, addr;
 	int bpp;
 
-	if (pipe->ov_blt_addr == 0)
+	if (pipe->blt_addr == 0)
 		return;
 
 
@@ -453,7 +453,7 @@
 	off = 0;
 	if (pipe->dmap_cnt & 0x01)
 		off = pipe->src_height * pipe->src_width * bpp;
-	addr = pipe->dma_blt_addr + off;
+	addr = pipe->blt_addr + off;
 
 	/* dmap */
 	MDP_OUTP(MDP_BASE + 0x90008, addr);
@@ -516,7 +516,6 @@
 		mdp4_iommu_attach();
 		mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
 		MDP_OUTP(MDP_BASE + DSI_VIDEO_BASE, 1);
-		mdp_pipe_ctrl(MDP_OVERLAY0_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
 		mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
 		dsi_video_enabled = 1;
 	}
@@ -530,7 +529,7 @@
 	if (pipe->flags & MDP_OV_PLAY_NOWAIT)
 		return;
 
-	if (dsi_pipe->ov_blt_addr) {
+	if (dsi_pipe->blt_addr) {
 		mdp4_overlay_dsi_video_dma_busy_wait(mfd);
 
 		mdp4_dsi_video_blt_ov_update(dsi_pipe);
@@ -573,7 +572,7 @@
 		mdp4_overlayproc_cfg(dsi_pipe);
 		mdp4_overlay_dmap_xy(dsi_pipe);
 		mdp_is_in_isr = FALSE;
-		if (dsi_pipe->ov_blt_addr) {
+		if (dsi_pipe->blt_addr) {
 			mdp4_dsi_video_blt_ov_update(dsi_pipe);
 			dsi_pipe->ov_cnt++;
 			outp32(MDP_INTR_CLEAR, INTR_OVERLAY0_DONE);
@@ -596,7 +595,7 @@
 {
 	spin_lock(&mdp_spin_lock);
 	dma->busy = FALSE;
-	if (dsi_pipe->ov_blt_addr == 0) {
+	if (dsi_pipe->blt_addr == 0) {
 		spin_unlock(&mdp_spin_lock);
 		return;
 	}
@@ -619,23 +618,21 @@
 
 	mdp4_allocate_writeback_buf(mfd, MDP4_MIXER0);
 
-	if (mfd->ov0_wb_buf->write_addr == 0) {
+	if (mfd->ov0_wb_buf->phys_addr == 0) {
 		pr_info("%s: no blt_base assigned\n", __func__);
 		return;
 	}
 
 	spin_lock_irqsave(&mdp_spin_lock, flag);
-	if (enable && dsi_pipe->ov_blt_addr == 0) {
-		dsi_pipe->ov_blt_addr = mfd->ov0_wb_buf->write_addr;
-		dsi_pipe->dma_blt_addr = mfd->ov0_wb_buf->read_addr;
+	if (enable && dsi_pipe->blt_addr == 0) {
+		dsi_pipe->blt_addr = mfd->ov0_wb_buf->phys_addr;
 		dsi_pipe->blt_cnt = 0;
 		dsi_pipe->ov_cnt = 0;
 		dsi_pipe->dmap_cnt = 0;
 		mdp4_stat.blt_dsi_video++;
 		change++;
-	} else if (enable == 0 && dsi_pipe->ov_blt_addr) {
-		dsi_pipe->ov_blt_addr = 0;
-		dsi_pipe->dma_blt_addr = 0;
+	} else if (enable == 0 && dsi_pipe->blt_addr) {
+		dsi_pipe->blt_addr = 0;
 		change++;
 	}
 
@@ -644,8 +641,8 @@
 		return;
 	}
 
-	pr_debug("%s: enable=%d ov_blt_addr=%x\n", __func__,
-			enable, (int)dsi_pipe->ov_blt_addr);
+	pr_debug("%s: enable=%d blt_addr=%x\n", __func__,
+			enable, (int)dsi_pipe->blt_addr);
 	blt_cfg_changed = 1;
 
 	spin_unlock_irqrestore(&mdp_spin_lock, flag);
diff --git a/drivers/video/msm/mdp4_overlay_dtv.c b/drivers/video/msm/mdp4_overlay_dtv.c
index e41f9e8..e26522b 100644
--- a/drivers/video/msm/mdp4_overlay_dtv.c
+++ b/drivers/video/msm/mdp4_overlay_dtv.c
@@ -201,6 +201,7 @@
 	/* Test pattern 8 x 8 pixel */
 	/* MDP_OUTP(MDP_BASE + DTV_BASE + 0x4C, 0x80000808); */
 
+	mdp_pipe_ctrl(MDP_OVERLAY1_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
 	/* MDP cmd block disable */
 	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
 
@@ -376,8 +377,7 @@
 		return -ENODEV;
 
 	mdp4_init_writeback_buf(mfd, MDP4_MIXER1);
-	dtv_pipe->ov_blt_addr = 0;
-	dtv_pipe->dma_blt_addr = 0;
+	dtv_pipe->blt_addr = 0;
 
 	return mdp4_dtv_start(mfd);
 }
@@ -408,7 +408,7 @@
 	int bpp;
 	char *overlay_base;
 
-	if (pipe->ov_blt_addr == 0)
+	if (pipe->blt_addr == 0)
 		return;
 #ifdef BLT_RGB565
 	bpp = 2; /* overlay ouput is RGB565 */
@@ -418,7 +418,7 @@
 	off = (pipe->ov_cnt & 0x01) ?
 		pipe->src_height * pipe->src_width * bpp : 0;
 
-	addr = pipe->ov_blt_addr + off;
+	addr = pipe->blt_addr + off;
 	pr_debug("%s overlay addr 0x%x\n", __func__, addr);
 	/* overlay 1 */
 	overlay_base = MDP_BASE + MDP4_OVERLAYPROC1_BASE;/* 0x18000 */
@@ -431,7 +431,7 @@
 	uint32 off, addr;
 	int bpp;
 
-	if (pipe->ov_blt_addr == 0)
+	if (pipe->blt_addr == 0)
 		return;
 
 #ifdef BLT_RGB565
@@ -441,7 +441,7 @@
 #endif
 	off =  (pipe->dmae_cnt & 0x01) ?
 		pipe->src_height * pipe->src_width * bpp : 0;
-	addr = pipe->dma_blt_addr + off;
+	addr = pipe->blt_addr + off;
 	MDP_OUTP(MDP_BASE + 0xb0008, addr);
 }
 
@@ -464,7 +464,7 @@
 		return;
 	}
 
-	if (dtv_pipe->ov_blt_addr) {
+	if (dtv_pipe->blt_addr) {
 		mdp4_dtv_blt_ov_update(dtv_pipe);
 		dtv_pipe->ov_cnt++;
 		mdp4_overlay_dtv_ov_kick_start();
@@ -521,10 +521,10 @@
 	}
 
 	wait_for_completion_timeout(&dtv_pipe->comp,
-			msecs_to_jiffies(VSYNC_PERIOD*2));
+			msecs_to_jiffies(VSYNC_PERIOD * 3));
 	mdp_disable_irq(MDP_OVERLAY1_TERM);
 
-	if (dtv_pipe->ov_blt_addr)
+	if (dtv_pipe->blt_addr)
 		mdp4_overlay_dtv_wait4dmae(mfd);
 }
 
@@ -535,7 +535,6 @@
 		mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
 		/* enable DTV block */
 		MDP_OUTP(MDP_BASE + DTV_BASE, 1);
-		mdp_pipe_ctrl(MDP_OVERLAY1_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
 		mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
 		dtv_enabled = 1;
 	}
@@ -582,7 +581,7 @@
 {
 	if (!dtv_pipe)
 		return;
-	if (dtv_pipe->ov_blt_addr) {
+	if (dtv_pipe->blt_addr) {
 		mdp4_dtv_blt_dmae_update(dtv_pipe);
 		dtv_pipe->dmae_cnt++;
 	}
@@ -643,7 +642,7 @@
 	unsigned long flag;
 	int change = 0;
 
-	if (!mfd->ov1_wb_buf->write_addr) {
+	if (!mfd->ov1_wb_buf->phys_addr) {
 		pr_debug("%s: no writeback buf assigned\n", __func__);
 		return;
 	}
@@ -655,18 +654,16 @@
 	}
 
 	spin_lock_irqsave(&mdp_spin_lock, flag);
-	if (enable && dtv_pipe->ov_blt_addr == 0) {
-		dtv_pipe->ov_blt_addr = mfd->ov1_wb_buf->write_addr;
-		dtv_pipe->dma_blt_addr = mfd->ov1_wb_buf->read_addr;
+	if (enable && dtv_pipe->blt_addr == 0) {
+		dtv_pipe->blt_addr = mfd->ov1_wb_buf->phys_addr;
 		change++;
 		dtv_pipe->ov_cnt = 0;
 		dtv_pipe->dmae_cnt = 0;
-	} else if (enable == 0 && dtv_pipe->ov_blt_addr) {
-		dtv_pipe->ov_blt_addr = 0;
-		dtv_pipe->dma_blt_addr = 0;
+	} else if (enable == 0 && dtv_pipe->blt_addr) {
+		dtv_pipe->blt_addr = 0;
 		change++;
 	}
-	pr_debug("%s: ov_blt_addr=%x\n", __func__, (int)dtv_pipe->ov_blt_addr);
+	pr_debug("%s: blt_addr=%x\n", __func__, (int)dtv_pipe->blt_addr);
 	spin_unlock_irqrestore(&mdp_spin_lock, flag);
 
 	if (!change)
diff --git a/drivers/video/msm/mdp4_overlay_lcdc.c b/drivers/video/msm/mdp4_overlay_lcdc.c
index 1c3bf3f..98c8191 100644
--- a/drivers/video/msm/mdp4_overlay_lcdc.c
+++ b/drivers/video/msm/mdp4_overlay_lcdc.c
@@ -133,8 +133,8 @@
 		init_completion(&lcdc_comp);
 
 		mdp4_init_writeback_buf(mfd, MDP4_MIXER0);
-		pipe->ov_blt_addr = 0;
-		pipe->dma_blt_addr = 0;
+		pipe->blt_addr = 0;
+
 	} else {
 		pipe = lcdc_pipe;
 	}
@@ -261,6 +261,9 @@
 	mdp_histogram_ctrl_all(TRUE);
 
 	ret = panel_next_on(pdev);
+	if (ret == 0)
+		mdp_pipe_ctrl(MDP_OVERLAY0_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
+
 	/* MDP cmd block disable */
 	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
 
@@ -322,7 +325,7 @@
 	char *overlay_base;
 
 
-	if (pipe->ov_blt_addr == 0)
+	if (pipe->blt_addr == 0)
 		return;
 
 
@@ -334,7 +337,7 @@
 	off = 0;
 	if (pipe->ov_cnt & 0x01)
 		off = pipe->src_height * pipe->src_width * bpp;
-	addr = pipe->ov_blt_addr + off;
+	addr = pipe->blt_addr + off;
 
 	/* overlay 0 */
 	overlay_base = MDP_BASE + MDP4_OVERLAYPROC0_BASE;/* 0x10000 */
@@ -347,7 +350,7 @@
 	uint32 off, addr;
 	int bpp;
 
-	if (pipe->ov_blt_addr == 0)
+	if (pipe->blt_addr == 0)
 		return;
 
 
@@ -359,7 +362,7 @@
 	off = 0;
 	if (pipe->dmap_cnt & 0x01)
 		off = pipe->src_height * pipe->src_width * bpp;
-	addr = pipe->dma_blt_addr + off;
+	addr = pipe->blt_addr + off;
 
 	/* dmap */
 	MDP_OUTP(MDP_BASE + 0x90008, addr);
@@ -422,7 +425,6 @@
 		mdp4_iommu_attach();
 		mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
 		MDP_OUTP(MDP_BASE + LCDC_BASE, 1);
-		mdp_pipe_ctrl(MDP_OVERLAY0_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
 		mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
 		lcdc_enabled = 1;
 	}
@@ -436,7 +438,7 @@
 	if (pipe->flags & MDP_OV_PLAY_NOWAIT)
 		return;
 
-	if (lcdc_pipe->ov_blt_addr) {
+	if (lcdc_pipe->blt_addr) {
 		mdp4_overlay_lcdc_dma_busy_wait(mfd);
 
 		mdp4_lcdc_blt_ov_update(lcdc_pipe);
@@ -483,7 +485,7 @@
 {
 	spin_lock(&mdp_spin_lock);
 	dma->busy = FALSE;
-	if (lcdc_pipe->ov_blt_addr == 0) {
+	if (lcdc_pipe->blt_addr == 0) {
 		spin_unlock(&mdp_spin_lock);
 		return;
 	}
@@ -498,7 +500,7 @@
 {
 	unsigned long flag;
 
-	if (lcdc_pipe->ov_blt_addr) {
+	if (lcdc_pipe->blt_addr) {
 		mdp4_overlay_lcdc_dma_busy_wait(mfd);
 
 		mdp4_lcdc_blt_ov_update(lcdc_pipe);
@@ -528,26 +530,24 @@
 
 	mdp4_allocate_writeback_buf(mfd, MDP4_MIXER0);
 
-	if (!mfd->ov0_wb_buf->write_addr) {
+	if (!mfd->ov0_wb_buf->phys_addr) {
 		pr_debug("%s: no blt_base assigned\n", __func__);
 		return;
 	}
 
 	spin_lock_irqsave(&mdp_spin_lock, flag);
-	if (enable && lcdc_pipe->ov_blt_addr == 0) {
-		lcdc_pipe->ov_blt_addr = mfd->ov0_wb_buf->write_addr;
-		lcdc_pipe->dma_blt_addr = mfd->ov0_wb_buf->read_addr;
+	if (enable && lcdc_pipe->blt_addr == 0) {
+		lcdc_pipe->blt_addr = mfd->ov0_wb_buf->phys_addr;
 		change++;
 		lcdc_pipe->blt_cnt = 0;
 		lcdc_pipe->ov_cnt = 0;
 		lcdc_pipe->dmap_cnt = 0;
 		mdp4_stat.blt_lcdc++;
-	} else if (enable == 0 && lcdc_pipe->ov_blt_addr) {
-		lcdc_pipe->ov_blt_addr = 0;
-		lcdc_pipe->dma_blt_addr = 0;
+	} else if (enable == 0 && lcdc_pipe->blt_addr) {
+		lcdc_pipe->blt_addr = 0;
 		change++;
 	}
-	pr_info("%s: ov_blt_addr=%x\n", __func__, (int)lcdc_pipe->ov_blt_addr);
+	pr_info("%s: blt_addr=%x\n", __func__, (int)lcdc_pipe->blt_addr);
 	spin_unlock_irqrestore(&mdp_spin_lock, flag);
 
 	if (!change)
@@ -561,7 +561,7 @@
 
 	mdp4_overlayproc_cfg(lcdc_pipe);
 	mdp4_overlay_dmap_xy(lcdc_pipe);
-	if (lcdc_pipe->ov_blt_addr) {
+	if (lcdc_pipe->blt_addr) {
 		mdp4_overlay_lcdc_prefill(mfd);
 		mdp4_overlay_lcdc_prefill(mfd);
 	}
diff --git a/drivers/video/msm/mdp4_overlay_mddi.c b/drivers/video/msm/mdp4_overlay_mddi.c
index c4e6793..82864918 100644
--- a/drivers/video/msm/mdp4_overlay_mddi.c
+++ b/drivers/video/msm/mdp4_overlay_mddi.c
@@ -163,8 +163,7 @@
 
 		MDP_OUTP(MDP_BASE + 0x00098, 0x01);
 		mdp4_init_writeback_buf(mfd, MDP4_MIXER0);
-		pipe->ov_blt_addr = 0;
-		pipe->dma_blt_addr = 0;
+		pipe->blt_addr = 0;
 	} else {
 		pipe = mddi_pipe;
 	}
@@ -255,25 +254,23 @@
 	unsigned long flag;
 
 	pr_debug("%s: blt_end=%d blt_addr=%x pid=%d\n",
-		__func__, mddi_pipe->blt_end,
-		(int)mddi_pipe->ov_blt_addr, current->pid);
+	__func__, mddi_pipe->blt_end, (int)mddi_pipe->blt_addr, current->pid);
 
 	mdp4_allocate_writeback_buf(mfd, MDP4_MIXER0);
 
-	if (mfd->ov0_wb_buf->write_addr == 0) {
+	if (mfd->ov0_wb_buf->phys_addr == 0) {
 		pr_info("%s: no blt_base assigned\n", __func__);
 		return -EBUSY;
 	}
 
-	if (mddi_pipe->ov_blt_addr == 0) {
+	if (mddi_pipe->blt_addr == 0) {
 		mdp4_mddi_dma_busy_wait(mfd);
 		spin_lock_irqsave(&mdp_spin_lock, flag);
 		mddi_pipe->blt_end = 0;
 		mddi_pipe->blt_cnt = 0;
 		mddi_pipe->ov_cnt = 0;
 		mddi_pipe->dmap_cnt = 0;
-		mddi_pipe->ov_blt_addr = mfd->ov0_wb_buf->write_addr;
-		mddi_pipe->dma_blt_addr = mfd->ov0_wb_buf->write_addr;
+		mddi_pipe->blt_addr = mfd->ov0_wb_buf->phys_addr;
 		mdp4_stat.blt_mddi++;
 		spin_unlock_irqrestore(&mdp_spin_lock, flag);
 	return 0;
@@ -287,9 +284,9 @@
 	unsigned long flag;
 
 	pr_debug("%s: blt_end=%d blt_addr=%x\n",
-		 __func__, mddi_pipe->blt_end, (int)mddi_pipe->ov_blt_addr);
+		 __func__, mddi_pipe->blt_end, (int)mddi_pipe->blt_addr);
 
-	if ((mddi_pipe->blt_end == 0) && mddi_pipe->ov_blt_addr) {
+	if ((mddi_pipe->blt_end == 0) && mddi_pipe->blt_addr) {
 		spin_lock_irqsave(&mdp_spin_lock, flag);
 		mddi_pipe->blt_end = 1;	/* mark as end */
 		spin_unlock_irqrestore(&mdp_spin_lock, flag);
@@ -326,7 +323,7 @@
 	int bpp;
 	char *overlay_base;
 
-	if (pipe->ov_blt_addr == 0)
+	if (pipe->blt_addr == 0)
 		return;
 
 
@@ -339,7 +336,7 @@
 	if (pipe->dmap_cnt & 0x01)
 		off = pipe->src_height * pipe->src_width * bpp;
 
-	addr = pipe->ov_blt_addr + off;
+	addr = pipe->blt_addr + off;
 
 	/* dmap */
 	MDP_OUTP(MDP_BASE + 0x90008, addr);
@@ -347,7 +344,7 @@
 	off = 0;
 	if (pipe->ov_cnt & 0x01)
 		off = pipe->src_height * pipe->src_width * bpp;
-	addr2 = pipe->ov_blt_addr + off;
+	addr2 = pipe->blt_addr + off;
 	/* overlay 0 */
 	overlay_base = MDP_BASE + MDP4_OVERLAYPROC0_BASE;/* 0x10000 */
 	outpdw(overlay_base + 0x000c, addr2);
@@ -374,8 +371,7 @@
 
 		if (mddi_pipe->blt_end) {
 			mddi_pipe->blt_end = 0;
-			mddi_pipe->ov_blt_addr = 0;
-			mddi_pipe->dma_blt_addr = 0;
+			mddi_pipe->blt_addr = 0;
 			pr_debug("%s: END, ov_cnt=%d dmap_cnt=%d\n", __func__,
 				mddi_pipe->ov_cnt, mddi_pipe->dmap_cnt);
 			mdp_intr_mask &= ~INTR_DMA_P_DONE;
@@ -410,7 +406,7 @@
 {
 	int diff;
 
-	if (mddi_pipe->ov_blt_addr == 0) {
+	if (mddi_pipe->blt_addr == 0) {
 		mdp_pipe_ctrl(MDP_OVERLAY0_BLOCK, MDP_BLOCK_POWER_OFF, TRUE);
 		spin_lock(&mdp_spin_lock);
 		dma->busy = FALSE;
@@ -477,7 +473,7 @@
 		mdp4_mddi_dma_busy_wait(mddi_mfd);
 		mdp4_overlay_update_lcd(mddi_mfd);
 
-		if (mddi_pipe->ov_blt_addr)
+		if (mddi_pipe->blt_addr)
 			mdp4_mddi_blt_dmap_busy_wait(mddi_mfd);
 		mdp4_mddi_overlay_kickoff(mddi_mfd, mddi_pipe);
 		mddi_mfd->dma_update_flag = 1;
@@ -543,17 +539,17 @@
 	 * to be called before kickoff.
 	 * vice versa for blt disabled.
 	 */
-	if (mddi_pipe->ov_blt_addr && mddi_pipe->blt_cnt == 0)
+	if (mddi_pipe->blt_addr && mddi_pipe->blt_cnt == 0)
 		mdp4_overlay_update_lcd(mfd); /* first time */
-	else if (mddi_pipe->ov_blt_addr == 0  && mddi_pipe->blt_cnt) {
+	else if (mddi_pipe->blt_addr == 0  && mddi_pipe->blt_cnt) {
 		mdp4_overlay_update_lcd(mfd); /* last time */
 		mddi_pipe->blt_cnt = 0;
 	}
 
 	pr_debug("%s: blt_addr=%d blt_cnt=%d\n",
-		__func__, (int)mddi_pipe->ov_blt_addr, mddi_pipe->blt_cnt);
+		__func__, (int)mddi_pipe->blt_addr, mddi_pipe->blt_cnt);
 
-	if (mddi_pipe->ov_blt_addr)
+	if (mddi_pipe->blt_addr)
 		mdp4_mddi_blt_dmap_busy_wait(mddi_mfd);
 	mdp4_mddi_overlay_kickoff(mfd, pipe);
 }
@@ -576,7 +572,7 @@
 	mdp_enable_irq(MDP_OVERLAY0_TERM);
 	spin_lock_irqsave(&mdp_spin_lock, flag);
 	mfd->dma->busy = TRUE;
-	if (mddi_pipe->ov_blt_addr)
+	if (mddi_pipe->blt_addr)
 		mfd->dma->dmap_busy = TRUE;
 	spin_unlock_irqrestore(&mdp_spin_lock, flag);
 	/* start OVERLAY pipe */
@@ -661,7 +657,7 @@
 
 	mdp_enable_irq(MDP_DMA_S_TERM);
 
-	if (mddi_pipe->ov_blt_addr == 0)
+	if (mddi_pipe->blt_addr == 0)
 		mfd->dma->busy = TRUE;
 
 	mfd->ibuf_flushed = TRUE;
@@ -692,7 +688,7 @@
 	if (mfd && mfd->panel_power_on) {
 		mdp4_mddi_dma_busy_wait(mfd);
 
-		if (mddi_pipe && mddi_pipe->ov_blt_addr)
+		if (mddi_pipe && mddi_pipe->blt_addr)
 			mdp4_mddi_blt_dmap_busy_wait(mfd);
 
 		mdp4_overlay_update_lcd(mfd);
diff --git a/drivers/video/msm/mdp4_overlay_writeback.c b/drivers/video/msm/mdp4_overlay_writeback.c
index 0174309..d59c4a8 100644
--- a/drivers/video/msm/mdp4_overlay_writeback.c
+++ b/drivers/video/msm/mdp4_overlay_writeback.c
@@ -21,7 +21,7 @@
 #include <linux/delay.h>
 #include <mach/hardware.h>
 #include <linux/io.h>
-
+#include <mach/iommu_domains.h>
 #include <asm/system.h>
 #include <asm/mach-types.h>
 #include <linux/semaphore.h>
@@ -272,11 +272,11 @@
 	}
 	mutex_unlock(&mfd->writeback_mutex);
 
-	writeback_pipe->ov_blt_addr = (ulong) (node ? node->addr : NULL);
+	writeback_pipe->blt_addr = (ulong) (node ? node->addr : NULL);
 
-	if (!writeback_pipe->ov_blt_addr) {
+	if (!writeback_pipe->blt_addr) {
 		pr_err("%s: no writeback buffer 0x%x, %p\n", __func__,
-			(unsigned int)writeback_pipe->ov_blt_addr, node);
+				(unsigned int)writeback_pipe->blt_addr, node);
 		mutex_unlock(&mfd->unregister_mutex);
 		return;
 	}
@@ -324,13 +324,13 @@
 	}
 	mutex_unlock(&mfd->writeback_mutex);
 
-	writeback_pipe->ov_blt_addr = (ulong) (node ? node->addr : NULL);
+	writeback_pipe->blt_addr = (ulong) (node ? node->addr : NULL);
 
 	mutex_lock(&mfd->dma->ov_mutex);
 	pr_debug("%s in writeback\n", __func__);
-	if (writeback_pipe && !writeback_pipe->ov_blt_addr) {
+	if (writeback_pipe && !writeback_pipe->blt_addr) {
 		pr_err("%s: no writeback buffer 0x%x\n", __func__,
-				(unsigned int)writeback_pipe->ov_blt_addr);
+				(unsigned int)writeback_pipe->blt_addr);
 		ret = mdp4_overlay_writeback_update(mfd);
 		if (ret)
 			pr_err("%s: update failed writeback pipe NULL\n",
@@ -351,7 +351,7 @@
 		}
 
 		pr_debug("%s: in writeback pan display 0x%x\n", __func__,
-				(unsigned int)writeback_pipe->ov_blt_addr);
+				(unsigned int)writeback_pipe->blt_addr);
 		mdp4_writeback_kickoff_ui(mfd, writeback_pipe);
 		mdp4_iommu_unmap(writeback_pipe);
 
@@ -407,35 +407,42 @@
 			pr_err("%s: out of memory\n", __func__);
 			goto register_alloc_fail;
 		}
-
+		temp->ihdl = NULL;
 		if (data->iova)
 			temp->addr = (void *)(data->iova + data->offset);
-#ifdef CONFIG_MSM_MULTIMEDIA_USE_ION
-		else {
+		else if (mfd->iclient) {
 			struct ion_handle *srcp_ihdl;
 			ulong len;
-			srcp_ihdl = ion_import_fd(mfd->iclient,
+			srcp_ihdl = ion_import_dma_buf(mfd->iclient,
 						  data->memory_id);
 			if (IS_ERR_OR_NULL(srcp_ihdl)) {
 				pr_err("%s: ion import fd failed\n", __func__);
 				goto register_ion_fail;
 			}
-			if (ion_phys(mfd->iclient,
-				     srcp_ihdl,
-				     (ulong *)&temp->addr,
-				     (size_t *)&len)) {
-				pr_err("%s: unable to get ion phys\n",
+
+			if (ion_map_iommu(mfd->iclient,
+					  srcp_ihdl,
+					  DISPLAY_DOMAIN,
+					  GEN_POOL,
+					  SZ_4K,
+					  0,
+					  (ulong *)&temp->addr,
+					  (ulong *)&len,
+					  0,
+					  ION_IOMMU_UNMAP_DELAYED)) {
+				ion_free(mfd->iclient, srcp_ihdl);
+				pr_err("%s: unable to get ion mapping addr\n",
 				       __func__);
 				goto register_ion_fail;
 			}
 			temp->addr += data->offset;
+			temp->ihdl = srcp_ihdl;
 		}
-#else
 		else {
 			pr_err("%s: only support ion memory\n", __func__);
 			goto register_ion_fail;
 		}
-#endif
+
 		memcpy(&temp->buf_info, data, sizeof(struct msmfb_data));
 		if (mdp4_overlay_writeback_register_buffer(mfd, temp)) {
 			pr_err("%s: error registering node\n", __func__);
@@ -514,6 +521,15 @@
 		list_del(&node->active_entry);
 		node->state = WITH_CLIENT;
 		memcpy(data, &node->buf_info, sizeof(struct msmfb_data));
+		if (!data->iova)
+			if (mfd->iclient && node->ihdl) {
+				ion_unmap_iommu(mfd->iclient,
+						node->ihdl,
+						DISPLAY_DOMAIN,
+						GEN_POOL);
+				ion_free(mfd->iclient,
+					 node->ihdl);
+			}
 	} else {
 		pr_err("node is NULL. Somebody else dequeued?\n");
 		rc = -ENOBUFS;
diff --git a/drivers/video/msm/mdp4_util.c b/drivers/video/msm/mdp4_util.c
index 208e3ce..f192b12 100644
--- a/drivers/video/msm/mdp4_util.c
+++ b/drivers/video/msm/mdp4_util.c
@@ -2559,14 +2559,13 @@
 		buf = mfd->ov1_wb_buf;
 
 	buf->ihdl = NULL;
-	buf->write_addr = 0;
-	buf->read_addr = 0;
+	buf->phys_addr = 0;
 }
 
 u32 mdp4_allocate_writeback_buf(struct msm_fb_data_type *mfd, u32 mix_num)
 {
 	struct mdp_buf_type *buf;
-	ion_phys_addr_t	addr, read_addr = 0;
+	ion_phys_addr_t	addr;
 	size_t buffer_size;
 	unsigned long len;
 
@@ -2575,7 +2574,7 @@
 	else
 		buf = mfd->ov1_wb_buf;
 
-	if (buf->write_addr || !IS_ERR_OR_NULL(buf->ihdl))
+	if (buf->phys_addr || !IS_ERR_OR_NULL(buf->ihdl))
 		return 0;
 
 	if (!buf->size) {
@@ -2592,25 +2591,10 @@
 		buf->ihdl = ion_alloc(mfd->iclient, buffer_size, SZ_4K,
 			mfd->mem_hid);
 		if (!IS_ERR_OR_NULL(buf->ihdl)) {
-			if (mfd->mem_hid & ION_SECURE) {
-				if (ion_phys(mfd->iclient, buf->ihdl,
-					&addr, (unsigned *)&len)) {
-					pr_err("%s:%d: ion_phys map failed\n",
-						 __func__, __LINE__);
-					return -ENOMEM;
-				}
-			} else {
-				if (ion_map_iommu(mfd->iclient, buf->ihdl,
-					DISPLAY_WRITE_DOMAIN, GEN_POOL, SZ_4K,
-					0, &addr, &len, 0, 0)) {
-					pr_err("ion_map_iommu() write failed\n");
-					return -ENOMEM;
-				}
-			}
 			if (ion_map_iommu(mfd->iclient, buf->ihdl,
-				DISPLAY_READ_DOMAIN, GEN_POOL, SZ_4K,
-				0, &read_addr, &len, 0, 0)) {
-				pr_err("ion_map_iommu() read failed\n");
+				DISPLAY_DOMAIN, GEN_POOL, SZ_4K, 0, &addr,
+				&len, 0, 0)) {
+				pr_err("ion_map_iommu() failed\n");
 				return -ENOMEM;
 			}
 		} else {
@@ -2625,13 +2609,7 @@
 	if (addr) {
 		pr_info("allocating %d bytes at %x for mdp writeback\n",
 			buffer_size, (u32) addr);
-		buf->write_addr = addr;
-
-		if (read_addr)
-			buf->read_addr = read_addr;
-		else
-			buf->read_addr = buf->write_addr;
-
+		buf->phys_addr = addr;
 		return 0;
 	} else {
 		pr_err("%s cannot allocate memory for mdp writeback!\n",
@@ -2651,25 +2629,21 @@
 
 	if (!IS_ERR_OR_NULL(mfd->iclient)) {
 		if (!IS_ERR_OR_NULL(buf->ihdl)) {
-			if (!(mfd->mem_hid & ION_SECURE))
-				ion_unmap_iommu(mfd->iclient, buf->ihdl,
-					DISPLAY_WRITE_DOMAIN, GEN_POOL);
 			ion_unmap_iommu(mfd->iclient, buf->ihdl,
-				DISPLAY_READ_DOMAIN, GEN_POOL);
+				DISPLAY_DOMAIN, GEN_POOL);
 			ion_free(mfd->iclient, buf->ihdl);
 			pr_debug("%s:%d free writeback imem\n", __func__,
 				__LINE__);
 			buf->ihdl = NULL;
 		}
 	} else {
-		if (buf->write_addr) {
-			free_contiguous_memory_by_paddr(buf->write_addr);
+		if (buf->phys_addr) {
+			free_contiguous_memory_by_paddr(buf->phys_addr);
 			pr_debug("%s:%d free writeback pmem\n", __func__,
 				__LINE__);
 		}
 	}
-	buf->write_addr = 0;
-	buf->read_addr = 0;
+	buf->phys_addr = 0;
 }
 
 static int mdp4_update_pcc_regs(uint32_t offset,
diff --git a/drivers/video/msm/mdp_ppp.c b/drivers/video/msm/mdp_ppp.c
index 4009ffc..8999395 100644
--- a/drivers/video/msm/mdp_ppp.c
+++ b/drivers/video/msm/mdp_ppp.c
@@ -61,6 +61,7 @@
 
 extern uint32 mdp_plv[];
 extern struct semaphore mdp_ppp_mutex;
+static struct ion_client *ppp_display_iclient;
 
 int mdp_get_bytes_per_pixel(uint32_t format,
 				 struct msm_fb_data_type *mfd)
@@ -1276,41 +1277,69 @@
 	return kgsl_gem_obj_addr(img->memory_id, (int) img->priv, start, len);
 }
 
-int get_img(struct mdp_img *img, struct fb_info *info, unsigned long *start,
-	    unsigned long *len, struct file **pp_file)
+int get_img(struct mdp_img *img, struct mdp_blit_req *req,
+		struct fb_info *info, unsigned long *start, unsigned long *len,
+		struct file **srcp_file, struct ion_handle **srcp_ihdl)
 {
-	int put_needed, ret = 0;
+	int put_needed, fb_num, ret = 0;
 	struct file *file;
+#ifdef CONFIG_MSM_MULTIMEDIA_USE_ION
+	struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par;
+#endif
 #ifdef CONFIG_ANDROID_PMEM
 	unsigned long vstart;
 #endif
 
-#ifdef CONFIG_ANDROID_PMEM
-	if (!get_pmem_file(img->memory_id, start, &vstart, len, pp_file))
-		return 0;
-#endif
-	file = fget_light(img->memory_id, &put_needed);
-	if (file == NULL)
-		return -1;
+	if (req->flags & MDP_MEMORY_ID_TYPE_FB) {
+		file = fget_light(img->memory_id, &put_needed);
+		if (file == NULL)
+			return -EINVAL;
 
-	if (MAJOR(file->f_dentry->d_inode->i_rdev) == FB_MAJOR) {
-		*start = info->fix.smem_start;
-		*len = info->fix.smem_len;
-		*pp_file = file;
-	} else {
-		ret = -1;
-		fput_light(file, put_needed);
+		if (MAJOR(file->f_dentry->d_inode->i_rdev) == FB_MAJOR) {
+			fb_num = MINOR(file->f_dentry->d_inode->i_rdev);
+			if (get_fb_phys_info(start, len, fb_num,
+				DISPLAY_SUBSYSTEM_ID)) {
+				pr_err("get_fb_phys_info() failed\n");
+				fput_light(file, put_needed);
+			} else {
+				*srcp_file = file;
+			}
+
+			return ret;
+		} else {
+			fput_light(file, put_needed);
+		}
 	}
-	return ret;
+#ifdef CONFIG_MSM_MULTIMEDIA_USE_ION
+	*srcp_ihdl = ion_import_dma_buf(mfd->iclient, img->memory_id);
+	if (IS_ERR_OR_NULL(*srcp_ihdl))
+		return PTR_ERR(*srcp_ihdl);
+
+	if (!ion_phys(mfd->iclient, *srcp_ihdl, start, (size_t *) len))
+		return ret;
+	else
+		return -EINVAL;
+#endif
+
+#ifdef CONFIG_ANDROID_PMEM
+	if (!get_pmem_file(img->memory_id, start, &vstart, len, srcp_file))
+		return ret;
+	else
+		return -EINVAL;
+#endif
 }
 
-
-void put_img(struct file *p_src_file)
+void put_img(struct file *p_src_file, struct ion_handle *p_ihdl)
 {
 #ifdef CONFIG_ANDROID_PMEM
 	if (p_src_file)
 		put_pmem_file(p_src_file);
 #endif
+
+#ifdef CONFIG_MSM_MULTIMEDIA_USE_ION
+	if (!IS_ERR_OR_NULL(p_ihdl))
+		ion_free(ppp_display_iclient, p_ihdl);
+#endif
 }
 
 
@@ -1318,7 +1347,8 @@
 	unsigned long srcp0_start, unsigned long srcp0_len,
 	unsigned long srcp1_start, unsigned long srcp1_len,
 	unsigned long dst_start, unsigned long dst_len,
-	struct file *p_src_file, struct file *p_dst_file)
+	struct file *p_src_file, struct file *p_dst_file,
+	struct ion_handle **src_ihdl, struct ion_handle **dst_ihdl)
 {
 	MDPIBUF iBuf;
 	u32 dst_width, dst_height;
@@ -1330,9 +1360,9 @@
 		req->src.format = mfd->fb_imgType;
 
 	if (mdp_ppp_verify_req(req)) {
-		printk(KERN_ERR "mdp_ppp: invalid image!\n");
-		put_img(p_src_file);
-		put_img(p_dst_file);
+		pr_err("mdp_ppp: invalid image!\n");
+		put_img(p_src_file, *src_ihdl);
+		put_img(p_dst_file, *dst_ihdl);
 		return -1;
 	}
 
@@ -1402,8 +1432,8 @@
 #if defined(CONFIG_FB_MSM_MDP31) || defined(CONFIG_FB_MSM_MDP303)
 		iBuf.mdpImg.mdpOp |= MDPOP_FG_PM_ALPHA;
 #else
-		put_img(p_src_file);
-		put_img(p_dst_file);
+		put_img(p_src_file, *src_ihdl);
+		put_img(p_dst_file, *dst_ihdl);
 		return -EINVAL;
 #endif
 	}
@@ -1413,8 +1443,8 @@
 		if ((req->src.format != MDP_Y_CBCR_H2V2) &&
 			(req->src.format != MDP_Y_CRCB_H2V2)) {
 #endif
-			put_img(p_src_file);
-			put_img(p_dst_file);
+			put_img(p_src_file, *src_ihdl);
+			put_img(p_dst_file, *dst_ihdl);
 			return -EINVAL;
 #ifdef CONFIG_FB_MSM_MDP31
 		}
@@ -1453,16 +1483,16 @@
 			printk(KERN_ERR
 				"%s: sharpening strength out of range\n",
 				__func__);
-			put_img(p_src_file);
-			put_img(p_dst_file);
+			put_img(p_src_file, *src_ihdl);
+			put_img(p_dst_file, *dst_ihdl);
 			return -EINVAL;
 		}
 
 		iBuf.mdpImg.mdpOp |= MDPOP_ASCALE | MDPOP_SHARPENING;
 		iBuf.mdpImg.sp_value = req->sharpening_strength & 0xff;
 #else
-		put_img(p_src_file);
-		put_img(p_dst_file);
+		put_img(p_src_file, *src_ihdl);
+		put_img(p_dst_file, *dst_ihdl);
 		return -EINVAL;
 #endif
 	}
@@ -1567,8 +1597,8 @@
 	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
 	up(&mdp_ppp_mutex);
 
-	put_img(p_src_file);
-	put_img(p_dst_file);
+	put_img(p_src_file, *src_ihdl);
+	put_img(p_dst_file, *dst_ihdl);
 	return 0;
 }
 
@@ -1578,29 +1608,36 @@
 	unsigned long src_len = 0;
 	unsigned long dst_len = 0;
 	struct file *p_src_file = 0 , *p_dst_file = 0;
+	struct ion_handle *src_ihdl = NULL;
+	struct ion_handle *dst_ihdl = NULL;
+	struct msm_fb_data_type *mfd = info->par;
+	ppp_display_iclient = mfd->iclient;
 
 	if (req->flags & MDP_BLIT_SRC_GEM)
 		get_gem_img(&req->src, &src_start, &src_len);
 	else
-		get_img(&req->src, info, &src_start, &src_len, &p_src_file);
+		get_img(&req->src, req, info, &src_start, &src_len, &p_src_file,
+			&src_ihdl);
 	if (src_len == 0) {
-		printk(KERN_ERR "mdp_ppp: could not retrieve image from "
+		pr_err("mdp_ppp: could not retrieve source image from "
 		       "memory\n");
 		return -EINVAL;
 	}
 	if (req->flags & MDP_BLIT_DST_GEM)
 		get_gem_img(&req->dst, &dst_start, &dst_len);
 	else
-		get_img(&req->dst, info, &dst_start, &dst_len, &p_dst_file);
+		get_img(&req->dst, req, info, &dst_start, &dst_len, &p_dst_file,
+			&dst_ihdl);
+
 	if (dst_len == 0) {
-		put_img(p_src_file);
-		printk(KERN_ERR "mdp_ppp: could not retrieve image from "
+		put_img(p_src_file, src_ihdl);
+		pr_err("mdp_ppp: could not retrieve destination image from "
 		       "memory\n");
 		return -EINVAL;
 	}
 
 	return mdp_ppp_blit_addr(info, req, src_start, src_len, 0, 0, dst_start,
-		dst_len, p_src_file, p_dst_file);
+		dst_len, p_src_file, p_dst_file, &src_ihdl, &dst_ihdl);
 }
 
 static struct mdp_blit_req overlay_req;
@@ -1677,7 +1714,8 @@
 
 	ret = mdp_ppp_blit_addr(info, &overlay_req,
 		srcp0_addr, srcp0_size, srcp1_addr, srcp1_size,
-		info->fix.smem_start, info->fix.smem_len, NULL, NULL);
+		info->fix.smem_start, info->fix.smem_len, NULL, NULL,
+		NULL, NULL);
 
 	if (ret)
 		pr_err("%s:Blitting overlay failed(%d)\n", __func__, ret);
diff --git a/drivers/video/msm/mdss/Makefile b/drivers/video/msm/mdss/Makefile
index 780e0c6..492437e 100644
--- a/drivers/video/msm/mdss/Makefile
+++ b/drivers/video/msm/mdss/Makefile
@@ -4,6 +4,7 @@
 mdss-mdp-objs += mdss_mdp_intf_writeback.o
 mdss-mdp-objs += mdss_mdp_rotator.o
 mdss-mdp-objs += mdss_mdp_overlay.o
+mdss-mdp-objs += mdss_mdp_wb.o
 obj-$(CONFIG_FB_MSM_MDSS) += mdss-mdp.o
 obj-$(CONFIG_FB_MSM_MDSS) += mdss_fb.o
 obj-$(CONFIG_FB_MSM_MDSS_WRITEBACK) += mdss_wb.o
diff --git a/drivers/video/msm/mdss/mdss_fb.c b/drivers/video/msm/mdss/mdss_fb.c
index 0fedb6c..a96bf3a 100644
--- a/drivers/video/msm/mdss/mdss_fb.c
+++ b/drivers/video/msm/mdss/mdss_fb.c
@@ -1158,6 +1158,20 @@
 	return ret;
 }
 
+struct fb_info *msm_fb_get_writeback_fb(void)
+{
+	int c = 0;
+	for (c = 0; c < fbi_list_index; ++c) {
+		struct msm_fb_data_type *mfd;
+		mfd = (struct msm_fb_data_type *)fbi_list[c]->par;
+		if (mfd->panel.type == WRITEBACK_PANEL)
+			return fbi_list[c];
+	}
+
+	return NULL;
+}
+EXPORT_SYMBOL(msm_fb_get_writeback_fb);
+
 int mdss_register_panel(struct mdss_panel_data *pdata)
 {
 	struct platform_device *mdss_fb_dev = NULL;
diff --git a/drivers/video/msm/mdss/mdss_fb.h b/drivers/video/msm/mdss/mdss_fb.h
index a3f0dbe..ac6c213 100644
--- a/drivers/video/msm/mdss/mdss_fb.h
+++ b/drivers/video/msm/mdss/mdss_fb.h
@@ -55,6 +55,7 @@
 
 	int op_enable;
 	u32 fb_imgType;
+	u32 dst_format;
 
 	int hw_refresh;
 
@@ -90,6 +91,7 @@
 	struct ion_client *iclient;
 
 	struct mdss_mdp_ctl *ctl;
+	struct mdss_mdp_wb *wb;
 };
 
 int mdss_fb_get_phys_info(unsigned long *start, unsigned long *len, int fb_num);
diff --git a/drivers/video/msm/mdss/mdss_mdp.c b/drivers/video/msm/mdss/mdss_mdp.c
index 41e0c18..46e49da 100644
--- a/drivers/video/msm/mdss/mdss_mdp.c
+++ b/drivers/video/msm/mdss/mdss_mdp.c
@@ -554,10 +554,10 @@
 	}
 	disable_irq(mdss_res->irq);
 
-	mdss_res->fs = regulator_get(NULL, "gdsc_mdss");
+	mdss_res->fs = regulator_get(&pdev->dev, "vdd");
 	if (IS_ERR_OR_NULL(mdss_res->fs)) {
 		mdss_res->fs = NULL;
-		pr_err("unable to get gdsc_mdss regulator\n");
+		pr_err("unable to get gdsc regulator\n");
 		goto error;
 	}
 	regulator_enable(mdss_res->fs);
diff --git a/drivers/video/msm/mdss/mdss_mdp.h b/drivers/video/msm/mdss/mdss_mdp.h
index 2cdd9f6..4489fbb 100644
--- a/drivers/video/msm/mdss/mdss_mdp.h
+++ b/drivers/video/msm/mdss/mdss_mdp.h
@@ -247,6 +247,12 @@
 	unsigned long smp[MAX_PLANES];
 };
 
+struct mdss_mdp_writeback_arg {
+	struct mdss_mdp_data *data;
+	void (*callback_fnc) (void *arg);
+	void *priv_data;
+};
+
 static inline void mdss_mdp_ctl_write(struct mdss_mdp_ctl *ctl,
 				      u32 reg, u32 val)
 {
@@ -313,4 +319,7 @@
 int mdss_mdp_get_img(struct ion_client *iclient, struct msmfb_data *img,
 		     struct mdss_mdp_img_data *data);
 
+int mdss_mdp_wb_kickoff(struct mdss_mdp_ctl *ctl);
+int mdss_mdp_wb_ioctl_handler(struct msm_fb_data_type *mfd, u32 cmd, void *arg);
+
 #endif /* MDSS_MDP_H */
diff --git a/drivers/video/msm/mdss/mdss_mdp_intf_writeback.c b/drivers/video/msm/mdss/mdss_mdp_intf_writeback.c
index c1bc58a..af422b7 100644
--- a/drivers/video/msm/mdss/mdss_mdp_intf_writeback.c
+++ b/drivers/video/msm/mdss/mdss_mdp_intf_writeback.c
@@ -39,9 +39,12 @@
 	u16 height;
 	u8 rot90;
 
-	struct completion comp;
+	int initialized;
+
 	struct mdss_mdp_plane_sizes dst_planes;
-	struct mdss_mdp_data wb_data;
+
+	void (*callback_fnc) (void *arg);
+	void *callback_arg;
 };
 
 static struct mdss_mdp_writeback_ctx wb_ctx_list[MDSS_MDP_MAX_WRITEBACK] = {
@@ -72,8 +75,6 @@
 	},
 };
 
-static void *videomemory;
-
 static int mdss_mdp_writeback_addr_setup(struct mdss_mdp_writeback_ctx *ctx,
 					 struct mdss_mdp_data *data)
 {
@@ -101,7 +102,8 @@
 {
 	struct mdss_mdp_format_params *fmt;
 	u32 dst_format, pattern, ystride0, ystride1, outsize, chroma_samp;
-	int off, ret;
+	int off;
+	u32 opmode = ctx->opmode;
 
 	pr_debug("wb_num=%d format=%d\n", ctx->wb_num, ctx->format);
 
@@ -111,11 +113,30 @@
 	fmt = mdss_mdp_get_format_params(ctx->format);
 	if (!fmt) {
 		pr_err("wb format=%d not supported\n", ctx->format);
-		return ret;
+		return -EINVAL;
 	}
 
 	chroma_samp = fmt->chroma_sample;
-	if (ctx->rot90) {
+
+	if (ctx->type != MDSS_MDP_WRITEBACK_TYPE_ROTATOR && fmt->is_yuv) {
+		mdss_mdp_csc_setup(MDSS_MDP_BLOCK_WB, ctx->wb_num, 0,
+				   MDSS_MDP_CSC_RGB2YUV);
+		opmode |= (1 << 8) |	/* CSC_EN */
+			  (0 << 9) |	/* SRC_DATA=RGB */
+			  (1 << 10);	/* DST_DATA=YCBCR */
+
+		switch (chroma_samp) {
+		case MDSS_MDP_CHROMA_RGB:
+		case MDSS_MDP_CHROMA_420:
+		case MDSS_MDP_CHROMA_H2V1:
+			opmode |= (chroma_samp << 11);
+			break;
+		case MDSS_MDP_CHROMA_H1V2:
+		default:
+			pr_err("unsupported wb chroma samp=%d\n", chroma_samp);
+			return -EINVAL;
+		}
+	} else if (ctx->rot90) {
 		if (chroma_samp == MDSS_MDP_CHROMA_H2V1)
 			chroma_samp = MDSS_MDP_CHROMA_H1V2;
 		else if (chroma_samp == MDSS_MDP_CHROMA_H1V2)
@@ -147,7 +168,7 @@
 
 	off = MDSS_MDP_REG_WB_OFFSET(ctx->wb_num);
 	MDSS_MDP_REG_WRITE(off + MDSS_MDP_REG_WB_DST_FORMAT, dst_format);
-	MDSS_MDP_REG_WRITE(off + MDSS_MDP_REG_WB_DST_OP_MODE, ctx->opmode);
+	MDSS_MDP_REG_WRITE(off + MDSS_MDP_REG_WB_DST_OP_MODE, opmode);
 	MDSS_MDP_REG_WRITE(off + MDSS_MDP_REG_WB_DST_PACK_PATTERN, pattern);
 	MDSS_MDP_REG_WRITE(off + MDSS_MDP_REG_WB_DST_YSTRIDE0, ystride0);
 	MDSS_MDP_REG_WRITE(off + MDSS_MDP_REG_WB_DST_YSTRIDE1, ystride1);
@@ -156,32 +177,24 @@
 	return 0;
 }
 
-static int mdss_mdp_writeback_wfd_setup(struct mdss_mdp_ctl *ctl,
-					struct mdss_mdp_writeback_ctx *ctx)
+static int mdss_mdp_writeback_prepare_wfd(struct mdss_mdp_ctl *ctl, void *arg)
 {
-	struct msm_fb_data_type *mfd;
-	struct fb_info *fbi;
+	struct mdss_mdp_writeback_ctx *ctx;
 	int ret;
-	u32 plane_size;
 
-	mfd = ctl->mfd;
-	fbi = mfd->fbi;
+	ctx = (struct mdss_mdp_writeback_ctx *) ctl->priv_data;
+	if (!ctx)
+		return -ENODEV;
 
-	pr_debug("setup ctl=%d\n", ctl->num);
+	if (ctx->initialized) /* already set */
+		return 0;
+
+	pr_debug("wfd setup ctl=%d\n", ctl->num);
 
 	ctx->opmode = 0;
 	ctx->format = ctl->dst_format;
-	ctx->width = fbi->var.xres;
-	ctx->height = fbi->var.yres;
-
-	plane_size = ctx->width * ctx->height * fbi->var.bits_per_pixel / 8;
-
-	videomemory = (void *) fbi->fix.smem_start + fbi->fix.smem_len -
-		      plane_size;
-
-	ctx->wb_data.num_planes = 1;
-	ctx->wb_data.p[0].addr = (u32) videomemory;
-	ctx->wb_data.p[0].len = plane_size;
+	ctx->width = ctl->width;
+	ctx->height = ctl->height;
 
 	ret = mdss_mdp_writeback_format_setup(ctx);
 	if (ret) {
@@ -189,16 +202,30 @@
 		return ret;
 	}
 
-	ctl->flush_bits |=  BIT(16); /* WB */
+	ctx->initialized = true;
 
 	return 0;
 }
 
-static int mdss_mdp_writeback_rot_setup(struct mdss_mdp_ctl *ctl,
-					struct mdss_mdp_writeback_ctx *ctx,
-					struct mdss_mdp_rotator_session *rot)
+static int mdss_mdp_writeback_prepare_rot(struct mdss_mdp_ctl *ctl, void *arg)
 {
-	pr_debug("rotator wb_num=%d\n", ctx->wb_num);
+	struct mdss_mdp_writeback_ctx *ctx;
+	struct mdss_mdp_writeback_arg *wb_args;
+	struct mdss_mdp_rotator_session *rot;
+
+	ctx = (struct mdss_mdp_writeback_ctx *) ctl->priv_data;
+	if (!ctx)
+		return -ENODEV;
+	wb_args = (struct mdss_mdp_writeback_arg *) arg;
+	if (!wb_args)
+		return -ENOENT;
+
+	rot = (struct mdss_mdp_rotator_session *) wb_args->priv_data;
+	if (!rot) {
+		pr_err("unable to retrieve rot session ctl=%d\n", ctl->num);
+		return -ENODEV;
+	}
+	pr_debug("rot setup wb_num=%d\n", ctx->wb_num);
 
 	ctx->opmode = BIT(6); /* ROT EN */
 	if (ROT_BLK_SIZE == 128)
@@ -238,27 +265,6 @@
 	return 0;
 }
 
-static int mdss_mdp_writeback_prepare(struct mdss_mdp_ctl *ctl, void *arg)
-{
-	struct mdss_mdp_writeback_ctx *ctx;
-	ctx = (struct mdss_mdp_writeback_ctx *) ctl->priv_data;
-	if (!ctx)
-		return -ENODEV;
-
-	if (ctx->type == MDSS_MDP_WRITEBACK_TYPE_ROTATOR) {
-		struct mdss_mdp_rotator_session *rot;
-		rot = (struct mdss_mdp_rotator_session *) arg;
-		if (!rot) {
-			pr_err("unable to retrieve rot session ctl=%d\n",
-			       ctl->num);
-			return -ENODEV;
-		}
-		mdss_mdp_writeback_rot_setup(ctl, ctx, rot);
-	}
-
-	return 0;
-}
-
 static void mdss_mdp_writeback_intr_done(void *arg)
 {
 	struct mdss_mdp_writeback_ctx *ctx;
@@ -274,14 +280,14 @@
 	mdss_mdp_irq_disable_nosync(ctx->intr_type, ctx->intf_num);
 	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, true);
 
-	complete_all(&ctx->comp);
+	if (ctx->callback_fnc)
+		ctx->callback_fnc(ctx->callback_arg);
 }
 
 static int mdss_mdp_writeback_display(struct mdss_mdp_ctl *ctl, void *arg)
 {
 	struct mdss_mdp_writeback_ctx *ctx;
-	struct mdss_mdp_rotator_session *rot = NULL;
-	struct mdss_mdp_data *wb_data;
+	struct mdss_mdp_writeback_arg *wb_args;
 	u32 flush_bits;
 	int ret;
 
@@ -289,28 +295,22 @@
 	if (!ctx)
 		return -ENODEV;
 
-	if (ctx->type == MDSS_MDP_WRITEBACK_TYPE_ROTATOR) {
-		rot = (struct mdss_mdp_rotator_session *) arg;
-		if (!rot) {
-			pr_err("unable to retrieve rot session ctl=%d\n",
-			       ctl->num);
-			return -ENODEV;
-		}
-		wb_data = rot->dst_data;
-	} else {
-		wb_data = &ctx->wb_data;
-	}
+	wb_args = (struct mdss_mdp_writeback_arg *) arg;
+	if (!wb_args)
+		return -ENOENT;
 
-	ret = mdss_mdp_writeback_addr_setup(ctx, wb_data);
+	ret = mdss_mdp_writeback_addr_setup(ctx, wb_args->data);
 	if (ret) {
 		pr_err("writeback data setup error ctl=%d\n", ctl->num);
 		return ret;
 	}
 
+	ctx->callback_fnc = wb_args->callback_fnc;
+	ctx->callback_arg = wb_args->priv_data;
+
 	flush_bits = BIT(16); /* WB */
 	mdss_mdp_ctl_write(ctl, MDSS_MDP_REG_CTL_FLUSH, flush_bits);
 
-	INIT_COMPLETION(ctx->comp);
 	mdss_mdp_set_intr_callback(ctx->intr_type, ctx->intf_num,
 				   mdss_mdp_writeback_intr_done, ctx);
 	mdss_mdp_irq_enable(ctx->intr_type, ctx->intf_num);
@@ -319,17 +319,6 @@
 	mdss_mdp_ctl_write(ctl, MDSS_MDP_REG_CTL_START, 1);
 	wmb();
 
-	if (rot) {
-		pr_debug("rotator kickoff wb_num=%d\n", ctx->wb_num);
-		mutex_lock(&rot->lock);
-		rot->comp = &ctx->comp;
-		rot->busy = 1;
-		mutex_unlock(&rot->lock);
-	} else {
-		pr_debug("writeback kickoff wb_num=%d\n", ctx->wb_num);
-		wait_for_completion_interruptible(&ctx->comp);
-	}
-
 	return 0;
 }
 
@@ -355,16 +344,12 @@
 	}
 	ctl->priv_data = ctx;
 	ctx->wb_num = ctl->num;	/* wb num should match ctl num */
+	ctx->initialized = false;
 
-	init_completion(&ctx->comp);
-
-	if (ctx->type == MDSS_MDP_WRITEBACK_TYPE_WFD)
-		ret = mdss_mdp_writeback_wfd_setup(ctl, ctx);
-	else if (ctx->type == MDSS_MDP_WRITEBACK_TYPE_ROTATOR)
-		ctl->prepare_fnc = mdss_mdp_writeback_prepare;
-	else /* line mode not supported */
-		return -ENOSYS;
-
+	if (ctx->type == MDSS_MDP_WRITEBACK_TYPE_ROTATOR)
+		ctl->prepare_fnc = mdss_mdp_writeback_prepare_rot;
+	else /* wfd or line mode */
+		ctl->prepare_fnc = mdss_mdp_writeback_prepare_wfd;
 	ctl->stop_fnc = mdss_mdp_writeback_stop;
 	ctl->display_fnc = mdss_mdp_writeback_display;
 
diff --git a/drivers/video/msm/mdss/mdss_mdp_overlay.c b/drivers/video/msm/mdss/mdss_mdp_overlay.c
index f1b158d..43ddb5e 100644
--- a/drivers/video/msm/mdss/mdss_mdp_overlay.c
+++ b/drivers/video/msm/mdss/mdss_mdp_overlay.c
@@ -793,6 +793,11 @@
 			ret = 0;
 		}
 		break;
+
+	default:
+		if (mfd->panel_info.type == WRITEBACK_PANEL)
+			ret = mdss_mdp_wb_ioctl_handler(mfd, cmd, argp);
+		break;
 	}
 
 	return ret;
@@ -809,7 +814,11 @@
 	mfd->cursor_update = mdss_mdp_hw_cursor_update;
 	mfd->dma_fnc = mdss_mdp_overlay_pan_display;
 	mfd->ioctl_handler = mdss_mdp_overlay_ioctl_handler;
-	mfd->kickoff_fnc = mdss_mdp_overlay_kickoff;
+
+	if (mfd->panel_info.type == WRITEBACK_PANEL)
+		mfd->kickoff_fnc = mdss_mdp_wb_kickoff;
+	else
+		mfd->kickoff_fnc = mdss_mdp_overlay_kickoff;
 
 	return 0;
 }
diff --git a/drivers/video/msm/mdss/mdss_mdp_rotator.c b/drivers/video/msm/mdss/mdss_mdp_rotator.c
index 628b7f5..fc3a843 100644
--- a/drivers/video/msm/mdss/mdss_mdp_rotator.c
+++ b/drivers/video/msm/mdss/mdss_mdp_rotator.c
@@ -39,6 +39,7 @@
 			rot->ref_cnt++;
 			rot->session_id = i | MDSS_MDP_ROT_SESSION_MASK;
 			mutex_init(&rot->lock);
+			init_completion(&rot->comp);
 			break;
 		}
 	}
@@ -65,19 +66,6 @@
 	return NULL;
 }
 
-static int mdss_mdp_rotator_busy_wait(struct mdss_mdp_rotator_session *rot)
-{
-	mutex_lock(&rot->lock);
-	if (rot->busy) {
-		pr_debug("waiting for rot=%d to complete\n", rot->pipe->num);
-		wait_for_completion_interruptible(rot->comp);
-		rot->busy = 0;
-	}
-	mutex_unlock(&rot->lock);
-
-	return 0;
-}
-
 static struct mdss_mdp_pipe *mdss_mdp_rotator_pipe_alloc(void)
 {
 	struct mdss_mdp_mixer *mixer;
@@ -110,6 +98,52 @@
 	return pipe;
 }
 
+static int mdss_mdp_rotator_busy_wait(struct mdss_mdp_rotator_session *rot)
+{
+	mutex_lock(&rot->lock);
+	if (rot->busy) {
+		pr_debug("waiting for rot=%d to complete\n", rot->pipe->num);
+		wait_for_completion_interruptible(&rot->comp);
+		rot->busy = false;
+
+	}
+	mutex_unlock(&rot->lock);
+
+	return 0;
+}
+
+static void mdss_mdp_rotator_callback(void *arg)
+{
+	struct mdss_mdp_rotator_session *rot;
+
+	rot = (struct mdss_mdp_rotator_session *) arg;
+	if (rot)
+		complete(&rot->comp);
+}
+
+static int mdss_mdp_rotator_kickoff(struct mdss_mdp_ctl *ctl,
+				    struct mdss_mdp_rotator_session *rot,
+				    struct mdss_mdp_data *dst_data)
+{
+	int ret;
+	struct mdss_mdp_writeback_arg wb_args = {
+		.callback_fnc = mdss_mdp_rotator_callback,
+		.data = dst_data,
+		.priv_data = rot,
+	};
+
+	mutex_lock(&rot->lock);
+	INIT_COMPLETION(rot->comp);
+	rot->busy = true;
+	ret = mdss_mdp_display_commit(ctl, &wb_args);
+	if (ret) {
+		rot->busy = false;
+		pr_err("problem with kickoff rot pipe=%d\n", rot->pipe->num);
+	}
+	mutex_unlock(&rot->lock);
+	return ret;
+}
+
 static int mdss_mdp_rotator_pipe_dequeue(struct mdss_mdp_rotator_session *rot)
 {
 	if (rot->pipe) {
@@ -182,15 +216,13 @@
 		rot_pipe->params_changed++;
 	}
 
-	rot->dst_data = dst_data;
-
 	ret = mdss_mdp_pipe_queue_data(rot->pipe, src_data);
 	if (ret) {
 		pr_err("unable to queue rot data\n");
 		goto done;
 	}
 
-	ret = mdss_mdp_display_commit(ctl, rot);
+	ret = mdss_mdp_rotator_kickoff(ctl, rot, dst_data);
 
 done:
 	mutex_unlock(&rotator_lock);
diff --git a/drivers/video/msm/mdss/mdss_mdp_rotator.h b/drivers/video/msm/mdss/mdss_mdp_rotator.h
index 8940c46..1e4b81e0 100644
--- a/drivers/video/msm/mdss/mdss_mdp_rotator.h
+++ b/drivers/video/msm/mdss/mdss_mdp_rotator.h
@@ -33,12 +33,11 @@
 
 	u32 bwc_mode;
 	struct mdss_mdp_pipe *pipe;
-	struct mdss_mdp_data *dst_data;
 
 	struct mutex lock;
+	struct completion comp;
 	u8 busy;
 	u8 no_wait;
-	struct completion *comp;
 
 	struct list_head head;
 };
diff --git a/drivers/video/msm/mdss/mdss_mdp_util.c b/drivers/video/msm/mdss/mdss_mdp_util.c
index 2e86806..f6b4fce 100644
--- a/drivers/video/msm/mdss/mdss_mdp_util.c
+++ b/drivers/video/msm/mdss/mdss_mdp_util.c
@@ -326,7 +326,7 @@
 		}
 	} else if (iclient) {
 		data->iclient = iclient;
-		data->srcp_ihdl = ion_import_fd(iclient, img->memory_id);
+		data->srcp_ihdl = ion_import_dma_buf(iclient, img->memory_id);
 		if (IS_ERR_OR_NULL(data->srcp_ihdl))
 			return PTR_ERR(data->srcp_ihdl);
 		ret = ion_phys(iclient, data->srcp_ihdl,
diff --git a/drivers/video/msm/mdss/mdss_mdp_wb.c b/drivers/video/msm/mdss/mdss_mdp_wb.c
new file mode 100644
index 0000000..da55edc
--- /dev/null
+++ b/drivers/video/msm/mdss/mdss_mdp_wb.c
@@ -0,0 +1,539 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt)	"%s: " fmt, __func__
+
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/major.h>
+#include <linux/module.h>
+#include <linux/uaccess.h>
+
+#include "mdss_mdp.h"
+#include "mdss_fb.h"
+
+#define DEBUG_WRITEBACK
+
+enum mdss_mdp_wb_state {
+	WB_OPEN,
+	WB_START,
+	WB_STOPING,
+	WB_STOP
+};
+
+struct mdss_mdp_wb {
+	u32 fb_ndx;
+	struct mutex lock;
+	struct list_head busy_queue;
+	struct list_head free_queue;
+	struct list_head register_queue;
+	wait_queue_head_t wait_q;
+	u32 state;
+};
+
+enum mdss_mdp_wb_node_state {
+	REGISTERED,
+	IN_FREE_QUEUE,
+	IN_BUSY_QUEUE,
+	WITH_CLIENT
+};
+
+struct mdss_mdp_wb_data {
+	struct list_head registered_entry;
+	struct list_head active_entry;
+	struct msmfb_data buf_info;
+	struct mdss_mdp_data buf_data;
+	int state;
+};
+
+static DEFINE_MUTEX(mdss_mdp_wb_buf_lock);
+static struct mdss_mdp_wb mdss_mdp_wb_info;
+
+#ifdef DEBUG_WRITEBACK
+/* for debugging: writeback output buffer to framebuffer memory */
+static inline
+struct mdss_mdp_data *mdss_mdp_wb_debug_buffer(struct msm_fb_data_type *mfd)
+{
+	static void *videomemory;
+	static void *mdss_wb_mem;
+	static struct mdss_mdp_data buffer = {
+		.num_planes = 1,
+	};
+
+	struct fb_info *fbi;
+	int img_size;
+	int offset;
+
+
+	fbi = mfd->fbi;
+	img_size = fbi->var.xres * fbi->var.yres * fbi->var.bits_per_pixel / 8;
+	offset = fbi->fix.smem_len - img_size;
+
+	videomemory = fbi->screen_base + offset;
+	mdss_wb_mem = (void *)(fbi->fix.smem_start + offset);
+
+	buffer.p[0].addr = fbi->fix.smem_start + offset;
+	buffer.p[0].len = img_size;
+
+	return &buffer;
+}
+#else
+static inline
+struct mdss_mdp_data *mdss_mdp_wb_debug_buffer(struct msm_fb_data_type *mfd)
+{
+	return NULL;
+}
+#endif
+
+static int mdss_mdp_wb_init(struct msm_fb_data_type *mfd)
+{
+	struct mdss_mdp_wb *wb;
+
+	mutex_lock(&mdss_mdp_wb_buf_lock);
+	wb = mfd->wb;
+	if (wb == NULL) {
+		wb = &mdss_mdp_wb_info;
+		wb->fb_ndx = mfd->index;
+		mfd->wb = wb;
+	} else if (mfd->index != wb->fb_ndx) {
+		pr_err("only one writeback intf supported at a time\n");
+		mutex_unlock(&mdss_mdp_wb_buf_lock);
+		return -EMLINK;
+	} else {
+		pr_debug("writeback already initialized\n");
+	}
+
+	pr_debug("init writeback on fb%d\n", wb->fb_ndx);
+
+	mutex_init(&wb->lock);
+	INIT_LIST_HEAD(&wb->free_queue);
+	INIT_LIST_HEAD(&wb->busy_queue);
+	INIT_LIST_HEAD(&wb->register_queue);
+	wb->state = WB_OPEN;
+	init_waitqueue_head(&wb->wait_q);
+
+	mfd->wb = wb;
+	mutex_unlock(&mdss_mdp_wb_buf_lock);
+	return 0;
+}
+
+static int mdss_mdp_wb_terminate(struct msm_fb_data_type *mfd)
+{
+	struct mdss_mdp_wb *wb = mfd->wb;
+
+	if (!wb) {
+		pr_err("unable to terminate, writeback is not initialized\n");
+		return -ENODEV;
+	}
+
+	pr_debug("terminate writeback\n");
+
+	mutex_lock(&mdss_mdp_wb_buf_lock);
+	mutex_lock(&wb->lock);
+	if (!list_empty(&wb->register_queue)) {
+		struct mdss_mdp_wb_data *node, *temp;
+		list_for_each_entry_safe(node, temp, &wb->register_queue,
+					 registered_entry) {
+			list_del(&node->registered_entry);
+			kfree(node);
+		}
+	}
+	mutex_unlock(&wb->lock);
+
+	mfd->wb = NULL;
+	mutex_unlock(&mdss_mdp_wb_buf_lock);
+
+	return 0;
+}
+
+static int mdss_mdp_wb_start(struct msm_fb_data_type *mfd)
+{
+	struct mdss_mdp_wb *wb = mfd->wb;
+
+	if (!wb) {
+		pr_err("unable to start, writeback is not initialized\n");
+		return -ENODEV;
+	}
+
+	mutex_lock(&wb->lock);
+	wb->state = WB_START;
+	mutex_unlock(&wb->lock);
+	wake_up(&wb->wait_q);
+
+	return 0;
+}
+
+static int mdss_mdp_wb_stop(struct msm_fb_data_type *mfd)
+{
+	struct mdss_mdp_wb *wb = mfd->wb;
+
+	if (!wb) {
+		pr_err("unable to stop, writeback is not initialized\n");
+		return -ENODEV;
+	}
+
+	mutex_lock(&wb->lock);
+	wb->state = WB_STOPING;
+	mutex_unlock(&wb->lock);
+	wake_up(&wb->wait_q);
+
+	return 0;
+}
+
+static int mdss_mdp_wb_register_node(struct mdss_mdp_wb *wb,
+				     struct mdss_mdp_wb_data *node)
+{
+	if (!node) {
+		pr_err("invalid wb node\n");
+		return -EINVAL;
+	}
+
+	node->state = REGISTERED;
+	list_add_tail(&node->registered_entry, &wb->register_queue);
+
+	return 0;
+}
+
+static struct mdss_mdp_wb_data *get_local_node(struct mdss_mdp_wb *wb,
+					       struct msmfb_data *data)
+{
+	struct mdss_mdp_wb_data *node;
+	struct mdss_mdp_img_data *buf;
+	int ret;
+
+	if (!data->iova)
+		return NULL;
+
+	if (!list_empty(&wb->register_queue)) {
+		list_for_each_entry(node, &wb->register_queue,
+				    registered_entry) {
+			if (node->buf_info.iova == data->iova) {
+				pr_debug("found node iova=%x addr=%x\n",
+					 data->iova,
+					 node->buf_data.p[0].addr);
+				return node;
+			}
+		}
+	}
+
+	node = kzalloc(sizeof(struct mdss_mdp_wb_data), GFP_KERNEL);
+	if (node == NULL) {
+		pr_err("out of memory\n");
+		return NULL;
+	}
+
+	node->buf_data.num_planes = 1;
+	buf = &node->buf_data.p[0];
+	buf->addr = (u32) (data->iova + data->offset);
+	buf->len = UINT_MAX; /* trusted source */
+	ret = mdss_mdp_wb_register_node(wb, node);
+	if (IS_ERR_VALUE(ret)) {
+		pr_err("error registering wb node\n");
+		kfree(node);
+		return NULL;
+	}
+
+	pr_debug("register node iova=0x%x addr=0x%x\n", data->iova, buf->addr);
+
+	return node;
+}
+
+static struct mdss_mdp_wb_data *get_user_node(struct msm_fb_data_type *mfd,
+					      struct msmfb_data *data)
+{
+	struct mdss_mdp_wb *wb = mfd->wb;
+	struct mdss_mdp_wb_data *node;
+	struct mdss_mdp_img_data *buf;
+	int ret;
+
+	node = kzalloc(sizeof(struct mdss_mdp_wb_data), GFP_KERNEL);
+	if (node == NULL) {
+		pr_err("out of memory\n");
+		return NULL;
+	}
+
+	node->buf_data.num_planes = 1;
+	buf = &node->buf_data.p[0];
+	ret = mdss_mdp_get_img(mfd->iclient, data, buf);
+	if (IS_ERR_VALUE(ret)) {
+		pr_err("error getting buffer info\n");
+		goto register_fail;
+	}
+	memcpy(&node->buf_info, data, sizeof(*data));
+
+	ret = mdss_mdp_wb_register_node(wb, node);
+	if (IS_ERR_VALUE(ret)) {
+		pr_err("error registering wb node\n");
+		goto register_fail;
+	}
+
+	pr_debug("register node mem_id=%d offset=%u addr=0x%x len=%d\n",
+		 data->memory_id, data->offset, buf->addr, buf->len);
+
+	return node;
+
+register_fail:
+	kfree(node);
+	return NULL;
+}
+
+static int mdss_mdp_wb_queue(struct msm_fb_data_type *mfd,
+			     struct msmfb_data *data, int local)
+{
+	struct mdss_mdp_wb *wb = mfd->wb;
+	struct mdss_mdp_wb_data *node = NULL;
+	int ret = 0;
+
+	if (!wb) {
+		pr_err("unable to queue, writeback is not initialized\n");
+		return -ENODEV;
+	}
+
+	pr_debug("fb%d queue\n", wb->fb_ndx);
+
+	mutex_lock(&wb->lock);
+	if (local)
+		node = get_local_node(wb, data);
+	if (node == NULL)
+		node = get_user_node(mfd, data);
+
+	if (!node || node->state == IN_BUSY_QUEUE ||
+	    node->state == IN_FREE_QUEUE) {
+		pr_err("memory not registered or buffer already queued\n");
+		ret = -EINVAL;
+	} else {
+		list_add_tail(&node->active_entry, &wb->free_queue);
+		node->state = IN_FREE_QUEUE;
+	}
+	mutex_unlock(&wb->lock);
+
+	return ret;
+}
+
+static int is_buffer_ready(struct mdss_mdp_wb *wb)
+{
+	int rc;
+	mutex_lock(&wb->lock);
+	rc = !list_empty(&wb->busy_queue) || (wb->state == WB_STOPING);
+	mutex_unlock(&wb->lock);
+
+	return rc;
+}
+
+static int mdss_mdp_wb_dequeue(struct msm_fb_data_type *mfd,
+			       struct msmfb_data *data)
+{
+	struct mdss_mdp_wb *wb = mfd->wb;
+	struct mdss_mdp_wb_data *node = NULL;
+	int ret;
+
+	if (!wb) {
+		pr_err("unable to dequeue, writeback is not initialized\n");
+		return -ENODEV;
+	}
+
+	ret = wait_event_interruptible(wb->wait_q, is_buffer_ready(wb));
+	if (ret) {
+		pr_err("failed to get dequeued buffer\n");
+		return -ENOBUFS;
+	}
+
+	mutex_lock(&wb->lock);
+	if (wb->state == WB_STOPING) {
+		pr_debug("wfd stopped\n");
+		wb->state = WB_STOP;
+		ret = -ENOBUFS;
+	} else if (!list_empty(&wb->busy_queue)) {
+		struct mdss_mdp_img_data *buf;
+		node = list_first_entry(&wb->busy_queue,
+					struct mdss_mdp_wb_data,
+					active_entry);
+		list_del(&node->active_entry);
+		node->state = WITH_CLIENT;
+		memcpy(data, &node->buf_info, sizeof(*data));
+
+		buf = &node->buf_data.p[0];
+		pr_debug("found node addr=%x len=%d\n", buf->addr, buf->len);
+	} else {
+		pr_debug("node is NULL, wait for next\n");
+		ret = -ENOBUFS;
+	}
+	mutex_unlock(&wb->lock);
+	return ret;
+}
+
+static void mdss_mdp_wb_callback(void *arg)
+{
+	if (arg)
+		complete((struct completion *) arg);
+}
+
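+/*
+ * Pull the oldest buffer off the free queue, run a writeback commit into it
+ * and wait for the hardware completion callback; the filled buffer is then
+ * moved to the busy queue so a client can claim it with a dequeue call.
+ */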
+int mdss_mdp_wb_kickoff(struct mdss_mdp_ctl *ctl)
+{
+	struct mdss_mdp_wb *wb;
+	struct mdss_mdp_wb_data *node = NULL;
+	int ret = 0;
+	DECLARE_COMPLETION_ONSTACK(comp);
+	struct mdss_mdp_writeback_arg wb_args = {
+		.callback_fnc = mdss_mdp_wb_callback,
+		.priv_data = &comp,
+	};
+
+	if (!ctl || !ctl->mfd)
+		return -ENODEV;
+
+	mutex_lock(&mdss_mdp_wb_buf_lock);
+	wb = ctl->mfd->wb;
+	if (wb) {
+		mutex_lock(&wb->lock);
+		if (!list_empty(&wb->free_queue) && wb->state != WB_STOPING &&
+		    wb->state != WB_STOP) {
+			node = list_first_entry(&wb->free_queue,
+						struct mdss_mdp_wb_data,
+						active_entry);
+			list_del(&node->active_entry);
+			node->state = IN_BUSY_QUEUE;
+			wb_args.data = &node->buf_data;
+		} else {
+			pr_debug("unable to get buf wb state=%d\n", wb->state);
+		}
+		mutex_unlock(&wb->lock);
+	}
+
+	if (wb_args.data == NULL)
+		wb_args.data = mdss_mdp_wb_debug_buffer(ctl->mfd);
+
+	if (wb_args.data == NULL) {
+		pr_err("unable to get writeback buf ctl=%d\n", ctl->num);
+		ret = -ENOMEM;
+		goto kickoff_fail;
+	}
+
+	ret = mdss_mdp_display_commit(ctl, &wb_args);
+	if (ret) {
+		pr_err("error on commit ctl=%d\n", ctl->num);
+		goto kickoff_fail;
+	}
+
+	wait_for_completion_interruptible(&comp);
+	if (wb && node) {
+		mutex_lock(&wb->lock);
+		list_add_tail(&node->active_entry, &wb->busy_queue);
+		mutex_unlock(&wb->lock);
+		wake_up(&wb->wait_q);
+	}
+
+kickoff_fail:
+	mutex_unlock(&mdss_mdp_wb_buf_lock);
+	return ret;
+}
+
+int mdss_mdp_wb_ioctl_handler(struct msm_fb_data_type *mfd, u32 cmd, void *arg)
+{
+	struct msmfb_data data;
+	int ret = -ENOSYS;
+
+	switch (cmd) {
+	case MSMFB_WRITEBACK_INIT:
+		ret = mdss_mdp_wb_init(mfd);
+		break;
+	case MSMFB_WRITEBACK_START:
+		ret = mdss_mdp_wb_start(mfd);
+		break;
+	case MSMFB_WRITEBACK_STOP:
+		ret = mdss_mdp_wb_stop(mfd);
+		break;
+	case MSMFB_WRITEBACK_QUEUE_BUFFER:
+		if (!copy_from_user(&data, arg, sizeof(data))) {
+			ret = mdss_mdp_wb_queue(mfd, &data, false);
+		} else {
+			pr_err("wb queue buf failed on copy_from_user\n");
+			ret = -EFAULT;
+		}
+		break;
+	case MSMFB_WRITEBACK_DEQUEUE_BUFFER:
+		if (!copy_from_user(&data, arg, sizeof(data))) {
+			ret = mdss_mdp_wb_dequeue(mfd, &data);
+			if (!ret && copy_to_user(arg, &data, sizeof(data))) {
+				pr_err("wb dequeue buf failed on copy_to_user\n");
+				ret = -EFAULT;
+			}
+		} else {
+			pr_err("wb dequeue buf failed on copy_from_user\n");
+			ret = -EFAULT;
+		}
+		break;
+	case MSMFB_WRITEBACK_TERMINATE:
+		ret = mdss_mdp_wb_terminate(mfd);
+		break;
+	}
+
+	return ret;
+}
+
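+/*
+ * Expected sequence for an in-kernel writeback client (illustrative only,
+ * error handling omitted):
+ *
+ *	struct msmfb_data buf = { ... };
+ *
+ *	msm_fb_writeback_init(info);
+ *	msm_fb_writeback_start(info);
+ *	msm_fb_writeback_queue_buffer(info, &buf);
+ *	...display commit writes into the buffer...
+ *	msm_fb_writeback_dequeue_buffer(info, &buf);
+ *	msm_fb_writeback_stop(info);
+ *	msm_fb_writeback_terminate(info);
+ */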
+int msm_fb_writeback_start(struct fb_info *info)
+{
+	struct msm_fb_data_type *mfd = (struct msm_fb_data_type *) info->par;
+
+	if (!mfd)
+		return -ENODEV;
+
+	return mdss_mdp_wb_start(mfd);
+}
+EXPORT_SYMBOL(msm_fb_writeback_start);
+
+int msm_fb_writeback_queue_buffer(struct fb_info *info,
+				  struct msmfb_data *data)
+{
+	struct msm_fb_data_type *mfd = (struct msm_fb_data_type *) info->par;
+
+	if (!mfd)
+		return -ENODEV;
+
+	return mdss_mdp_wb_queue(mfd, data, true);
+}
+EXPORT_SYMBOL(msm_fb_writeback_queue_buffer);
+
+int msm_fb_writeback_dequeue_buffer(struct fb_info *info,
+				    struct msmfb_data *data)
+{
+	struct msm_fb_data_type *mfd = (struct msm_fb_data_type *) info->par;
+
+	if (!mfd)
+		return -ENODEV;
+
+	return mdss_mdp_wb_dequeue(mfd, data);
+}
+EXPORT_SYMBOL(msm_fb_writeback_dequeue_buffer);
+
+int msm_fb_writeback_stop(struct fb_info *info)
+{
+	struct msm_fb_data_type *mfd = (struct msm_fb_data_type *) info->par;
+
+	if (!mfd)
+		return -ENODEV;
+
+	return mdss_mdp_wb_stop(mfd);
+}
+EXPORT_SYMBOL(msm_fb_writeback_stop);
+
+int msm_fb_writeback_init(struct fb_info *info)
+{
+	struct msm_fb_data_type *mfd = (struct msm_fb_data_type *) info->par;
+
+	if (!mfd)
+		return -ENODEV;
+
+	return mdss_mdp_wb_init(mfd);
+}
+EXPORT_SYMBOL(msm_fb_writeback_init);
+
+int msm_fb_writeback_terminate(struct fb_info *info)
+{
+	struct msm_fb_data_type *mfd = (struct msm_fb_data_type *) info->par;
+
+	if (!mfd)
+		return -ENODEV;
+
+	return mdss_mdp_wb_terminate(mfd);
+}
+EXPORT_SYMBOL(msm_fb_writeback_terminate);
diff --git a/drivers/video/msm/mipi_toshiba_video_wsvga_pt.c b/drivers/video/msm/mipi_toshiba_video_wsvga_pt.c
index 6edd776..eb2946b 100644
--- a/drivers/video/msm/mipi_toshiba_video_wsvga_pt.c
+++ b/drivers/video/msm/mipi_toshiba_video_wsvga_pt.c
@@ -22,14 +22,14 @@
     /* regulator */
 	{0x09, 0x08, 0x05, 0x00, 0x20},
 	/* timing */
-	{0xab, 0x8a, 0x18, 0x00, 0x92, 0x97, 0x1b, 0x8c,
-	0x0c, 0x03, 0x04, 0xa0},
+	{0x73, 0x2e, 0x11, 0x00, 0x3c, 0x46, 0x14, 0x31,
+	0x1c, 0x03, 0x04, 0xa0},
     /* phy ctrl */
 	{0x5f, 0x00, 0x00, 0x10},
     /* strength */
 	{0xff, 0x00, 0x06, 0x00},
 	/* pll control */
-	{0x0, 0x7f, 0x31, 0xda, 0x00, 0x50, 0x48, 0x63,
+	{0x0, 0x49, 0x30, 0xc4, 0x00, 0x20, 0x07, 0x62,
 	0x41, 0x0f, 0x01,
 	0x00, 0x14, 0x03, 0x00, 0x02, 0x00, 0x20, 0x00, 0x01 },
 };
@@ -59,8 +59,8 @@
 	pinfo.lcdc.h_back_porch = 16;
 	pinfo.lcdc.h_front_porch = 23;
 	pinfo.lcdc.h_pulse_width = 8;
-	pinfo.lcdc.v_back_porch = 2;
-	pinfo.lcdc.v_front_porch = 7;
+	pinfo.lcdc.v_back_porch = 3;
+	pinfo.lcdc.v_front_porch = 45;
 	pinfo.lcdc.v_pulse_width = 2;
 	pinfo.lcdc.border_clr = 0;	/* blk */
 	pinfo.lcdc.underflow_clr = 0xff;	/* blue */
@@ -68,7 +68,6 @@
 	pinfo.bl_max = MIPI_TOSHIBA_PWM_LEVEL;
 	pinfo.bl_min = 1;
 	pinfo.fb_num = 2;
-	pinfo.clk_rate = 384000000;
 
 	pinfo.mipi.mode = DSI_VIDEO_MODE;
 	pinfo.mipi.pulse_mode_hsa_he = FALSE;
@@ -84,8 +83,8 @@
 	pinfo.mipi.data_lane0 = TRUE;
 	pinfo.mipi.data_lane1 = TRUE;
 	pinfo.mipi.data_lane2 = TRUE;
-	pinfo.mipi.t_clk_post = 0x20;
-	pinfo.mipi.t_clk_pre = 0x2d;
+	pinfo.mipi.t_clk_post = 0x04;
+	pinfo.mipi.t_clk_pre = 0x1a;
 	pinfo.mipi.esc_byte_ratio = 4;
 	pinfo.mipi.stream = 0; /* dma_p */
 	pinfo.mipi.mdp_trigger = 0;
diff --git a/drivers/video/msm/msm_fb.c b/drivers/video/msm/msm_fb.c
index 18ee3e6..81a6e50 100644
--- a/drivers/video/msm/msm_fb.c
+++ b/drivers/video/msm/msm_fb.c
@@ -1326,7 +1326,7 @@
 	fbi->fix.smem_start = (unsigned long)fbram_phys;
 
 	msm_iommu_map_contig_buffer(fbi->fix.smem_start,
-					DISPLAY_READ_DOMAIN,
+					DISPLAY_DOMAIN,
 					GEN_POOL,
 					fbi->fix.smem_len,
 					SZ_4K,
@@ -1334,7 +1334,7 @@
 					&(mfd->display_iova));
 
 	msm_iommu_map_contig_buffer(fbi->fix.smem_start,
-					ROTATOR_SRC_DOMAIN,
+					ROTATOR_DOMAIN,
 					GEN_POOL,
 					fbi->fix.smem_len,
 					SZ_4K,
diff --git a/drivers/video/msm/msm_fb.h b/drivers/video/msm/msm_fb.h
index c9eb7dd..0658365 100644
--- a/drivers/video/msm/msm_fb.h
+++ b/drivers/video/msm/msm_fb.h
@@ -60,6 +60,7 @@
 	struct list_head registered_entry;
 	struct list_head active_entry;
 	void *addr;
+	struct ion_handle *ihdl;
 	struct file *pmem_file;
 	struct msmfb_data buf_info;
 	struct msmfb_img img;
diff --git a/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_helper.c b/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_helper.c
index 6fd5656..d7ebd54 100644
--- a/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_helper.c
+++ b/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_helper.c
@@ -168,6 +168,28 @@
 			} else if (operation == DDL_DPB_OP_MARK_FREE) {
 				dpb_mask->client_mask |= (0x1 << loopc);
 				*found_frame = *in_out_frame;
+				if ((decoder->meta_data_enable_flag) &&
+				    (in_out_frame->vcd_frm.buff_ion_handle)) {
+					struct ddl_context *ddl_context =
+						ddl_get_context();
+					unsigned long *vaddr =
+						(unsigned long *)((u32)
+						in_out_frame->vcd_frm.virtual +
+						decoder->meta_data_offset);
+					DDL_MSG_LOW("%s: Cache clean: vaddr"\
+						" (%p), offset %u, size %u",
+						__func__,
+						in_out_frame->vcd_frm.virtual,
+						decoder->meta_data_offset,
+						decoder->suffix);
+					msm_ion_do_cache_op(
+						ddl_context->video_ion_client,
+						in_out_frame->vcd_frm.\
+						buff_ion_handle,
+						vaddr,
+						(unsigned long)decoder->suffix,
+						ION_IOC_CLEAN_CACHES);
+				}
 			}
 		} else {
 			in_out_frame->vcd_frm.physical = NULL;
diff --git a/drivers/video/msm/vidc/1080p/resource_tracker/vcd_res_tracker.c b/drivers/video/msm/vidc/1080p/resource_tracker/vcd_res_tracker.c
index a5171f0..291de5f 100644
--- a/drivers/video/msm/vidc/1080p/resource_tracker/vcd_res_tracker.c
+++ b/drivers/video/msm/vidc/1080p/resource_tracker/vcd_res_tracker.c
@@ -25,7 +25,6 @@
 #include "vidc.h"
 #include "vcd_res_tracker.h"
 
-#define PIL_FW_BASE_ADDR 0x9fe00000
 #define PIL_FW_SIZE 0x200000
 
 static unsigned int vidc_clk_table[4] = {
@@ -181,6 +180,7 @@
 {
 	u32 alloc_size;
 	struct ddl_context *ddl_context;
+	unsigned long fw_addr;
 	int rc = 0;
 	DBG_PMEM("\n%s() IN: Requested alloc size(%u)", __func__, (u32)sz);
 	if (!addr) {
@@ -216,8 +216,9 @@
 				goto bail_out;
 			}
 		} else {
+			fw_addr = resource_context.vidc_platform_data->fw_addr;
 			addr->alloc_handle = NULL;
-			addr->alloced_phys_addr = PIL_FW_BASE_ADDR;
+			addr->alloced_phys_addr = fw_addr;
 			addr->buffer_size = sz;
 		}
 	} else {
@@ -966,6 +967,10 @@
 		}
 		msm_ion_secure_heap(ION_HEAP(resource_context.memtype));
 		msm_ion_secure_heap(ION_HEAP(resource_context.cmd_mem_type));
+
+		if (resource_context.vidc_platform_data->secure_wb_heap)
+			msm_ion_secure_heap(ION_HEAP(ION_CP_WB_HEAP_ID));
+
 		res_trk_disable_iommu_clocks();
 		mutex_unlock(&resource_context.secure_lock);
 	}
@@ -988,6 +993,10 @@
 		}
 		msm_ion_unsecure_heap(ION_HEAP(resource_context.cmd_mem_type));
 		msm_ion_unsecure_heap(ION_HEAP(resource_context.memtype));
+
+		if (resource_context.vidc_platform_data->secure_wb_heap)
+			msm_ion_unsecure_heap(ION_HEAP(ION_CP_WB_HEAP_ID));
+
 		res_trk_disable_iommu_clocks();
 		mutex_unlock(&resource_context.secure_lock);
 	}
diff --git a/drivers/video/msm/vidc/720p/ddl/vcd_ddl_errors.c b/drivers/video/msm/vidc/720p/ddl/vcd_ddl_errors.c
index ac5bce9..91136f3 100644
--- a/drivers/video/msm/vidc/720p/ddl/vcd_ddl_errors.c
+++ b/drivers/video/msm/vidc/720p/ddl/vcd_ddl_errors.c
@@ -391,8 +391,10 @@
 		ddl->decoding &&
 		!ddl->codec_data.decoder.header_in_start &&
 		!ddl->codec_data.decoder.dec_disp_info.img_size_x &&
-		!ddl->codec_data.decoder.dec_disp_info.img_size_y
-		) {
+		!ddl->codec_data.decoder.dec_disp_info.img_size_y &&
+		!eos) {
+		DBG("Treat header in start error %u as success",
+			vcd_status);
 		/* this is first frame seq. header only case */
 		vcd_status = VCD_S_SUCCESS;
 		ddl->input_frame.vcd_frm.flags |=
@@ -426,9 +428,10 @@
 	}
 
 	/* if it is decoder EOS case */
-	if (ddl->decoding && eos)
+	if (ddl->decoding && eos) {
+		DBG("DEC-EOS_RUN");
 		ddl_decode_eos_run(ddl);
-	else
+	} else
 		DDL_IDLE(ddl_context);
 
 	return true;
diff --git a/drivers/video/msm/vidc/720p/ddl/vcd_ddl_interrupt_handler.c b/drivers/video/msm/vidc/720p/ddl/vcd_ddl_interrupt_handler.c
index 5fa9b09..fe71dc1 100644
--- a/drivers/video/msm/vidc/720p/ddl/vcd_ddl_interrupt_handler.c
+++ b/drivers/video/msm/vidc/720p/ddl/vcd_ddl_interrupt_handler.c
@@ -290,8 +290,6 @@
 			decoder->client_output_buf_req.actual_count
 			&& decoder->progressive_only)
 			need_reconfig = false;
-		if (input_vcd_frm->flags & VCD_FRAME_FLAG_EOS)
-			need_reconfig = false;
 		if ((input_vcd_frm->data_len <= seq_hdr_info.dec_frm_size ||
 			 (input_vcd_frm->flags & VCD_FRAME_FLAG_CODECCONFIG)) &&
 			(!need_reconfig ||
diff --git a/drivers/video/msm/vidc/common/dec/vdec.c b/drivers/video/msm/vidc/common/dec/vdec.c
index 3076aa1..634011b 100644
--- a/drivers/video/msm/vidc/common/dec/vdec.c
+++ b/drivers/video/msm/vidc/common/dec/vdec.c
@@ -348,10 +348,15 @@
 				pmem_fd, kernel_vaddr, buffer_index,
 				&buff_handle);
 		if (ion_flag == CACHED && buff_handle) {
+			DBG("%s: Cache invalidate: vaddr (%p), "\
+				"size %u\n", __func__,
+				(void *)kernel_vaddr,
+				vcd_frame_data->alloc_len);
 			msm_ion_do_cache_op(client_ctx->user_ion_client,
 					buff_handle,
 					(unsigned long *) kernel_vaddr,
-					(unsigned long)vcd_frame_data->data_len,
+					(unsigned long)vcd_frame_data->\
+					alloc_len,
 					ION_IOC_INV_CACHES);
 		}
 	}
@@ -886,7 +891,7 @@
 		vcd_h264_mv_buffer->client_data = (void *) mapped_buffer;
 		vcd_h264_mv_buffer->dev_addr = (u8 *)mapped_buffer->iova[0];
 	} else {
-		client_ctx->h264_mv_ion_handle = ion_import_fd(
+		client_ctx->h264_mv_ion_handle = ion_import_dma_buf(
 					client_ctx->user_ion_client,
 					vcd_h264_mv_buffer->pmem_fd);
 		if (IS_ERR_OR_NULL(client_ctx->h264_mv_ion_handle)) {
@@ -1785,7 +1790,7 @@
 			}
 			put_pmem_file(pmem_file);
 		} else {
-			client_ctx->seq_hdr_ion_handle = ion_import_fd(
+			client_ctx->seq_hdr_ion_handle = ion_import_dma_buf(
 				client_ctx->user_ion_client,
 				seq_header.pmem_fd);
 			if (!client_ctx->seq_hdr_ion_handle) {
diff --git a/drivers/video/msm/vidc/common/enc/venc_internal.c b/drivers/video/msm/vidc/common/enc/venc_internal.c
index 43e8d5e..9450ee7 100644
--- a/drivers/video/msm/vidc/common/enc/venc_internal.c
+++ b/drivers/video/msm/vidc/common/enc/venc_internal.c
@@ -1821,7 +1821,7 @@
 			control->client_data = (void *) mapped_buffer;
 			control->dev_addr = (u8 *)mapped_buffer->iova[0];
 	} else {
-		client_ctx->recon_buffer_ion_handle[i] = ion_import_fd(
+		client_ctx->recon_buffer_ion_handle[i] = ion_import_dma_buf(
 				client_ctx->user_ion_client, control->pmem_fd);
 		if (IS_ERR_OR_NULL(client_ctx->recon_buffer_ion_handle[i])) {
 			ERR("%s(): get_ION_handle failed\n", __func__);
diff --git a/drivers/video/msm/vidc/common/init/vidc_init.c b/drivers/video/msm/vidc/common/init/vidc_init.c
index 23c990a..dcacb3c 100644
--- a/drivers/video/msm/vidc/common/init/vidc_init.c
+++ b/drivers/video/msm/vidc/common/init/vidc_init.c
@@ -627,7 +627,7 @@
 			buf_addr_table[*num_of_buffers].dev_addr =
 				mapped_buffer->iova[0];
 		} else {
-			buff_ion_handle = ion_import_fd(
+			buff_ion_handle = ion_import_dma_buf(
 				client_ctx->user_ion_client, pmem_fd);
 			if (IS_ERR_OR_NULL(buff_ion_handle)) {
 				ERR("%s(): get_ION_handle failed\n",
diff --git a/drivers/video/msm/vidc/common/vcd/vcd_core.h b/drivers/video/msm/vidc/common/vcd/vcd_core.h
index 5351589..79bcac0 100644
--- a/drivers/video/msm/vidc/common/vcd/vcd_core.h
+++ b/drivers/video/msm/vidc/common/vcd/vcd_core.h
@@ -37,6 +37,7 @@
 
 #define VCD_TIMESTAMP_RESOLUTION             1000000
 #define VCD_DEC_INITIAL_FRAME_RATE           30
+#define VCD_MAXPERF_FPS_THRESHOLD_X_1000     (59*1000)
 
 #define VCD_FIRST_IP_RCVD                    0x00000004
 #define VCD_FIRST_OP_RCVD                    0x00000008
diff --git a/drivers/video/msm/vidc/common/vcd/vcd_power_sm.c b/drivers/video/msm/vidc/common/vcd/vcd_power_sm.c
index 33b2300..44d270a 100644
--- a/drivers/video/msm/vidc/common/vcd/vcd_power_sm.c
+++ b/drivers/video/msm/vidc/common/vcd/vcd_power_sm.c
@@ -341,6 +341,18 @@
 	u32 new_perf_lvl;
 	new_perf_lvl = frm_p_units *\
 		(fps->fps_numerator / fps->fps_denominator);
+
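+	/*
+	 * Worked example: for 60 fps content with fps_numerator = 60 and
+	 * fps_denominator = 1, (60 * 1000) / 1 = 60000 exceeds the 59000
+	 * threshold, so the clock is pinned at the maximum performance
+	 * level instead of the frame-rate scaled value computed above.
+	 */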
+	if ((fps->fps_numerator * 1000) / fps->fps_denominator
+		 > VCD_MAXPERF_FPS_THRESHOLD_X_1000) {
+		u32 max_perf_level = 0;
+		if (res_trk_get_max_perf_level(&max_perf_level)) {
+			new_perf_lvl = max_perf_level;
+			VCD_MSG_HIGH("Using max perf level(%d) for >60fps\n",
+						 new_perf_lvl);
+		} else {
+			VCD_MSG_ERROR("Failed to get max perf level\n");
+		}
+	}
 	if (cctxt->status.req_perf_lvl) {
 		dev_ctxt->reqd_perf_lvl =
 		    dev_ctxt->reqd_perf_lvl - cctxt->reqd_perf_lvl +
diff --git a/drivers/video/msm/vidc/common/vcd/vcd_sub.c b/drivers/video/msm/vidc/common/vcd/vcd_sub.c
index f79a147..5b64f20 100644
--- a/drivers/video/msm/vidc/common/vcd/vcd_sub.c
+++ b/drivers/video/msm/vidc/common/vcd/vcd_sub.c
@@ -2045,10 +2045,12 @@
 		transc->in_use = true;
 		if ((codec_config &&
 			 (status != VCD_ERR_BITSTREAM_ERR)) ||
-			((status == VCD_ERR_BITSTREAM_ERR) &&
+			(codec_config && (status == VCD_ERR_BITSTREAM_ERR) &&
 			 !(cctxt->status.mask & VCD_FIRST_IP_DONE) &&
-			 (core_type == VCD_CORE_720P)))
+			(core_type == VCD_CORE_720P))) {
+			VCD_MSG_HIGH("handle EOS for codec config");
 			vcd_handle_eos_done(cctxt, transc, VCD_S_SUCCESS);
+		}
 	}
 	return rc;
 }
diff --git a/include/asm-generic/dma-contiguous.h b/include/asm-generic/dma-contiguous.h
new file mode 100644
index 0000000..c544356
--- /dev/null
+++ b/include/asm-generic/dma-contiguous.h
@@ -0,0 +1,28 @@
+#ifndef ASM_DMA_CONTIGUOUS_H
+#define ASM_DMA_CONTIGUOUS_H
+
+#ifdef __KERNEL__
+#ifdef CONFIG_CMA
+
+#include <linux/device.h>
+#include <linux/dma-contiguous.h>
+
+static inline struct cma *dev_get_cma_area(struct device *dev)
+{
+	if (dev && dev->cma_area)
+		return dev->cma_area;
+	return dma_contiguous_default_area;
+}
+
+static inline void dev_set_cma_area(struct device *dev, struct cma *cma)
+{
+	if (dev)
+		dev->cma_area = cma;
+	if (!dev || !dma_contiguous_default_area)
+		dma_contiguous_default_area = cma;
+}
+
+#endif
+#endif
+
+#endif
diff --git a/include/linux/android_pmem.h b/include/linux/android_pmem.h
index ab96379..cfca491 100644
--- a/include/linux/android_pmem.h
+++ b/include/linux/android_pmem.h
@@ -108,26 +108,6 @@
 	PMEM_ALLOCATORTYPE_MAX,
 };
 
-#define PMEM_MEMTYPE_MASK 0x7
-#define PMEM_INVALID_MEMTYPE 0x0
-#define PMEM_MEMTYPE_EBI1 0x1
-#define PMEM_MEMTYPE_SMI  0x2
-#define PMEM_MEMTYPE_RESERVED_INVALID2 0x3
-#define PMEM_MEMTYPE_RESERVED_INVALID3 0x4
-#define PMEM_MEMTYPE_RESERVED_INVALID4 0x5
-#define PMEM_MEMTYPE_RESERVED_INVALID5 0x6
-#define PMEM_MEMTYPE_RESERVED_INVALID6 0x7
-
-#define PMEM_ALIGNMENT_MASK 0x18
-#define PMEM_ALIGNMENT_RESERVED_INVALID1 0x0
-#define PMEM_ALIGNMENT_4K 0x8 /* the default */
-#define PMEM_ALIGNMENT_1M 0x10
-#define PMEM_ALIGNMENT_RESERVED_INVALID2 0x18
-
-/* flags in the following function defined as above. */
-int32_t pmem_kalloc(const size_t size, const uint32_t flags);
-int32_t pmem_kfree(const int32_t physaddr);
-
 /* kernel api names for board specific data structures */
 #define PMEM_KERNEL_EBI1_DATA_NAME "pmem_kernel_ebi1"
 #define PMEM_KERNEL_SMI_DATA_NAME "pmem_kernel_smi"
diff --git a/arch/arm/mach-msm/include/mach/stm.h b/include/linux/coresight-stm.h
similarity index 100%
rename from arch/arm/mach-msm/include/mach/stm.h
rename to include/linux/coresight-stm.h
diff --git a/include/linux/coresight.h b/include/linux/coresight.h
new file mode 100644
index 0000000..f03a493
--- /dev/null
+++ b/include/linux/coresight.h
@@ -0,0 +1,132 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _LINUX_CORESIGHT_H
+#define _LINUX_CORESIGHT_H
+
+#include <linux/device.h>
+
+/* Peripheral id registers (0xFD0-0xFEC) */
+#define CORESIGHT_PERIPHIDR4	(0xFD0)
+#define CORESIGHT_PERIPHIDR5	(0xFD4)
+#define CORESIGHT_PERIPHIDR6	(0xFD8)
+#define CORESIGHT_PERIPHIDR7	(0xFDC)
+#define CORESIGHT_PERIPHIDR0	(0xFE0)
+#define CORESIGHT_PERIPHIDR1	(0xFE4)
+#define CORESIGHT_PERIPHIDR2	(0xFE8)
+#define CORESIGHT_PERIPHIDR3	(0xFEC)
+/* Component id registers (0xFF0-0xFFC) */
+#define CORESIGHT_COMPIDR0	(0xFF0)
+#define CORESIGHT_COMPIDR1	(0xFF4)
+#define CORESIGHT_COMPIDR2	(0xFF8)
+#define CORESIGHT_COMPIDR3	(0xFFC)
+
+/* DBGv7 with baseline CP14 registers implemented */
+#define ARM_DEBUG_ARCH_V7B	(0x3)
+/* DBGv7 with all CP14 registers implemented */
+#define ARM_DEBUG_ARCH_V7	(0x4)
+#define ARM_DEBUG_ARCH_V7_1	(0x5)
+#define ETM_ARCH_V3_3		(0x23)
+#define PFT_ARCH_V1_1		(0x31)
+
+enum coresight_clk_rate {
+	CORESIGHT_CLK_RATE_OFF,
+	CORESIGHT_CLK_RATE_TRACE,
+	CORESIGHT_CLK_RATE_HSTRACE,
+};
+
+enum coresight_dev_type {
+	CORESIGHT_DEV_TYPE_SINK,
+	CORESIGHT_DEV_TYPE_LINK,
+	CORESIGHT_DEV_TYPE_SOURCE,
+	CORESIGHT_DEV_TYPE_MAX,
+};
+
+struct coresight_connection {
+	int child_id;
+	int child_port;
+	struct coresight_device *child_dev;
+	struct list_head link;
+};
+
+struct coresight_device {
+	int id;
+	struct coresight_connection *conns;
+	int nr_conns;
+	const struct coresight_ops *ops;
+	struct device dev;
+	struct mutex mutex;
+	int *refcnt;
+	struct list_head link;
+	struct module *owner;
+	bool enable;
+};
+
+#define to_coresight_device(d) container_of(d, struct coresight_device, dev)
+
+struct coresight_ops {
+	int (*enable)(struct coresight_device *csdev, int port);
+	void (*disable)(struct coresight_device *csdev, int port);
+};
+
+struct coresight_platform_data {
+	int id;
+	const char *name;
+	int nr_ports;
+	int *child_ids;
+	int *child_ports;
+	int nr_children;
+};
+
+struct coresight_desc {
+	enum coresight_dev_type type;
+	const struct coresight_ops *ops;
+	struct coresight_platform_data *pdata;
+	struct device *dev;
+	const struct attribute_group **groups;
+	struct module *owner;
+};
+
+struct qdss_source {
+	struct list_head link;
+	const char *name;
+	uint32_t fport_mask;
+};
+
+struct msm_qdss_platform_data {
+	struct qdss_source *src_table;
+	size_t size;
+	uint8_t afamily;
+};
+
+
+extern struct coresight_device *
+coresight_register(struct coresight_desc *desc);
+extern void coresight_unregister(struct coresight_device *csdev);
+extern int coresight_enable(struct coresight_device *csdev, int port);
+extern void coresight_disable(struct coresight_device *csdev, int port);
+
+#ifdef CONFIG_MSM_QDSS
+extern struct qdss_source *qdss_get(const char *name);
+extern void qdss_put(struct qdss_source *src);
+extern int qdss_enable(struct qdss_source *src);
+extern void qdss_disable(struct qdss_source *src);
+extern void qdss_disable_sink(void);
+#else
+static inline struct qdss_source *qdss_get(const char *name) { return NULL; }
+static inline void qdss_put(struct qdss_source *src) {}
+static inline int qdss_enable(struct qdss_source *src) { return -ENOSYS; }
+static inline void qdss_disable(struct qdss_source *src) {}
+static inline void qdss_disable_sink(void) {}
+#endif
+
+#endif
diff --git a/include/linux/device.h b/include/linux/device.h
index 84be123..9fca83b 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -666,6 +666,10 @@
 
 	struct dma_coherent_mem	*dma_mem; /* internal for coherent mem
 					     override */
+#ifdef CONFIG_CMA
+	struct cma *cma_area;		/* contiguous memory area for dma
+					   allocations */
+#endif
 	/* arch specific additions */
 	struct dev_archdata	archdata;
 
diff --git a/include/linux/diagchar.h b/include/linux/diagchar.h
index 537960b..4ff1147 100644
--- a/include/linux/diagchar.h
+++ b/include/linux/diagchar.h
@@ -108,10 +108,10 @@
 /* This needs to be modified manually now, when we add
  a new RANGE of SSIDs to the msg_mask_tbl */
 #define MSG_MASK_TBL_CNT		23
-#define EVENT_LAST_ID			0x083F
+#define EVENT_LAST_ID			0x08AD
 
 #define MSG_SSID_0			0
-#define MSG_SSID_0_LAST			90
+#define MSG_SSID_0_LAST			91
 #define MSG_SSID_1			500
 #define MSG_SSID_1_LAST			506
 #define MSG_SSID_2			1000
@@ -125,7 +125,7 @@
 #define MSG_SSID_6			4500
 #define MSG_SSID_6_LAST			4526
 #define MSG_SSID_7			4600
-#define MSG_SSID_7_LAST			4612
+#define MSG_SSID_7_LAST			4613
 #define MSG_SSID_8			5000
 #define MSG_SSID_8_LAST			5029
 #define MSG_SSID_9			5500
@@ -271,6 +271,7 @@
 	MSG_LVL_LOW|MSG_LVL_MED|MSG_LVL_HIGH|MSG_LVL_ERROR|MSG_LVL_FATAL,
 	MSG_LVL_LOW,
 	MSG_LVL_MED,
+	MSG_LVL_LOW,
 	MSG_LVL_LOW
 };
 
@@ -383,6 +384,7 @@
 	MSG_LVL_MED,
 	MSG_LVL_MED,
 	MSG_LVL_MED,
+	MSG_LVL_LOW,
 	MSG_LVL_LOW
 };
 
diff --git a/include/linux/dma-contiguous.h b/include/linux/dma-contiguous.h
new file mode 100644
index 0000000..2f303e4
--- /dev/null
+++ b/include/linux/dma-contiguous.h
@@ -0,0 +1,110 @@
+#ifndef __LINUX_CMA_H
+#define __LINUX_CMA_H
+
+/*
+ * Contiguous Memory Allocator for DMA mapping framework
+ * Copyright (c) 2010-2011 by Samsung Electronics.
+ * Written by:
+ *	Marek Szyprowski <m.szyprowski@samsung.com>
+ *	Michal Nazarewicz <mina86@mina86.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License or (at your option) any later version of the license.
+ */
+
+/*
+ * Contiguous Memory Allocator
+ *
+ *   The Contiguous Memory Allocator (CMA) makes it possible to
+ *   allocate big contiguous chunks of memory after the system has
+ *   booted.
+ *
+ * Why is it needed?
+ *
+ *   Various devices on embedded systems have no scatter-gather and/or
+ *   IO map support and require contiguous blocks of memory to
+ *   operate.  They include devices such as cameras, hardware video
+ *   coders, etc.
+ *
+ *   Such devices often require big memory buffers (a full HD frame
+ *   is, for instance, more than 2 megapixels, i.e. more than 6
+ *   MB of memory), which makes mechanisms such as kmalloc() or
+ *   alloc_page() ineffective.
+ *
+ *   At the same time, a solution where a big memory region is
+ *   reserved for a device is suboptimal since often more memory is
+ *   reserved than strictly required and, moreover, the memory is
+ *   inaccessible to the page system even if device drivers don't use it.
+ *
+ *   CMA tries to solve this issue by operating on memory regions
+ *   where only movable pages can be allocated from.  This way, kernel
+ *   can use the memory for pagecache and when device driver requests
+ *   it, allocated pages can be migrated.
+ *
+ * Driver usage
+ *
+ *   CMA should not be used by device drivers directly. It is
+ *   only a helper framework for the dma-mapping subsystem.
+ *
+ *   For more information, see kernel-docs in drivers/base/dma-contiguous.c
+ */
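+/*
+ * Illustrative sketch (not part of this change): a board file typically
+ * reserves a device-private area early during boot and the dma-mapping
+ * core later draws buffers from it. The device pointer, the 16 MiB size
+ * and the zero base/limit below are placeholders only.
+ *
+ *	// at early boot, before the buddy allocator owns the range
+ *	dma_declare_contiguous(&foo_camera_dev, 16 * SZ_1M, 0, 0);
+ *
+ *	// later, from the DMA mapping backend
+ *	struct page *pages = dma_alloc_from_contiguous(dev, count, order);
+ *	if (pages)
+ *		...map the pages and hand them to the caller...
+ *	dma_release_from_contiguous(dev, pages, count);
+ */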
+
+#ifdef __KERNEL__
+
+struct cma;
+struct page;
+struct device;
+
+#ifdef CONFIG_CMA
+
+/*
+ * There is always at least global CMA area and a few optional device
+ * private areas configured in kernel .config.
+ */
+#define MAX_CMA_AREAS	(1 + CONFIG_CMA_AREAS)
+
+extern struct cma *dma_contiguous_default_area;
+
+void dma_contiguous_reserve(phys_addr_t addr_limit);
+int dma_declare_contiguous(struct device *dev, unsigned long size,
+			   phys_addr_t base, phys_addr_t limit);
+
+struct page *dma_alloc_from_contiguous(struct device *dev, int count,
+				       unsigned int order);
+bool dma_release_from_contiguous(struct device *dev, struct page *pages,
+				 int count);
+
+#else
+
+#define MAX_CMA_AREAS	(0)
+
+static inline void dma_contiguous_reserve(phys_addr_t limit) { }
+
+static inline
+int dma_declare_contiguous(struct device *dev, unsigned long size,
+			   phys_addr_t base, phys_addr_t limit)
+{
+	return -ENOSYS;
+}
+
+static inline
+struct page *dma_alloc_from_contiguous(struct device *dev, int count,
+				       unsigned int order)
+{
+	return NULL;
+}
+
+static inline
+bool dma_release_from_contiguous(struct device *dev, struct page *pages,
+				 int count)
+{
+	return false;
+}
+
+#endif
+
+#endif
+
+#endif
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index 581e74b..1e49be4 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -391,4 +391,16 @@
 }
 #endif /* CONFIG_PM_SLEEP */
 
+#ifdef CONFIG_CMA
+
+/* The below functions must be run on a range from a single zone. */
+extern int alloc_contig_range(unsigned long start, unsigned long end,
+			      unsigned migratetype);
+extern void free_contig_range(unsigned long pfn, unsigned nr_pages);
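+
+/*
+ * Illustrative only: the CMA allocator built on top of these essentially does
+ *
+ *	if (alloc_contig_range(pfn, pfn + nr_pages, MIGRATE_CMA) == 0)
+ *		...pages [pfn, pfn + nr_pages) now belong to the caller...
+ *	...
+ *	free_contig_range(pfn, nr_pages);
+ */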
+
+/* CMA stuff */
+extern void init_cma_reserved_pageblock(struct page *page);
+
+#endif
+
 #endif /* __LINUX_GFP_H */
diff --git a/include/linux/ion.h b/include/linux/ion.h
index fca5517..2519270 100644
--- a/include/linux/ion.h
+++ b/include/linux/ion.h
@@ -21,7 +21,6 @@
 #include <linux/ioctl.h>
 #include <linux/types.h>
 
-
 struct ion_handle;
 /**
  * enum ion_heap_types - list of all possible types of heaps
@@ -261,6 +260,17 @@
 #ifdef CONFIG_ION
 
 /**
+ * ion_reserve() - reserve memory for ion heaps if applicable
+ * @data:	platform data specifying starting physical address and
+ *		size
+ *
+ * Calls memblock reserve to set aside memory for heaps that are
+ * located at specific memory addresses or of specific sizes not
+ * managed by the kernel
+ */
+void ion_reserve(struct ion_platform_data *data);
+
+/**
  * ion_client_create() -  allocate a client and returns it
  * @dev:	the global ion device
  * @heap_mask:	mask of heaps this client can allocate from
@@ -323,7 +333,7 @@
  * This function queries the heap for a particular handle to get the
  * handle's physical address.  Its output is only correct if
  * a heap returns physically contiguous memory -- in other cases
- * this api should not be implemented -- ion_map_dma should be used
+ * this api should not be implemented -- ion_sg_table should be used
  * instead.  Returns -EINVAL if the handle is invalid.  This has
  * no implications on the reference counting of the handle --
  * the returned value may not be valid if the caller is not
@@ -333,6 +343,17 @@
 	     ion_phys_addr_t *addr, size_t *len);
 
 /**
+ * ion_sg_table - return an sg_table describing a handle
+ * @client:	the client
+ * @handle:	the handle
+ *
+ * This function returns the sg_table describing
+ * a particular ion handle.
+ */
+struct sg_table *ion_sg_table(struct ion_client *client,
+			      struct ion_handle *handle);
+
+/**
  * ion_map_kernel - create mapping for the given handle
  * @client:	the client
  * @handle:	handle to map
@@ -353,64 +374,22 @@
 void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle);
 
 /**
- * ion_map_dma - create a dma mapping for a given handle
+ * ion_share_dma_buf() - given an ion client, create a dma-buf fd
  * @client:	the client
- * @handle:	handle to map
- *
- * Return an sglist describing the given handle
+ * @handle:	the handle
  */
-struct scatterlist *ion_map_dma(struct ion_client *client,
-				struct ion_handle *handle,
-				unsigned long flags);
+int ion_share_dma_buf(struct ion_client *client, struct ion_handle *handle);
 
 /**
- * ion_unmap_dma() - destroy a dma mapping for a handle
+ * ion_import_dma_buf() - given a dma-buf fd from the ion exporter, get a handle
  * @client:	the client
- * @handle:	handle to unmap
- */
-void ion_unmap_dma(struct ion_client *client, struct ion_handle *handle);
-
-/**
- * ion_share() - given a handle, obtain a buffer to pass to other clients
- * @client:	the client
- * @handle:	the handle to share
+ * @fd:		the dma-buf fd
  *
- * Given a handle, return a buffer, which exists in a global name
- * space, and can be passed to other clients.  Should be passed into ion_import
- * to obtain a new handle for this buffer.
- *
- * NOTE: This function does do not an extra reference.  The burden is on the
- * caller to make sure the buffer doesn't go away while it's being passed to
- * another client.  That is, ion_free should not be called on this handle until
- * the buffer has been imported into the other client.
+ * Given a dma-buf fd that was allocated through ion via ion_share_dma_buf(),
+ * import that fd and return a handle representing it.  If a dma-buf from
+ * another exporter is passed in, this function will return ERR_PTR(-EINVAL).
  */
-struct ion_buffer *ion_share(struct ion_client *client,
-			     struct ion_handle *handle);
-
-/**
- * ion_import() - given an buffer in another client, import it
- * @client:	this blocks client
- * @buffer:	the buffer to import (as obtained from ion_share)
- *
- * Given a buffer, add it to the client and return the handle to use to refer
- * to it further.  This is called to share a handle from one kernel client to
- * another.
- */
-struct ion_handle *ion_import(struct ion_client *client,
-			      struct ion_buffer *buffer);
-
-/**
- * ion_import_fd() - given an fd obtained via ION_IOC_SHARE ioctl, import it
- * @client:	this blocks client
- * @fd:		the fd
- *
- * A helper function for drivers that will be recieving ion buffers shared
- * with them from userspace.  These buffers are represented by a file
- * descriptor obtained as the return from the ION_IOC_SHARE ioctl.
- * This function coverts that fd into the underlying buffer, and returns
- * the handle to use to refer to it further.
- */
-struct ion_handle *ion_import_fd(struct ion_client *client, int fd);
+struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd);
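+
+/*
+ * Illustrative kernel-side usage (not part of this change, error handling
+ * trimmed): import a dma-buf fd received from userspace, look up its
+ * physical address on a contiguous heap and release the handle when done.
+ *
+ *	struct ion_handle *handle;
+ *	ion_phys_addr_t paddr;
+ *	size_t len;
+ *
+ *	handle = ion_import_dma_buf(client, fd);
+ *	if (IS_ERR_OR_NULL(handle))
+ *		return PTR_ERR(handle);
+ *	ion_phys(client, handle, &paddr, &len);
+ *	...program the hardware with paddr...
+ *	ion_free(client, handle);
+ */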
 
 /**
  * ion_handle_get_flags - get the flags for a given handle
@@ -575,6 +554,11 @@
 			void *vaddr, unsigned long len, unsigned int cmd);
 
 #else
+static inline void ion_reserve(struct ion_platform_data *data)
+{
+
+}
+
 static inline struct ion_client *ion_client_create(struct ion_device *dev,
 				     unsigned int heap_mask, const char *name)
 {
@@ -605,6 +589,12 @@
 	return -ENODEV;
 }
 
+static inline struct sg_table *ion_sg_table(struct ion_client *client,
+			      struct ion_handle *handle)
+{
+	return ERR_PTR(-ENODEV);
+}
+
 static inline void *ion_map_kernel(struct ion_client *client,
 	struct ion_handle *handle, unsigned long flags)
 {
@@ -614,29 +604,12 @@
 static inline void ion_unmap_kernel(struct ion_client *client,
 	struct ion_handle *handle) { }
 
-static inline struct scatterlist *ion_map_dma(struct ion_client *client,
-	struct ion_handle *handle, unsigned long flags)
+static inline int ion_share_dma_buf(struct ion_client *client,
+	struct ion_handle *handle)
 {
-	return ERR_PTR(-ENODEV);
+	return -ENODEV;
 }
 
-static inline void ion_unmap_dma(struct ion_client *client,
-	struct ion_handle *handle) { }
-
-static inline struct ion_buffer *ion_share(struct ion_client *client,
-	struct ion_handle *handle)
-{
-	return ERR_PTR(-ENODEV);
-}
-
-static inline struct ion_handle *ion_import(struct ion_client *client,
-	struct ion_buffer *buffer)
-{
-	return ERR_PTR(-ENODEV);
-}
-
-static inline struct ion_handle *ion_import_fd(struct ion_client *client,
-	int fd)
+static inline struct ion_handle *ion_import_dma_buf(struct ion_client *client,
+	int fd)
 {
 	return ERR_PTR(-ENODEV);
 }
diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h
index ed6bb39..c65740d 100644
--- a/include/linux/irqdomain.h
+++ b/include/linux/irqdomain.h
@@ -9,106 +9,178 @@
  * representation into a hardware irq number that can be mapped back to a
  * Linux irq number without any extra platform support code.
  *
- * irq_domain is expected to be embedded in an interrupt controller's private
- * data structure.
+ * Interrupt controller "domain" data structure. This could be defined as an
+ * irq domain controller. That is, it handles the mapping between hardware
+ * and virtual interrupt numbers for a given interrupt domain. The domain
+ * structure is generally created by the PIC code for a given PIC instance
+ * (though a domain can cover more than one PIC if they have a flat number
+ * model). It's the domain callbacks that are responsible for setting the
+ * irq_chip on a given irq_desc after it's been mapped.
+ *
+ * The host code and data structures are agnostic to whether or not
+ * we use an open firmware device-tree. We do have references to struct
+ * device_node in two places: in irq_find_host() to find the host matching
+ * a given interrupt controller node, and of course as an argument to its
+ * counterpart domain->ops->match() callback. However, those are treated as
+ * generic pointers by the core and the fact that it's actually a device-node
+ * pointer is purely a convention between callers and implementation. This
+ * code could thus be used on other architectures by replacing those two
+ * by some sort of arch-specific void * "token" used to identify interrupt
+ * controllers.
  */
+
 #ifndef _LINUX_IRQDOMAIN_H
 #define _LINUX_IRQDOMAIN_H
 
-#include <linux/irq.h>
-#include <linux/mod_devicetable.h>
+#include <linux/types.h>
+#include <linux/radix-tree.h>
 
-#ifdef CONFIG_IRQ_DOMAIN
 struct device_node;
 struct irq_domain;
+struct of_device_id;
+
+/* Number of irqs reserved for a legacy isa controller */
+#define NUM_ISA_INTERRUPTS	16
 
 /**
  * struct irq_domain_ops - Methods for irq_domain objects
- * @to_irq: (optional) given a local hardware irq number, return the linux
- *          irq number.  If to_irq is not implemented, then the irq_domain
- *          will use this translation: irq = (domain->irq_base + hwirq)
- * @dt_translate: Given a device tree node and interrupt specifier, decode
- *                the hardware irq number and linux irq type value.
+ * @match: Match an interrupt controller device node to a host, returns
+ *         1 on a match
+ * @map: Create or update a mapping between a virtual irq number and a hw
+ *       irq number. This is called only once for a given mapping.
+ * @unmap: Dispose of such a mapping
+ * @xlate: Given a device tree node and interrupt specifier, decode
+ *         the hardware irq number and linux irq type value.
+ *
+ * Functions below are provided by the driver and called whenever a new mapping
+ * is created or an old mapping is disposed. The driver can then proceed to
+ * whatever internal data structures management is required. It also needs
+ * to setup the irq_desc when returning from map().
  */
 struct irq_domain_ops {
-	unsigned int (*to_irq)(struct irq_domain *d, unsigned long hwirq);
-
-#ifdef CONFIG_OF
-	int (*dt_translate)(struct irq_domain *d, struct device_node *node,
-			    const u32 *intspec, unsigned int intsize,
-			    unsigned long *out_hwirq, unsigned int *out_type);
-#endif /* CONFIG_OF */
+	int (*match)(struct irq_domain *d, struct device_node *node);
+	int (*map)(struct irq_domain *d, unsigned int virq, irq_hw_number_t hw);
+	void (*unmap)(struct irq_domain *d, unsigned int virq);
+	int (*xlate)(struct irq_domain *d, struct device_node *node,
+		     const u32 *intspec, unsigned int intsize,
+		     unsigned long *out_hwirq, unsigned int *out_type);
 };
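+/*
+ * Illustrative sketch (not part of this patch): a simple interrupt
+ * controller driver would typically provide a map() callback and create a
+ * linear domain, e.g.:
+ *
+ *	static int foo_irq_map(struct irq_domain *d, unsigned int virq,
+ *			       irq_hw_number_t hw)
+ *	{
+ *		irq_set_chip_and_handler(virq, &foo_irq_chip,
+ *					 handle_level_irq);
+ *		return 0;
+ *	}
+ *
+ *	static const struct irq_domain_ops foo_irq_ops = {
+ *		.map	= foo_irq_map,
+ *		.xlate	= irq_domain_xlate_onecell,
+ *	};
+ *
+ *	domain = irq_domain_add_linear(node, 32, &foo_irq_ops, priv);
+ *	virq = irq_create_mapping(domain, hwirq);
+ *
+ * foo_irq_chip, foo_irq_ops and the count of 32 are placeholders only.
+ */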
 
 /**
  * struct irq_domain - Hardware interrupt number translation object
- * @list: Element in global irq_domain list.
+ * @link: Element in global irq_domain list.
+ * @revmap_type: Method used for reverse mapping hwirq numbers to linux irq. This
+ *               will be one of the IRQ_DOMAIN_MAP_* values.
+ * @revmap_data: Revmap method specific data.
+ * @ops: pointer to irq_domain methods
+ * @host_data: private data pointer for use by owner.  Not touched by irq_domain
+ *             core code.
  * @irq_base: Start of irq_desc range assigned to the irq_domain.  The creator
  *            of the irq_domain is responsible for allocating the array of
  *            irq_desc structures.
  * @nr_irq: Number of irqs managed by the irq domain
  * @hwirq_base: Starting number for hwirqs managed by the irq domain
- * @ops: pointer to irq_domain methods
- * @priv: private data pointer for use by owner.  Not touched by irq_domain
- *        core code.
  * @of_node: (optional) Pointer to device tree nodes associated with the
  *           irq_domain.  Used when decoding device tree interrupt specifiers.
  */
 struct irq_domain {
-	struct list_head list;
-	unsigned int irq_base;
-	unsigned int nr_irq;
-	unsigned int hwirq_base;
+	struct list_head link;
+
+	/* type of reverse mapping_technique */
+	unsigned int revmap_type;
+	union {
+		struct {
+			unsigned int size;
+			unsigned int first_irq;
+			irq_hw_number_t first_hwirq;
+		} legacy;
+		struct {
+			unsigned int size;
+			unsigned int *revmap;
+		} linear;
+		struct {
+			unsigned int max_irq;
+		} nomap;
+		struct radix_tree_root tree;
+	} revmap_data;
 	const struct irq_domain_ops *ops;
-	void *priv;
+	void *host_data;
+	irq_hw_number_t inval_irq;
+
+	/* Optional device node pointer */
 	struct device_node *of_node;
 };
 
-/**
- * irq_domain_to_irq() - Translate from a hardware irq to a linux irq number
- *
- * Returns the linux irq number associated with a hardware irq.  By default,
- * the mapping is irq == domain->irq_base + hwirq, but this mapping can
- * be overridden if the irq_domain implements a .to_irq() hook.
- */
-static inline unsigned int irq_domain_to_irq(struct irq_domain *d,
-					     unsigned long hwirq)
+#ifdef CONFIG_IRQ_DOMAIN
+struct irq_domain *irq_domain_add_legacy(struct device_node *of_node,
+					 unsigned int size,
+					 unsigned int first_irq,
+					 irq_hw_number_t first_hwirq,
+					 const struct irq_domain_ops *ops,
+					 void *host_data);
+struct irq_domain *irq_domain_add_linear(struct device_node *of_node,
+					 unsigned int size,
+					 const struct irq_domain_ops *ops,
+					 void *host_data);
+struct irq_domain *irq_domain_add_nomap(struct device_node *of_node,
+					 unsigned int max_irq,
+					 const struct irq_domain_ops *ops,
+					 void *host_data);
+struct irq_domain *irq_domain_add_tree(struct device_node *of_node,
+					 const struct irq_domain_ops *ops,
+					 void *host_data);
+
+extern struct irq_domain *irq_find_host(struct device_node *node);
+extern void irq_set_default_host(struct irq_domain *host);
+
+static inline struct irq_domain *irq_domain_add_legacy_isa(
+				struct device_node *of_node,
+				const struct irq_domain_ops *ops,
+				void *host_data)
 {
-	if (d->ops->to_irq)
-		return d->ops->to_irq(d, hwirq);
-	if (WARN_ON(hwirq < d->hwirq_base))
-		return 0;
-	return d->irq_base + hwirq - d->hwirq_base;
+	return irq_domain_add_legacy(of_node, NUM_ISA_INTERRUPTS, 0, 0, ops,
+				     host_data);
 }
 
-#define irq_domain_for_each_hwirq(d, hw) \
-	for (hw = d->hwirq_base; hw < d->hwirq_base + d->nr_irq; hw++)
 
-#define irq_domain_for_each_irq(d, hw, irq) \
-	for (hw = d->hwirq_base, irq = irq_domain_to_irq(d, hw); \
-	     hw < d->hwirq_base + d->nr_irq; \
-	     hw++, irq = irq_domain_to_irq(d, hw))
-
+extern unsigned int irq_create_mapping(struct irq_domain *host,
+				       irq_hw_number_t hwirq);
 extern void irq_dispose_mapping(unsigned int virq);
+extern unsigned int irq_find_mapping(struct irq_domain *host,
+				     irq_hw_number_t hwirq);
+extern unsigned int irq_create_direct_mapping(struct irq_domain *host);
+extern void irq_radix_revmap_insert(struct irq_domain *host, unsigned int virq,
+				    irq_hw_number_t hwirq);
+extern unsigned int irq_radix_revmap_lookup(struct irq_domain *host,
+					    irq_hw_number_t hwirq);
+extern unsigned int irq_linear_revmap(struct irq_domain *host,
+				      irq_hw_number_t hwirq);
 
-extern int irq_domain_add(struct irq_domain *domain);
-extern void irq_domain_del(struct irq_domain *domain);
-extern void irq_domain_register(struct irq_domain *domain);
-extern void irq_domain_register_irq(struct irq_domain *domain, int hwirq);
-extern void irq_domain_unregister(struct irq_domain *domain);
-extern void irq_domain_unregister_irq(struct irq_domain *domain, int hwirq);
-extern int irq_domain_find_free_range(unsigned int from, unsigned int cnt);
+extern const struct irq_domain_ops irq_domain_simple_ops;
 
-extern struct irq_domain_ops irq_domain_simple_ops;
-#endif /* CONFIG_IRQ_DOMAIN */
+/* stock xlate functions */
+int irq_domain_xlate_onecell(struct irq_domain *d, struct device_node *ctrlr,
+			const u32 *intspec, unsigned int intsize,
+			irq_hw_number_t *out_hwirq, unsigned int *out_type);
+int irq_domain_xlate_twocell(struct irq_domain *d, struct device_node *ctrlr,
+			const u32 *intspec, unsigned int intsize,
+			irq_hw_number_t *out_hwirq, unsigned int *out_type);
+int irq_domain_xlate_onetwocell(struct irq_domain *d, struct device_node *ctrlr,
+			const u32 *intspec, unsigned int intsize,
+			irq_hw_number_t *out_hwirq, unsigned int *out_type);
 
-#if defined(CONFIG_IRQ_DOMAIN) && defined(CONFIG_OF_IRQ)
-extern void irq_domain_add_simple(struct device_node *controller, int irq_base);
+#if defined(CONFIG_OF_IRQ)
 extern void irq_domain_generate_simple(const struct of_device_id *match,
 					u64 phys_base, unsigned int irq_start);
-#else /* CONFIG_IRQ_DOMAIN && CONFIG_OF_IRQ */
+#else /* CONFIG_OF_IRQ */
 static inline void irq_domain_generate_simple(const struct of_device_id *match,
 					u64 phys_base, unsigned int irq_start) { }
-#endif /* CONFIG_IRQ_DOMAIN && CONFIG_OF_IRQ */
+#endif /* !CONFIG_OF_IRQ */
+
+#else /* CONFIG_IRQ_DOMAIN */
+static inline void irq_dispose_mapping(unsigned int virq) { }
+#endif /* !CONFIG_IRQ_DOMAIN */
 
 #endif /* _LINUX_IRQDOMAIN_H */
diff --git a/include/linux/mfd/pm8xxx/ccadc.h b/include/linux/mfd/pm8xxx/ccadc.h
index 0bd4cc3..29f7a62 100644
--- a/include/linux/mfd/pm8xxx/ccadc.h
+++ b/include/linux/mfd/pm8xxx/ccadc.h
@@ -27,32 +27,13 @@
 	unsigned int	calib_delay_ms;
 };
 
-#define CCADC_READING_RESOLUTION_N_V1	1085069
-#define CCADC_READING_RESOLUTION_D_V1	100000
-#define CCADC_READING_RESOLUTION_N_V2	542535
-#define CCADC_READING_RESOLUTION_D_V2	100000
-
-static s64 pm8xxx_ccadc_reading_to_microvolt_v1(s64 cc)
-{
-	return div_s64(cc * CCADC_READING_RESOLUTION_N_V1,
-					CCADC_READING_RESOLUTION_D_V1);
-}
-
-static s64 pm8xxx_ccadc_reading_to_microvolt_v2(s64 cc)
-{
-	return div_s64(cc * CCADC_READING_RESOLUTION_N_V2,
-					CCADC_READING_RESOLUTION_D_V2);
-}
+#define CCADC_READING_RESOLUTION_N	542535
+#define CCADC_READING_RESOLUTION_D	100000
 
 static inline s64 pm8xxx_ccadc_reading_to_microvolt(int revision, s64 cc)
 {
-	/*
-	 * resolution (the value of a single bit) was changed after revision 2.0
-	 * for more accurate readings
-	 */
-	return (revision < PM8XXX_REVISION_8921_2p0) ?
-				pm8xxx_ccadc_reading_to_microvolt_v1((s64)cc) :
-				pm8xxx_ccadc_reading_to_microvolt_v2((s64)cc);
+	return div_s64(cc * CCADC_READING_RESOLUTION_N,
+					CCADC_READING_RESOLUTION_D);
 }
 
 #if defined(CONFIG_PM8XXX_CCADC) || defined(CONFIG_PM8XXX_CCADC_MODULE)
diff --git a/include/linux/mfd/pm8xxx/pm8921-bms.h b/include/linux/mfd/pm8xxx/pm8921-bms.h
index 90c2d99..bbd032d 100644
--- a/include/linux/mfd/pm8xxx/pm8921-bms.h
+++ b/include/linux/mfd/pm8xxx/pm8921-bms.h
@@ -200,6 +200,13 @@
  * pm8921_bms_get_rbatt - function to get the battery resistance in mOhm.
  */
 int pm8921_bms_get_rbatt(void);
+/**
+ * pm8921_bms_invalidate_shutdown_soc - function to notify the bms driver that
+ *					the battery was replaced between reboot
+ *					the battery was replaced between reboots
+ *					soc stored in a coincell backed register
+ */
+void pm8921_bms_invalidate_shutdown_soc(void);
 #else
 static inline int pm8921_bms_get_vsense_avg(int *result)
 {
@@ -235,6 +242,9 @@
 {
 	return -EINVAL;
 }
+static inline void pm8921_bms_invalidate_shutdown_soc(void)
+{
+}
 #endif
 
 #endif
diff --git a/include/linux/mfd/wcd9xxx/wcd9xxx-slimslave.h b/include/linux/mfd/wcd9xxx/wcd9xxx-slimslave.h
index fcd3bd3..93c21ce 100644
--- a/include/linux/mfd/wcd9xxx/wcd9xxx-slimslave.h
+++ b/include/linux/mfd/wcd9xxx/wcd9xxx-slimslave.h
@@ -99,4 +99,5 @@
 int wcd9xxx_get_channel(struct wcd9xxx *wcd9xxx,
 			unsigned int *rx_ch,
 			unsigned int *tx_ch);
+int wcd9xxx_get_slave_port(unsigned int ch_num);
 #endif /* __WCD9310_SLIMSLAVE_H_ */
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
index 447fbbb..05a6b5b 100644
--- a/include/linux/mmc/host.h
+++ b/include/linux/mmc/host.h
@@ -241,7 +241,6 @@
 #define MMC_CAP2_BROKEN_VOLTAGE	(1 << 7)	/* Use the broken voltage */
 #define MMC_CAP2_DETECT_ON_ERR	(1 << 8)	/* On I/O err check card removal */
 #define MMC_CAP2_HC_ERASE_SZ	(1 << 9)	/* High-capacity erase size */
-#define MMC_CAP2_POWER_OFF_VCCQ_DURING_SUSPEND	(1 << 10)
 #define MMC_CAP2_PACKED_RD	(1 << 10)	/* Allow packed read */
 #define MMC_CAP2_PACKED_WR	(1 << 11)	/* Allow packed write */
 #define MMC_CAP2_PACKED_CMD	(MMC_CAP2_PACKED_RD | \
@@ -250,6 +249,7 @@
 #define MMC_CAP2_SANITIZE	(1 << 13)		/* Support Sanitize */
 #define MMC_CAP2_BKOPS		    (1 << 14)	/* BKOPS supported */
 #define MMC_CAP2_INIT_BKOPS	    (1 << 15)	/* Need to set BKOPS_EN */
+#define MMC_CAP2_POWER_OFF_VCCQ_DURING_SUSPEND	(1 << 16)
 
 	mmc_pm_flag_t		pm_caps;	/* supported pm features */
 
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 64290b3..f8a3a10 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -35,13 +35,39 @@
  */
 #define PAGE_ALLOC_COSTLY_ORDER 3
 
-#define MIGRATE_UNMOVABLE     0
-#define MIGRATE_RECLAIMABLE   1
-#define MIGRATE_MOVABLE       2
-#define MIGRATE_PCPTYPES      3 /* the number of types on the pcp lists */
-#define MIGRATE_RESERVE       3
-#define MIGRATE_ISOLATE       4 /* can't allocate from here */
-#define MIGRATE_TYPES         5
+enum {
+	MIGRATE_UNMOVABLE,
+	MIGRATE_RECLAIMABLE,
+	MIGRATE_MOVABLE,
+	MIGRATE_PCPTYPES,	/* the number of types on the pcp lists */
+	MIGRATE_RESERVE = MIGRATE_PCPTYPES,
+#ifdef CONFIG_CMA
+	/*
+	 * MIGRATE_CMA migration type is designed to mimic the way
+	 * ZONE_MOVABLE works.  Only movable pages can be allocated
+	 * from MIGRATE_CMA pageblocks and page allocator never
+	 * implicitly change migration type of MIGRATE_CMA pageblock.
+	 *
+	 * The way to use it is to change migratetype of a range of
+	 * pageblocks to MIGRATE_CMA which can be done by
+	 * __free_pageblock_cma() function.  What is important though
+	 * is that a range of pageblocks must be aligned to
+	 * MAX_ORDER_NR_PAGES should the biggest page be bigger than
+	 * a single pageblock.
+	 */
+	MIGRATE_CMA,
+#endif
+	MIGRATE_ISOLATE,	/* can't allocate from here */
+	MIGRATE_TYPES
+};
+
+#ifdef CONFIG_CMA
+#  define is_migrate_cma(migratetype) unlikely((migratetype) == MIGRATE_CMA)
+#  define cma_wmark_pages(zone)	zone->min_cma_pages
+#else
+#  define is_migrate_cma(migratetype) false
+#  define cma_wmark_pages(zone) 0
+#endif
 
 #define for_each_migratetype_order(order, type) \
 	for (order = 0; order < MAX_ORDER; order++) \
@@ -347,6 +373,13 @@
 	/* see spanned/present_pages for more description */
 	seqlock_t		span_seqlock;
 #endif
+#ifdef CONFIG_CMA
+	/*
+	 * CMA needs to increase watermark levels during the allocation
+	 * process to make sure that the system is not starved.
+	 */
+	unsigned long		min_cma_pages;
+#endif
 	struct free_area	free_area[MAX_ORDER];
 
 #ifndef CONFIG_SPARSEMEM
diff --git a/include/linux/msm_rotator.h b/include/linux/msm_rotator.h
index 6cfbb35..0f15a8b 100644
--- a/include/linux/msm_rotator.h
+++ b/include/linux/msm_rotator.h
@@ -31,7 +31,6 @@
 	unsigned char   rotations;
 	int enable;
 	unsigned int	downscale_ratio;
-	unsigned int secure;
 };
 
 struct msm_rotator_data_info {
diff --git a/include/linux/page-isolation.h b/include/linux/page-isolation.h
index 051c1b1..3bdcab3 100644
--- a/include/linux/page-isolation.h
+++ b/include/linux/page-isolation.h
@@ -3,7 +3,7 @@
 
 /*
  * Changes migrate type in [start_pfn, end_pfn) to be MIGRATE_ISOLATE.
- * If specified range includes migrate types other than MOVABLE,
+ * If specified range includes migrate types other than MOVABLE or CMA,
  * this will fail with -EBUSY.
  *
  * For isolating all pages in the range finally, the caller have to
@@ -11,27 +11,27 @@
  * test it.
  */
 extern int
-start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn);
+start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
+			 unsigned migratetype);
 
 /*
  * Changes MIGRATE_ISOLATE to MIGRATE_MOVABLE.
  * target range is [start_pfn, end_pfn)
  */
 extern int
-undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn);
+undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
+			unsigned migratetype);
 
 /*
- * test all pages in [start_pfn, end_pfn)are isolated or not.
+ * Test all pages in [start_pfn, end_pfn) are isolated or not.
  */
-extern int
-test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn);
+int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn);
 
 /*
- * Internal funcs.Changes pageblock's migrate type.
- * Please use make_pagetype_isolated()/make_pagetype_movable().
+ * Internal functions. Changes pageblock's migrate type.
  */
 extern int set_migratetype_isolate(struct page *page);
-extern void unset_migratetype_isolate(struct page *page);
+extern void unset_migratetype_isolate(struct page *page, unsigned migratetype);
 
 
 #endif
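
A hedged sketch of the new migratetype-aware isolation flow, loosely modelled
on a CMA-style caller and assuming CONFIG_CMA; the function name, rollback
policy and error handling are illustrative:

	static int example_claim_range(unsigned long start_pfn, unsigned long end_pfn)
	{
		int ret;

		/* Mark the pageblocks MIGRATE_ISOLATE, recording they were CMA */
		ret = start_isolate_page_range(start_pfn, end_pfn, MIGRATE_CMA);
		if (ret)
			return ret;

		/* test_pages_isolated() returns 0 only if every page is isolated */
		if (test_pages_isolated(start_pfn, end_pfn)) {
			undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_CMA);
			return -EBUSY;
		}

		/* ... migrate away or claim the pages here ... */

		/* Restore the pageblocks to their original migratetype */
		undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_CMA);
		return 0;
	}
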
diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h
index 643c80e..32d8ec2 100644
--- a/include/linux/power_supply.h
+++ b/include/linux/power_supply.h
@@ -216,6 +216,7 @@
 extern int power_supply_set_battery_charged(struct power_supply *psy);
 extern int power_supply_set_current_limit(struct power_supply *psy, int limit);
 extern int power_supply_set_online(struct power_supply *psy, bool enable);
+extern int power_supply_set_scope(struct power_supply *psy, int scope);
 extern int power_supply_set_charge_type(struct power_supply *psy, int type);
 
 #if defined(CONFIG_POWER_SUPPLY) || defined(CONFIG_POWER_SUPPLY_MODULE)
diff --git a/include/linux/qpnp/gpio.h b/include/linux/qpnp/gpio.h
deleted file mode 100644
index e7fb53e..0000000
--- a/include/linux/qpnp/gpio.h
+++ /dev/null
@@ -1,118 +0,0 @@
-/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- */
-
-#include <mach/qpnp.h>
-
-#define QPNP_GPIO_DIR_IN			0
-#define QPNP_GPIO_DIR_OUT			1
-#define QPNP_GPIO_DIR_BOTH			2
-
-#define QPNP_GPIO_INVERT_DISABLE		0
-#define QPNP_GPIO_INVERT_ENABLE			1
-
-#define QPNP_GPIO_OUT_BUF_CMOS			0
-#define QPNP_GPIO_OUT_BUF_OPEN_DRAIN_NMOS	1
-#define QPNP_GPIO_OUT_BUF_OPEN_DRAIN_PMOS	2
-
-#define QPNP_GPIO_VIN0				0
-#define QPNP_GPIO_VIN1				1
-#define QPNP_GPIO_VIN2				2
-#define QPNP_GPIO_VIN3				3
-#define QPNP_GPIO_VIN4				4
-#define QPNP_GPIO_VIN5				5
-#define QPNP_GPIO_VIN6				6
-#define QPNP_GPIO_VIN7				7
-
-#define QPNP_GPIO_PULL_UP_30			0
-#define QPNP_GPIO_PULL_UP_1P5			1
-#define QPNP_GPIO_PULL_UP_31P5			2
-#define QPNP_GPIO_PULL_UP_1P5_30		3
-#define QPNP_GPIO_PULL_DN			4
-#define QPNP_GPIO_PULL_NO			5
-
-#define QPNP_GPIO_OUT_STRENGTH_LOW		1
-#define QPNP_GPIO_OUT_STRENGTH_MED		2
-#define QPNP_GPIO_OUT_STRENGTH_HIGH		3
-
-#define QPNP_GPIO_SRC_FUNC_NORMAL		0
-#define QPNP_GPIO_SRC_FUNC_PAIRED		1
-#define QPNP_GPIO_SRC_FUNC_1			2
-#define QPNP_GPIO_SRC_FUNC_2			3
-#define QPNP_GPIO_SRC_DTEST1			4
-#define QPNP_GPIO_SRC_DTEST2			5
-#define QPNP_GPIO_SRC_DTEST3			6
-#define QPNP_GPIO_SRC_DTEST4			7
-
-#define QPNP_GPIO_MASTER_DISABLE		0
-#define QPNP_GPIO_MASTER_ENABLE			1
-
-/**
- * struct qpnp_gpio_cfg - structure to specify gpio configurtion values
- * @direction:		indicates whether the gpio should be input, output, or
- *			both. Should be of the type QPNP_GPIO_DIR_*
- * @output_type:	indicates gpio should be configured as CMOS or open
- *			drain. Should be of the type QPNP_GPIO_OUT_BUF_*
- * @invert:		Invert the signal of the gpio line -
- *			QPNP_GPIO_INVERT_DISABLE or QPNP_GPIO_INVERT_ENABLE
- * @pull:		Indicates whether a pull up or pull down should be
- *			applied. If a pullup is required the current strength
- *			needs to be specified. Current values of 30uA, 1.5uA,
- *			31.5uA, 1.5uA with 30uA boost are supported. This value
- *			should be one of the QPNP_GPIO_PULL_*
- * @vin_sel:		specifies the voltage level when the output is set to 1.
- *			For an input gpio specifies the voltage level at which
- *			the input is interpreted as a logical 1.
- * @out_strength:	the amount of current supplied for an output gpio,
- *			should be of the type QPNP_GPIO_STRENGTH_*
- * @source_sel:		choose alternate function for the gpio. Certain gpios
- *			can be paired (shorted) with each other. Some gpio pin
- *			can act as alternate functions. This parameter should
- *			be of type QPNP_GPIO_SRC_*.
- * @master_en:		QPNP_GPIO_MASTER_ENABLE = Enable features within the
- *			GPIO block based on configurations.
- *			QPNP_GPIO_MASTER_DISABLE = Completely disable the GPIO
- *			block and let the pin float with high impedance
- *			regardless of other settings.
- */
-struct qpnp_gpio_cfg {
-	unsigned int direction;
-	unsigned int output_type;
-	unsigned int invert;
-	unsigned int pull;
-	unsigned int vin_sel;
-	unsigned int out_strength;
-	unsigned int src_select;
-	unsigned int master_en;
-};
-
-/**
- * qpnp_gpio_config - Apply gpio configuration for Linux gpio
- * @gpio: Linux gpio number to configure.
- * @param: parameters to configure.
- *
- * This routine takes a Linux gpio number that corresponds with a
- * PMIC gpio and applies the configuration specified in 'param'.
- * This gpio number can be ascertained by of_get_gpio_flags() or
- * the qpnp_gpio_map_gpio() API.
- */
-int qpnp_gpio_config(int gpio, struct qpnp_gpio_cfg *param);
-
-/**
- * qpnp_gpio_map_gpio - Obtain Linux GPIO number from device spec
- * @slave_id: slave_id of the spmi_device for the gpio in question.
- * @pmic_gpio: PMIC gpio number to lookup.
- *
- * This routine is used in legacy configurations that do not support
- * Device Tree. If you are using Device Tree, you should not use this.
- * For such cases, use of_get_gpio() instead.
- */
-int qpnp_gpio_map_gpio(uint16_t slave_id, uint32_t pmic_gpio);
diff --git a/include/linux/qpnp/pin.h b/include/linux/qpnp/pin.h
new file mode 100644
index 0000000..fa9c30f
--- /dev/null
+++ b/include/linux/qpnp/pin.h
@@ -0,0 +1,190 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/* Mode select */
+#define QPNP_PIN_MODE_DIG_IN			0
+#define QPNP_PIN_MODE_DIG_OUT			1
+#define QPNP_PIN_MODE_DIG_IN_OUT		2
+#define QPNP_PIN_MODE_BIDIR			3
+#define QPNP_PIN_MODE_AIN			4
+#define QPNP_PIN_MODE_AOUT			5
+#define QPNP_PIN_MODE_SINK			6
+
+/* Invert source select (GPIO, MPP) */
+#define QPNP_PIN_INVERT_DISABLE			0
+#define QPNP_PIN_INVERT_ENABLE			1
+
+/* Output type (GPIO) */
+#define QPNP_PIN_OUT_BUF_CMOS			0
+#define QPNP_PIN_OUT_BUF_OPEN_DRAIN_NMOS	1
+#define QPNP_PIN_OUT_BUF_OPEN_DRAIN_PMOS	2
+
+/* Voltage select (GPIO, MPP) */
+#define QPNP_PIN_VIN0				0
+#define QPNP_PIN_VIN1				1
+#define QPNP_PIN_VIN2				2
+#define QPNP_PIN_VIN3				3
+#define QPNP_PIN_VIN4				4
+#define QPNP_PIN_VIN5				5
+#define QPNP_PIN_VIN6				6
+#define QPNP_PIN_VIN7				7
+
+/* Pull Up Values (GPIO) */
+#define QPNP_PIN_GPIO_PULL_UP_30		0
+#define QPNP_PIN_GPIO_PULL_UP_1P5		1
+#define QPNP_PIN_GPIO_PULL_UP_31P5		2
+#define QPNP_PIN_GPIO_PULL_UP_1P5_30		3
+#define QPNP_PIN_GPIO_PULL_DN			4
+#define QPNP_PIN_GPIO_PULL_NO			5
+
+/* Pull Up Values (MPP) */
+#define QPNP_PIN_MPP_PULL_UP_0P6KOHM		0
+#define QPNP_PIN_MPP_PULL_UP_OPEN		1
+#define QPNP_PIN_MPP_PULL_UP_10KOHM		2
+#define QPNP_PIN_MPP_PULL_UP_30KOHM		3
+
+/* Out Strength (GPIO) */
+#define QPNP_PIN_OUT_STRENGTH_LOW		1
+#define QPNP_PIN_OUT_STRENGTH_MED		2
+#define QPNP_PIN_OUT_STRENGTH_HIGH		3
+
+/* Source Select (GPIO) / Enable Select (MPP) */
+#define QPNP_PIN_SEL_FUNC_CONSTANT		0
+#define QPNP_PIN_SEL_FUNC_PAIRED		1
+#define QPNP_PIN_SEL_FUNC_1			2
+#define QPNP_PIN_SEL_FUNC_2			3
+#define QPNP_PIN_SEL_DTEST1			4
+#define QPNP_PIN_SEL_DTEST2			5
+#define QPNP_PIN_SEL_DTEST3			6
+#define QPNP_PIN_SEL_DTEST4			7
+
+/* Master enable (GPIO, MPP) */
+#define QPNP_PIN_MASTER_DISABLE			0
+#define QPNP_PIN_MASTER_ENABLE			1
+
+/* Analog Output (MPP) */
+#define QPNP_PIN_AOUT_1V25			0
+#define QPNP_PIN_AOUT_0V625			1
+#define QPNP_PIN_AOUT_0V3125			2
+#define QPNP_PIN_AOUT_MPP			3
+#define QPNP_PIN_AOUT_ABUS1			4
+#define QPNP_PIN_AOUT_ABUS2			5
+#define QPNP_PIN_AOUT_ABUS3			6
+#define QPNP_PIN_AOUT_ABUS4			7
+
+/* Analog Input (MPP) */
+#define QPNP_PIN_AIN_AMUX_CH5			0
+#define QPNP_PIN_AIN_AMUX_CH6			1
+#define QPNP_PIN_AIN_AMUX_CH7			2
+#define QPNP_PIN_AIN_AMUX_CH8			3
+#define QPNP_PIN_AIN_AMUX_ABUS1			4
+#define QPNP_PIN_AIN_AMUX_ABUS2			5
+#define QPNP_PIN_AIN_AMUX_ABUS3			6
+#define QPNP_PIN_AIN_AMUX_ABUS4			7
+
+/* Current Sink (MPP) */
+#define QPNP_PIN_CS_OUT_5MA			0
+#define QPNP_PIN_CS_OUT_10MA			1
+#define QPNP_PIN_CS_OUT_15MA			2
+#define QPNP_PIN_CS_OUT_20MA			3
+#define QPNP_PIN_CS_OUT_25MA			4
+#define QPNP_PIN_CS_OUT_30MA			5
+#define QPNP_PIN_CS_OUT_35MA			6
+#define QPNP_PIN_CS_OUT_40MA			7
+
+/**
+ * struct qpnp_pin_cfg - structure to specify pin configuration values
+ * @mode:		indicates whether the pin should be input, output, or
+ *			both for gpios. mpp pins also support bidirectional,
+ *			analog in, analog out and current sink. This value
+ *			should be of type QPNP_PIN_MODE_*.
+ * @output_type:	indicates pin should be configured as CMOS or open
+ *			drain. Should be of the type QPNP_PIN_OUT_BUF_*. This
+ *			setting applies for gpios only.
+ * @invert:		Invert the signal of the line -
+ *			QPNP_PIN_INVERT_DISABLE or QPNP_PIN_INVERT_ENABLE.
+ * @pull:		This parameter should be programmed to different values
+ *			depending on whether it's GPIO or MPP.
+ *			For GPIO, it indicates whether a pull up or pull down
+ *			should be applied. If a pullup is required the
+ *			current strength needs to be specified.
+ *			Current values of 30uA, 1.5uA, 31.5uA, 1.5uA with 30uA
+ *			boost are supported. This value should be one of
+ *			the QPNP_PIN_GPIO_PULL_*. Note that the hardware ignores
+ *			this configuration if the GPIO is not set to input or
+ *			output open-drain mode.
+ *			For MPP, it indicates whether a pullup should be
+ *			applied for bidirectional mode only. The hardware
+ *			ignores the configuration when operating in other modes.
+ *			This value should be one of the QPNP_PIN_MPP_PULL_*.
+ * @vin_sel:		specifies the voltage level when the output is set to 1.
+ *			For an input gpio specifies the voltage level at which
+ *			the input is interpreted as a logical 1.
+ * @out_strength:	the amount of current supplied for an output gpio,
+ *			should be of the type QPNP_PIN_OUT_STRENGTH_*.
+ * @select:		select alternate function for the pin. Certain pins
+ *			can be paired (shorted) with each other. Some pins
+ *			can act as alternate functions. In the context of
+ *			gpio, this acts as a source select. For mpps,
+ *			this is an enable select.
+ *			This parameter should be of type QPNP_PIN_SEL_*.
+ * @master_en:		QPNP_PIN_MASTER_ENABLE = Enable features within the
+ *			pin block based on configurations.
+ *			QPNP_PIN_MASTER_DISABLE = Completely disable the pin
+ *			block and let the pin float with high impedance
+ *			regardless of other settings.
+ * @aout_ref:		Set the analog output reference. This parameter should
+ *			be of type QPNP_PIN_AOUT_*. This parameter only applies
+ *			to mpp pins.
+ * @ain_route:		Set the source for analog input. This parameter
+ *			should be of type QPNP_PIN_AIN_*. This parameter only
+ *			applies to mpp pins.
+ * @cs_out:		Set the amount of current to sink in mA. This
+ *			parameter should be of type QPNP_PIN_CS_OUT_*. This
+ *			parameter only applies to mpp pins.
+ */
+struct qpnp_pin_cfg {
+	int mode;
+	int output_type;
+	int invert;
+	int pull;
+	int vin_sel;
+	int out_strength;
+	int select;
+	int master_en;
+	int aout_ref;
+	int ain_route;
+	int cs_out;
+};
+
+/**
+ * qpnp_pin_config - Apply pin configuration for Linux gpio
+ * @gpio: Linux gpio number to configure.
+ * @param: parameters to configure.
+ *
+ * This routine takes a Linux gpio number that corresponds with a
+ * PMIC pin and applies the configuration specified in 'param'.
+ * This gpio number can be ascertained by of_get_gpio_flags() or
+ * the qpnp_pin_map() API.
+ */
+int qpnp_pin_config(int gpio, struct qpnp_pin_cfg *param);
+
+/**
+ * qpnp_pin_map - Obtain Linux GPIO number from device spec
+ * @name: Name assigned by the 'label' binding for the primary node.
+ * @pmic_pin: PMIC pin number to lookup.
+ *
+ * This routine is used in legacy configurations that do not support
+ * Device Tree. If you are using Device Tree, you should not use this.
+ * For such cases, use of_get_gpio() or friends instead.
+ */
+int qpnp_pin_map(const char *name, uint32_t pmic_pin);
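
A minimal usage sketch of this header, assuming a hypothetical consumer driver
(the gpio lookup via <linux/of_gpio.h>, the chosen settings and the error
handling are illustrative, not mandated by this patch):

	static int example_configure_pmic_pin(struct device_node *np)
	{
		struct qpnp_pin_cfg cfg = {
			.mode		= QPNP_PIN_MODE_DIG_OUT,
			.output_type	= QPNP_PIN_OUT_BUF_CMOS,
			.invert		= QPNP_PIN_INVERT_DISABLE,
			.pull		= QPNP_PIN_GPIO_PULL_NO,
			.vin_sel	= QPNP_PIN_VIN2,
			.out_strength	= QPNP_PIN_OUT_STRENGTH_LOW,
			.select		= QPNP_PIN_SEL_FUNC_CONSTANT,
			.master_en	= QPNP_PIN_MASTER_ENABLE,
		};
		/* gpio index 0 of the consumer node is an assumption */
		int gpio = of_get_gpio(np, 0);

		if (gpio < 0)
			return gpio;
		return qpnp_pin_config(gpio, &cfg);
	}
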
diff --git a/include/linux/qseecom.h b/include/linux/qseecom.h
index 62b5efe..0fcf96f 100644
--- a/include/linux/qseecom.h
+++ b/include/linux/qseecom.h
@@ -107,6 +107,14 @@
 	unsigned int qseos_version; /* in */
 };
 
+/*
+ * struct qseecom_qseos_app_load_query - verify if app is loaded in qsee
+ * @app_name: name of the app (up to MAX_APP_NAME_SIZE bytes).
+ */
+struct qseecom_qseos_app_load_query {
+	char app_name[MAX_APP_NAME_SIZE]; /* in */
+};
+
 #define QSEECOM_IOC_MAGIC    0x97
 
 
@@ -152,4 +160,8 @@
 #define QSEECOM_IOCTL_UNLOAD_EXTERNAL_ELF_REQ \
 	_IO(QSEECOM_IOC_MAGIC, 14)
 
+#define QSEECOM_IOCTL_APP_LOADED_QUERY_REQ \
+	_IOWR(QSEECOM_IOC_MAGIC, 15, struct qseecom_qseos_app_load_query)
+
+
 #endif /* __QSEECOM_H_ */
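
A hedged userspace sketch of the new query ioctl; the device node path and the
interpretation of the return value are assumptions, since neither is defined in
this header:

	#include <fcntl.h>
	#include <string.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/qseecom.h>

	int example_query_app(const char *name)
	{
		struct qseecom_qseos_app_load_query query;
		int fd = open("/dev/qseecom", O_RDWR);	/* assumed node path */
		int ret;

		if (fd < 0)
			return -1;
		memset(&query, 0, sizeof(query));
		strncpy(query.app_name, name, sizeof(query.app_name) - 1);
		ret = ioctl(fd, QSEECOM_IOCTL_APP_LOADED_QUERY_REQ, &query);
		close(fd);
		return ret;	/* how "already loaded" is reported is driver-defined */
	}
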
diff --git a/include/linux/smux.h b/include/linux/smux.h
index 308f969..24a6371 100644
--- a/include/linux/smux.h
+++ b/include/linux/smux.h
@@ -77,6 +77,8 @@
 	SMUX_TIOCM_UPDATE,
 	SMUX_LOW_WM_HIT,      /* @metadata is NULL */
 	SMUX_HIGH_WM_HIT,     /* @metadata is NULL */
+	SMUX_RX_RETRY_HIGH_WM_HIT,  /* @metadata is NULL */
+	SMUX_RX_RETRY_LOW_WM_HIT,   /* @metadata is NULL */
 };
 
 /**
@@ -86,6 +88,7 @@
 	SMUX_CH_OPTION_LOCAL_LOOPBACK = 1 << 0,
 	SMUX_CH_OPTION_REMOTE_LOOPBACK = 1 << 1,
 	SMUX_CH_OPTION_REMOTE_TX_STOP = 1 << 2,
+	SMUX_CH_OPTION_AUTO_REMOTE_TX_STOP = 1 << 3,
 };
 
 /**
diff --git a/include/linux/spmi.h b/include/linux/spmi.h
index 927978a..f94b5c5 100644
--- a/include/linux/spmi.h
+++ b/include/linux/spmi.h
@@ -92,11 +92,19 @@
  * @num_resources: number of resources for this device node
  * @resources: array of resources for this device_node
  * @of_node: device_node of the resource in question
+ * @label: name used to reference the device from the driver
+ *
+ * Note that we explicitly add a 'label' pointer here since per
+ * the ePAPR 2.2.2, the device_node->name should be generic and not
+ * reflect the precise programming model. Thus 'label' enables a
+ * platform-specific name to be assigned with the 'label' binding to
+ * allow for unique query names.
  */
 struct spmi_resource {
 	struct resource		*resource;
 	u32			num_resources;
 	struct device_node	*of_node;
+	const char		*label;
 };
 
 /**
@@ -108,7 +116,8 @@
  *  @dev: Driver model representation of the device.
  *  @name: Name of driver to use with this device.
  *  @ctrl: SPMI controller managing the bus hosting this device.
- *  @dev_node: array of SPMI resources - one entry per device_node.
+ *  @res: SPMI resource for the primary node
+ *  @dev_node: array of SPMI resources when used with spmi-dev-container.
  *  @num_dev_node: number of device_node structures.
  *  @sid: Slave Identifier.
  */
@@ -116,6 +125,7 @@
 	struct device		dev;
 	const char		*name;
 	struct spmi_controller	*ctrl;
+	struct spmi_resource	res;
 	struct spmi_resource	*dev_node;
 	u32			num_dev_node;
 	u8			sid;
@@ -124,10 +134,12 @@
 
 /**
  * struct spmi_boardinfo: Declare board info for SPMI device bringup.
+ * @name: Name of driver to use with this device.
  * @slave_id: slave identifier.
  * @spmi_device: device to be registered with the SPMI framework.
  * @of_node: pointer to the OpenFirmware device node.
- * @dev_node: one spmi_resource for each device_node.
+ * @res: SPMI resource for the primary node
+ * @dev_node: array of SPMI resources when used with spmi-dev-container.
  * @num_dev_node: number of device_node structures.
  * @platform_data: goes to spmi_device.dev.platform_data
  */
@@ -135,6 +147,7 @@
 	char			name[SPMI_NAME_SIZE];
 	uint8_t			slave_id;
 	struct device_node	*of_node;
+	struct spmi_resource	res;
 	struct spmi_resource	*dev_node;
 	u32			num_dev_node;
 	const void		*platform_data;
@@ -417,4 +430,49 @@
  * -ETIMEDOUT if the SPMI transaction times out.
  */
 extern int spmi_command_shutdown(struct spmi_controller *ctrl, u8 sid);
+
+/**
+ * spmi_for_each_container_dev - iterate over the array of devnode resources.
+ * @res: spmi_resource pointer used as the array cursor
+ * @spmi_dev: spmi_device to iterate
+ *
+ * Only useable in spmi-dev-container configurations.
+ */
+#define spmi_for_each_container_dev(res, spmi_dev)			      \
+	for (res = ((spmi_dev)->dev_node ? &(spmi_dev)->dev_node[0] : NULL);  \
+	     (res - (spmi_dev)->dev_node) < (spmi_dev)->num_dev_node; res++)
+
+extern struct resource *spmi_get_resource(struct spmi_device *dev,
+				      struct spmi_resource *node,
+				      unsigned int type, unsigned int res_num);
+
+struct resource *spmi_get_resource_byname(struct spmi_device *dev,
+					  struct spmi_resource *node,
+					  unsigned int type,
+					  const char *name);
+
+extern int spmi_get_irq(struct spmi_device *dev, struct spmi_resource *node,
+						 unsigned int res_num);
+
+extern int spmi_get_irq_byname(struct spmi_device *dev,
+			       struct spmi_resource *node, const char *name);
+
+/**
+ * spmi_get_primary_dev_name - return device name for spmi node
+ * @dev: spmi device handle
+ *
+ * Get the primary node name of a spmi_device corresponding to
+ * the 'label' binding.
+ *
+ * Returns NULL if no primary dev name has been assigned to this spmi_device.
+ */
+static inline const char *spmi_get_primary_dev_name(struct spmi_device *dev)
+{
+	if (dev->res.label)
+		return dev->res.label;
+	return NULL;
+}
+
+struct spmi_resource *spmi_get_dev_container_byname(struct spmi_device *dev,
+						    const char *label);
 #endif
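
A brief sketch of how a child driver might use the new primary-resource and
container helpers; the probe body and the resource indices are illustrative
assumptions:

	static int example_spmi_probe(struct spmi_device *spmi)
	{
		struct spmi_resource *node;
		struct resource *mem;
		int irq;

		pr_info("probing %s\n", spmi_get_primary_dev_name(spmi));

		/* Walk each device_node grouped under a spmi-dev-container */
		spmi_for_each_container_dev(node, spmi) {
			mem = spmi_get_resource(spmi, node, IORESOURCE_MEM, 0);
			irq = spmi_get_irq(spmi, node, 0);
			if (!mem || irq < 0)
				return -ENODEV;
			/* ... set up the block described by mem/irq ... */
		}
		return 0;
	}
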
diff --git a/include/linux/test-iosched.h b/include/linux/test-iosched.h
new file mode 100644
index 0000000..8054409
--- /dev/null
+++ b/include/linux/test-iosched.h
@@ -0,0 +1,233 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * The test scheduler allows testing a block device by dispatching
+ * specific requests according to the test case and declaring PASS/FAIL
+ * according to the requests' completion error codes.
+ * Each test is exposed via debugfs and can be triggered by writing to
+ * the debugfs file.
+ *
+ */
+
+#ifndef _LINUX_TEST_IOSCHED_H
+#define _LINUX_TEST_IOSCHED_H
+
+/*
+ * Patterns definitions for read/write requests data
+ */
+#define TEST_PATTERN_SEQUENTIAL	-1
+#define TEST_PATTERN_5A		0x5A5A5A5A
+#define TEST_PATTERN_FF		0xFFFFFFFF
+#define TEST_NO_PATTERN		0xDEADBEEF
+#define BIO_U32_SIZE 1024
+
+struct test_data;
+
+typedef int (prepare_test_fn) (struct test_data *);
+typedef int (run_test_fn) (struct test_data *);
+typedef int (check_test_result_fn) (struct test_data *);
+typedef int (post_test_fn) (struct test_data *);
+typedef char* (get_test_case_str_fn) (struct test_data *);
+typedef void (blk_dev_test_init_fn) (void);
+typedef void (blk_dev_test_exit_fn) (void);
+
+/**
+ * enum test_state - defines the state of the test
+ */
+enum test_state {
+	TEST_IDLE,
+	TEST_RUNNING,
+	TEST_COMPLETED,
+};
+
+/**
+ * enum test_results - defines the success or failure of the test
+ */
+enum test_results {
+	TEST_NO_RESULT,
+	TEST_FAILED,
+	TEST_PASSED,
+	TEST_NOT_SUPPORTED,
+};
+
+/**
+ * enum req_unique_type - defines a unique request type
+ */
+enum req_unique_type {
+	REQ_UNIQUE_NONE,
+	REQ_UNIQUE_DISCARD,
+	REQ_UNIQUE_FLUSH,
+};
+
+/**
+ * struct test_debug - debugfs directories
+ * @debug_root:		The test-iosched debugfs root directory
+ * @debug_utils_root:	test-iosched debugfs utils root
+ *			directory
+ * @debug_tests_root:	test-iosched debugfs tests root
+ *			directory
+ * @debug_test_result:	Exposes the test result to the user
+ *			space
+ * @start_sector:	The start sector for read/write requests
+ */
+struct test_debug {
+	struct dentry *debug_root;
+	struct dentry *debug_utils_root;
+	struct dentry *debug_tests_root;
+	struct dentry *debug_test_result;
+	struct dentry *start_sector;
+};
+
+/**
+ * struct test_request - defines a test request
+ * @queuelist:		The test requests list
+ * @bios_buffer:	Write/read requests data buffer
+ * @buf_size:		Write/read requests data buffer size (in
+ *			bytes)
+ * @rq:			A block request, to be dispatched
+ * @req_completed:	A flag to indicate if the request was
+ *			completed
+ * @req_result:		Keeps the error code received in the
+ *			request completion callback
+ * @is_err_expected:	A flag to indicate if the request should
+ *			fail
+ * @wr_rd_data_pattern:	A pattern written to the write data
+ *			buffer. Can be used in read requests to
+ *			verify the data
+ * @req_id:		A unique ID to identify a test request
+ *			to ease the debugging of the test cases
+ */
+struct test_request {
+	struct list_head queuelist;
+	unsigned int *bios_buffer;
+	int buf_size;
+	struct request *rq;
+	bool req_completed;
+	int req_result;
+	int is_err_expected;
+	int wr_rd_data_pattern;
+	int req_id;
+};
+
+/**
+ * struct test_info - specific test information
+ * @testcase:		The current running test case
+ * @timeout_msec:	Test specific test timeout
+ * @buf_size:		Write/read requests data buffer size (in
+ *			bytes)
+ * @prepare_test_fn:	Test specific test preparation callback
+ * @run_test_fn:	Test specific test running callback
+ * @check_test_result_fn: Test specific test result checking
+ *			callback
+ * @post_test_fn:	Test specific test cleanup callback
+ * @get_test_case_str_fn: Test specific function to get the test name
+ * @data:		Test specific private data
+ */
+struct test_info {
+	int testcase;
+	unsigned timeout_msec;
+	prepare_test_fn *prepare_test_fn;
+	run_test_fn *run_test_fn;
+	check_test_result_fn *check_test_result_fn;
+	post_test_fn *post_test_fn;
+	get_test_case_str_fn *get_test_case_str_fn;
+	void *data;
+};
+
+/**
+ * struct blk_dev_test_type - identifies block device test
+ * @list:	list head pointer
+ * @init_fn:	block device test init callback
+ * @exit_fn:	block device test exit callback
+ */
+struct blk_dev_test_type {
+	struct list_head list;
+	blk_dev_test_init_fn *init_fn;
+	blk_dev_test_exit_fn *exit_fn;
+};
+
+/**
+ * struct test_data - global test iosched data
+ * @queue:		The test IO scheduler requests list
+ * @test_queue:		The test requests list
+ * @next_req:		Points to the next request to be
+ *			dispatched from the test requests list
+ * @wait_q:		A wait queue for waiting for the test
+ *			requests completion
+ * @test_state:		Indicates if there is a running test.
+ *			Used for dispatch function
+ * @test_result:	Indicates if the test passed or failed
+ * @debug:		The test debugfs entries
+ * @req_q:		The block layer request queue
+ * @num_of_write_bios:	The number of write BIOs added to the test requests.
+ *			Used to calculate the sector number of
+ *			new BIOs.
+ * @start_sector:	The address of the first sector that can
+ *			be accessed by the test
+ * @timeout_timer:	A timer to verify test completion in
+ *			case of non-completed requests
+ * @wr_rd_next_req_id:	A unique ID to identify WRITE/READ
+ *			request to ease the debugging of the
+ *			test cases
+ * @unique_next_req_id:	A unique ID to identify
+ *			FLUSH/DISCARD/SANITIZE request to ease
+ *			the debugging of the test cases
+ * @lock:		A lock to verify running a single test
+ *			at a time
+ * @test_info:		A specific test data to be set by the
+ *			test invocation function
+ * @fs_wr_reqs_during_test: A flag to indicate whether FS write
+ *			requests were inserted during the test
+ * @ignore_round:	A boolean variable indicating that a
+ *			test round was disturbed by an external
+ *			flush request, therefore disqualifying
+ *			the results
+ */
+struct test_data {
+	struct list_head queue;
+	struct list_head test_queue;
+	struct test_request *next_req;
+	wait_queue_head_t wait_q;
+	enum test_state test_state;
+	enum test_results test_result;
+	struct test_debug debug;
+	struct request_queue *req_q;
+	int num_of_write_bios;
+	u32 start_sector;
+	struct timer_list timeout_timer;
+	int wr_rd_next_req_id;
+	int unique_next_req_id;
+	spinlock_t lock;
+	struct test_info test_info;
+	bool fs_wr_reqs_during_test;
+	bool ignore_round;
+};
+
+extern int test_iosched_start_test(struct test_info *t_info);
+extern void test_iosched_mark_test_completion(void);
+extern int test_iosched_add_unique_test_req(int is_err_expcted,
+		enum req_unique_type req_unique,
+		int start_sec, int nr_sects, rq_end_io_fn *end_req_io);
+extern int test_iosched_add_wr_rd_test_req(int is_err_expcted,
+	      int direction, int start_sec,
+	      int num_bios, int pattern, rq_end_io_fn *end_req_io);
+
+extern struct dentry *test_iosched_get_debugfs_tests_root(void);
+extern struct dentry *test_iosched_get_debugfs_utils_root(void);
+
+extern struct request_queue *test_iosched_get_req_queue(void);
+
+extern void test_iosched_set_test_result(int);
+
+void test_iosched_set_ignore_round(bool ignore_round);
+
+void test_iosched_register(struct blk_dev_test_type *bdt);
+
+void test_iosched_unregister(struct blk_dev_test_type *bdt);
+
+#endif /* _LINUX_TEST_IOSCHED_H */
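
A hedged sketch of how a block-device test utility is expected to plug into
these interfaces; the callback bodies, the direction value (WRITE from
<linux/fs.h>) and the debugfs trigger path are assumptions for illustration:

	static int example_run(struct test_data *td)
	{
		/* dispatch one sequential WRITE request of a single BIO */
		return test_iosched_add_wr_rd_test_req(0, WRITE, td->start_sector,
						       1, TEST_PATTERN_5A, NULL);
	}

	/* Typically called from a debugfs write handler created by init_fn */
	static int example_trigger_test(void)
	{
		struct test_info info = {
			.timeout_msec	= 30 * 1000,
			.run_test_fn	= example_run,
		};

		return test_iosched_start_test(&info);
	}

	static void example_init(void)
	{
		/* create this utility's debugfs trigger files here */
	}

	static void example_exit(void)
	{
		/* and remove them here */
	}

	static struct blk_dev_test_type example_bdt = {
		.init_fn = example_init,
		.exit_fn = example_exit,
	};
	/* test_iosched_register(&example_bdt) from the utility's module init */
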
diff --git a/include/linux/usb/android.h b/include/linux/usb/android.h
index 6d3c3ad..bf65ebb 100644
--- a/include/linux/usb/android.h
+++ b/include/linux/usb/android.h
@@ -20,6 +20,7 @@
 struct android_usb_platform_data {
 	int (*update_pid_and_serial_num)(uint32_t, const char *);
 	u32 swfi_latency;
+	u8 usb_core_id;
 };
 
 #endif	/* __LINUX_USB_ANDROID_H */
diff --git a/include/linux/usb/gadget.h b/include/linux/usb/gadget.h
index 1a945e7..2e51781 100644
--- a/include/linux/usb/gadget.h
+++ b/include/linux/usb/gadget.h
@@ -506,6 +506,8 @@
  * @name: Identifies the controller hardware type.  Used in diagnostics
  *	and sometimes configuration.
  * @dev: Driver model state for this abstract device.
+ * @usb_core_id: Identifies the usb core controlled by this usb_gadget.
+ *		 Used in case more than one core operates concurrently.
  *
  * Gadgets have a mostly-portable "gadget driver" implementing device
  * functions, handling all usb configurations and interfaces.  Gadget
@@ -542,6 +544,7 @@
 	unsigned			otg_srp_reqd:1;
 	const char			*name;
 	struct device			dev;
+	u8				usb_core_id;
 };
 
 static inline void set_gadget_data(struct usb_gadget *gadget, void *data)
@@ -793,6 +796,8 @@
  * @suspend: Invoked on USB suspend.  May be called in_interrupt.
  * @resume: Invoked on USB resume.  May be called in_interrupt.
  * @driver: Driver model state for this driver.
+ * @usb_core_id: Identifies the usb core controlled by this usb_gadget_driver.
+ *               Used in case more than one core operates concurrently.
  *
  * Devices are disabled till a gadget driver successfully bind()s, which
  * means the driver will handle setup() requests needed to enumerate (and
@@ -850,6 +855,8 @@
 
 	/* FIXME support safe rmmod */
 	struct device_driver	driver;
+
+	u8			usb_core_id;
 };
 
 
diff --git a/include/linux/usb/hcd.h b/include/linux/usb/hcd.h
index d9ec332..dc13bd9 100644
--- a/include/linux/usb/hcd.h
+++ b/include/linux/usb/hcd.h
@@ -212,6 +212,7 @@
 #define	HCD_MEMORY	0x0001		/* HC regs use memory (else I/O) */
 #define	HCD_LOCAL_MEM	0x0002		/* HC needs local memory */
 #define	HCD_SHARED	0x0004		/* Two (or more) usb_hcds share HW */
+#define	HCD_OLD_ENUM	0x0008		/* HC supports short enumeration */
 #define	HCD_USB11	0x0010		/* USB 1.1 */
 #define	HCD_USB2	0x0020		/* USB 2.0 */
 #define	HCD_USB3	0x0040		/* USB 3.0 */
@@ -348,6 +349,9 @@
 	/* to log completion events*/
 	void	(*log_urb_complete)(struct urb *urb, char * event,
 			unsigned extra);
+	void	(*dump_regs)(struct usb_hcd *);
+	void	(*enable_ulpi_control)(struct usb_hcd *hcd, u32 linestate);
+	void	(*disable_ulpi_control)(struct usb_hcd *hcd);
 };
 
 extern int usb_hcd_link_urb_to_ep(struct usb_hcd *hcd, struct urb *urb);
diff --git a/include/linux/usb/msm_hsusb.h b/include/linux/usb/msm_hsusb.h
index 9da1999..68c1ffc 100644
--- a/include/linux/usb/msm_hsusb.h
+++ b/include/linux/usb/msm_hsusb.h
@@ -137,6 +137,9 @@
  *			or more downstream ports. Capable of supplying
  *			IDEV_CHG_MAX irrespective of devices connected on
  *			accessory ports.
+ * USB_PROPRIETARY_CHARGER A proprietary charger pulls DP and DM to specific
+ *			voltages between 2.0V and 3.3V for identification.
+ *
  */
 enum usb_chg_type {
 	USB_INVALID_CHARGER = 0,
@@ -147,6 +150,7 @@
 	USB_ACA_B_CHARGER,
 	USB_ACA_C_CHARGER,
 	USB_ACA_DOCK_CHARGER,
+	USB_PROPRIETARY_CHARGER,
 };
 
 /**
diff --git a/include/linux/usb/msm_hsusb_hw.h b/include/linux/usb/msm_hsusb_hw.h
index 8a05136..516f764 100644
--- a/include/linux/usb/msm_hsusb_hw.h
+++ b/include/linux/usb/msm_hsusb_hw.h
@@ -33,6 +33,8 @@
 #define PORTSC_PHCD            (1 << 23) /* phy suspend mode */
 #define PORTSC_PTS_MASK         (3 << 30)
 #define PORTSC_PTS_ULPI         (3 << 30)
+#define PORTSC_LS               (3 << 10)
+#define PORTSC_LS_DM            (1 << 10)
 #define PORTSC_CSC              (1 << 1)
 #define PORTSC_CCS              (1 << 0)
 
diff --git a/include/linux/videodev2.h b/include/linux/videodev2.h
index 63ebdea..42f7349 100644
--- a/include/linux/videodev2.h
+++ b/include/linux/videodev2.h
@@ -2296,10 +2296,13 @@
 #define V4L2_EVENT_PRIVATE_START		0x08000000
 
 #define V4L2_EVENT_MSM_VIDC_START	(V4L2_EVENT_PRIVATE_START + 0x00001000)
-#define V4L2_EVENT_MSM_VIDC_FLUSH_DONE	(V4L2_EVENT_PRIVATE_START + 1)
-#define V4L2_EVENT_MSM_VIDC_PORT_SETTINGS_CHANGED	\
-		(V4L2_EVENT_PRIVATE_START + 2)
-#define V4L2_EVENT_MSM_VIDC_CLOSE_DONE	(V4L2_EVENT_PRIVATE_START + 3)
+#define V4L2_EVENT_MSM_VIDC_FLUSH_DONE	(V4L2_EVENT_MSM_VIDC_START + 1)
+#define V4L2_EVENT_MSM_VIDC_PORT_SETTINGS_CHANGED_SUFFICIENT	\
+		(V4L2_EVENT_MSM_VIDC_START + 2)
+#define V4L2_EVENT_MSM_VIDC_PORT_SETTINGS_CHANGED_INSUFFICIENT	\
+		(V4L2_EVENT_MSM_VIDC_START + 3)
+#define V4L2_EVENT_MSM_VIDC_CLOSE_DONE	(V4L2_EVENT_MSM_VIDC_START + 4)
+
 
 /* Payload for V4L2_EVENT_VSYNC */
 struct v4l2_event_vsync {
diff --git a/include/media/msm_camera.h b/include/media/msm_camera.h
index 3308243..81b6a40 100644
--- a/include/media/msm_camera.h
+++ b/include/media/msm_camera.h
@@ -29,6 +29,8 @@
 
 #include <linux/ion.h>
 
+#define BIT(nr)   (1UL << (nr))
+
 #define MSM_CAM_IOCTL_MAGIC 'm'
 
 #define MSM_CAM_IOCTL_GET_SENSOR_INFO \
@@ -446,12 +448,12 @@
 #define CMD_VFE_BUFFER_RELEASE 51
 #define CMD_VFE_PROCESS_IRQ 52
 
-#define CMD_AXI_CFG_PRIM		0xc1
-#define CMD_AXI_CFG_PRIM_ALL_CHNLS	0xc2
-#define CMD_AXI_CFG_SEC			0xc4
-#define CMD_AXI_CFG_SEC_ALL_CHNLS	0xc8
-#define CMD_AXI_CFG_TERT1		0xd0
-
+#define CMD_AXI_CFG_PRIM               BIT(8)
+#define CMD_AXI_CFG_PRIM_ALL_CHNLS     BIT(9)
+#define CMD_AXI_CFG_SEC                BIT(10)
+#define CMD_AXI_CFG_SEC_ALL_CHNLS      BIT(11)
+#define CMD_AXI_CFG_TERT1              BIT(12)
+#define CMD_AXI_CFG_TERT2              BIT(13)
 
 #define CMD_AXI_START  0xE1
 #define CMD_AXI_STOP   0xE2
@@ -551,25 +553,31 @@
 #define OUTPUT_ZSL_ALL_CHNLS 10
 #define LAST_AXI_OUTPUT_MODE_ENUM = OUTPUT_ZSL_ALL_CHNLS
 
-#define OUTPUT_PRIM		0xC1
-#define OUTPUT_PRIM_ALL_CHNLS	0xC2
-#define OUTPUT_SEC		0xC4
-#define OUTPUT_SEC_ALL_CHNLS	0xC8
-#define OUTPUT_TERT1		0xD0
+#define OUTPUT_PRIM              BIT(8)
+#define OUTPUT_PRIM_ALL_CHNLS    BIT(9)
+#define OUTPUT_SEC               BIT(10)
+#define OUTPUT_SEC_ALL_CHNLS     BIT(11)
+#define OUTPUT_TERT1             BIT(12)
+#define OUTPUT_TERT2             BIT(13)
+
 
 
 #define MSM_FRAME_PREV_1	0
 #define MSM_FRAME_PREV_2	1
 #define MSM_FRAME_ENC		2
 
-#define OUTPUT_TYPE_P    (1<<0)
-#define OUTPUT_TYPE_T    (1<<1)
-#define OUTPUT_TYPE_S    (1<<2)
-#define OUTPUT_TYPE_V    (1<<3)
-#define OUTPUT_TYPE_L    (1<<4)
-#define OUTPUT_TYPE_ST_L (1<<5)
-#define OUTPUT_TYPE_ST_R (1<<6)
-#define OUTPUT_TYPE_ST_D (1<<7)
+#define OUTPUT_TYPE_P    BIT(0)
+#define OUTPUT_TYPE_T    BIT(1)
+#define OUTPUT_TYPE_S    BIT(2)
+#define OUTPUT_TYPE_V    BIT(3)
+#define OUTPUT_TYPE_L    BIT(4)
+#define OUTPUT_TYPE_ST_L BIT(5)
+#define OUTPUT_TYPE_ST_R BIT(6)
+#define OUTPUT_TYPE_ST_D BIT(7)
+#define OUTPUT_TYPE_R    BIT(8)
+#define OUTPUT_TYPE_R1   BIT(9)
+
+
 
 struct fd_roi_info {
 	void *info;
@@ -685,7 +693,13 @@
 	(MSM_V4L2_EXT_CAPTURE_MODE_DEFAULT+4)
 #define MSM_V4L2_EXT_CAPTURE_MODE_RAW \
 	(MSM_V4L2_EXT_CAPTURE_MODE_DEFAULT+5)
-#define MSM_V4L2_EXT_CAPTURE_MODE_MAX (MSM_V4L2_EXT_CAPTURE_MODE_DEFAULT+6)
+#define MSM_V4L2_EXT_CAPTURE_MODE_RDI \
+	(MSM_V4L2_EXT_CAPTURE_MODE_DEFAULT+6)
+#define MSM_V4L2_EXT_CAPTURE_MODE_RDI1 \
+	(MSM_V4L2_EXT_CAPTURE_MODE_DEFAULT+7)
+#define MSM_V4L2_EXT_CAPTURE_MODE_RDI2 \
+	(MSM_V4L2_EXT_CAPTURE_MODE_DEFAULT+8)
+#define MSM_V4L2_EXT_CAPTURE_MODE_MAX (MSM_V4L2_EXT_CAPTURE_MODE_DEFAULT+9)
 
 
 #define MSM_V4L2_PID_MOTION_ISO              V4L2_CID_PRIVATE_BASE
@@ -1659,4 +1673,86 @@
 #define MSM_IRQROUTER_CFG_COMPIRQ \
 	_IOWR('V', BASE_VIDIOC_PRIVATE, void __user *)
 
+#define MAX_NUM_CPP_STRIPS 8
+
+enum msm_cpp_frame_type {
+	MSM_CPP_OFFLINE_FRAME,
+	MSM_CPP_REALTIME_FRAME,
+};
+
+struct msm_cpp_frame_strip_info {
+	int scale_v_en;
+	int scale_h_en;
+
+	int upscale_v_en;
+	int upscale_h_en;
+
+	int src_start_x;
+	int src_end_x;
+	int src_start_y;
+	int src_end_y;
+
+	/* Padding is required for the upscaler because it does not
+	 * pad internally like other blocks. It is also needed for rotation:
+	 * rotation expects all the blocks in the stripe to be the same size.
+	 * Padding is done such that all the extra padded pixels
+	 * are on the right and bottom.
+	 */
+	int pad_bottom;
+	int pad_top;
+	int pad_right;
+	int pad_left;
+
+	int v_init_phase;
+	int h_init_phase;
+	int h_phase_step;
+	int v_phase_step;
+
+	int prescale_crop_width_first_pixel;
+	int prescale_crop_width_last_pixel;
+	int prescale_crop_height_first_line;
+	int prescale_crop_height_last_line;
+
+	int postscale_crop_height_first_line;
+	int postscale_crop_height_last_line;
+	int postscale_crop_width_first_pixel;
+	int postscale_crop_width_last_pixel;
+
+	int dst_start_x;
+	int dst_end_x;
+	int dst_start_y;
+	int dst_end_y;
+
+	int bytes_per_pixel;
+	unsigned int source_address;
+	unsigned int destination_address;
+	unsigned int src_stride;
+	unsigned int dst_stride;
+	int rotate_270;
+	int horizontal_flip;
+	int vertical_flip;
+	int scale_output_width;
+	int scale_output_height;
+};
+
+struct msm_cpp_frame_info_t {
+	int32_t frame_id;
+	uint32_t inst_id;
+	uint32_t client_id;
+	enum msm_cpp_frame_type frame_type;
+	uint32_t num_strips;
+	struct msm_cpp_frame_strip_info *strip_info;
+};
+
+#define VIDIOC_MSM_CPP_CFG \
+	_IOWR('V', BASE_VIDIOC_PRIVATE, struct msm_camera_v4l2_ioctl_t)
+
+#define VIDIOC_MSM_CPP_GET_EVENTPAYLOAD \
+	_IOWR('V', BASE_VIDIOC_PRIVATE + 1, struct msm_camera_v4l2_ioctl_t)
+
+#define VIDIOC_MSM_CPP_GET_INST_INFO \
+	_IOWR('V', BASE_VIDIOC_PRIVATE + 2, struct msm_camera_v4l2_ioctl_t)
+
+#define V4L2_EVENT_CPP_FRAME_DONE  (V4L2_EVENT_PRIVATE_START + 0)
+
 #endif /* __LINUX_MSM_CAMERA_H */
diff --git a/include/media/msm_isp.h b/include/media/msm_isp.h
index 93f6c8b..6547795 100644
--- a/include/media/msm_isp.h
+++ b/include/media/msm_isp.h
@@ -60,7 +60,8 @@
 #define MSG_ID_OUTPUT_SECONDARY         41
 #define MSG_ID_STATS_COMPOSITE          42
 #define MSG_ID_OUTPUT_TERTIARY1         43
-
+#define MSG_ID_STOP_LS_ACK              44
+#define MSG_ID_OUTPUT_TERTIARY2         45
 
 /* ISP command IDs */
 #define VFE_CMD_DUMMY_0                                 0
@@ -317,20 +318,19 @@
 	/* TBD: 3D related */
 };
 
-#define VFE_OUTPUTS_MAIN_AND_PREVIEW	BIT(0)
-#define VFE_OUTPUTS_MAIN_AND_VIDEO	BIT(1)
-#define VFE_OUTPUTS_MAIN_AND_THUMB	BIT(2)
-#define VFE_OUTPUTS_THUMB_AND_MAIN	BIT(3)
-#define VFE_OUTPUTS_PREVIEW_AND_VIDEO	BIT(4)
-#define VFE_OUTPUTS_VIDEO_AND_PREVIEW	BIT(5)
-#define VFE_OUTPUTS_PREVIEW		BIT(6)
-#define VFE_OUTPUTS_VIDEO		BIT(7)
-#define VFE_OUTPUTS_RAW			BIT(8)
-#define VFE_OUTPUTS_JPEG_AND_THUMB	BIT(9)
-#define VFE_OUTPUTS_THUMB_AND_JPEG	BIT(10)
-#define VFE_OUTPUTS_RDI0	BIT(11)
-
-
+#define VFE_OUTPUTS_MAIN_AND_PREVIEW    BIT(0)
+#define VFE_OUTPUTS_MAIN_AND_VIDEO      BIT(1)
+#define VFE_OUTPUTS_MAIN_AND_THUMB      BIT(2)
+#define VFE_OUTPUTS_THUMB_AND_MAIN      BIT(3)
+#define VFE_OUTPUTS_PREVIEW_AND_VIDEO   BIT(4)
+#define VFE_OUTPUTS_VIDEO_AND_PREVIEW   BIT(5)
+#define VFE_OUTPUTS_PREVIEW             BIT(6)
+#define VFE_OUTPUTS_VIDEO               BIT(7)
+#define VFE_OUTPUTS_RAW                 BIT(8)
+#define VFE_OUTPUTS_JPEG_AND_THUMB      BIT(9)
+#define VFE_OUTPUTS_THUMB_AND_JPEG      BIT(10)
+#define VFE_OUTPUTS_RDI0                BIT(11)
+#define VFE_OUTPUTS_RDI1                BIT(12)
 
 struct msm_frame_info {
 	uint32_t image_mode;
diff --git a/include/media/radio-iris.h b/include/media/radio-iris.h
index b5e8f2e..25a1d84 100644
--- a/include/media/radio-iris.h
+++ b/include/media/radio-iris.h
@@ -489,6 +489,29 @@
 	__u8    in_det_out;
 } __packed;
 
+#define CLKSPURID_INDEX0	0
+#define CLKSPURID_INDEX1	5
+#define CLKSPURID_INDEX2	10
+#define CLKSPURID_INDEX3	15
+#define CLKSPURID_INDEX4	20
+#define CLKSPURID_INDEX5	25
+
+#define MAX_SPUR_FREQ_LIMIT	30
+#define CKK_SPUR		0x3B
+#define SPUR_DATA_SIZE		0x4
+#define SPUR_ENTRIES_PER_ID	0x5
+
+#define COMPUTE_SPUR(val)         ((((val) - (76000)) / (50)))
+#define GET_FREQ(val, bit)        ((bit == 1) ? ((val) >> 8) : ((val) & 0xFF))
+#define GET_SPUR_ENTRY_LEVEL(val) ((val) / (5))
+
+struct hci_fm_spur_data {
+	__u32	freq[MAX_SPUR_FREQ_LIMIT];
+	__s8	rmssi[MAX_SPUR_FREQ_LIMIT];
+	__u8	enable[MAX_SPUR_FREQ_LIMIT];
+} __packed;
+
+
 /* HCI dev events */
 #define RADIO_HCI_DEV_REG			1
 #define RADIO_HCI_DEV_WRITE			2
@@ -572,6 +595,10 @@
 	V4L2_CID_PRIVATE_INTF_HIGH_THRESHOLD,
 	V4L2_CID_PRIVATE_SINR_THRESHOLD,
 	V4L2_CID_PRIVATE_SINR_SAMPLES,
+	V4L2_CID_PRIVATE_SPUR_FREQ,
+	V4L2_CID_PRIVATE_SPUR_FREQ_RMSSI,
+	V4L2_CID_PRIVATE_SPUR_SELECTION,
+	V4L2_CID_PRIVATE_UPDATE_SPUR_TABLE,
 
 	/*using private CIDs under userclass*/
 	V4L2_CID_PRIVATE_IRIS_READ_DEFAULT = 0x00980928,
@@ -680,6 +707,14 @@
 	RDS_AF_JUMP,
 };
 
+enum spur_entry_levels {
+	ENTRY_0,
+	ENTRY_1,
+	ENTRY_2,
+	ENTRY_3,
+	ENTRY_4,
+	ENTRY_5,
+};
 
 /* Band limits */
 #define REGION_US_EU_BAND_LOW              87500
@@ -774,6 +809,7 @@
 #define RDS_SYNC_INTR   (1 << 1)
 #define AUDIO_CTRL_INTR (1 << 2)
 #define AF_JUMP_ENABLE  (1 << 4)
+
 int hci_def_data_read(struct hci_fm_def_data_rd_req *arg,
 	struct radio_hci_dev *hdev);
 int hci_def_data_write(struct hci_fm_def_data_wr_req *arg,
diff --git a/include/media/tavarua.h b/include/media/tavarua.h
index 9943287..adbdada 100644
--- a/include/media/tavarua.h
+++ b/include/media/tavarua.h
@@ -395,6 +395,22 @@
 
 #define	FM_TX_PWR_LVL_0		0 /* Lowest power lvl that can be set for Tx */
 #define	FM_TX_PWR_LVL_MAX	7 /* Max power lvl for Tx */
+
+/* Tone Generator control value */
+#define TONE_GEN_CTRL_BYTE		 0x00
+#define TONE_CHANNEL_EN_AND_SCALING_BYTE 0x01
+#define TONE_LEFT_FREQ_BYTE		 0x02
+#define TONE_RIGHT_FREQ_BYTE		 0x03
+#define TONE_LEFT_PHASE			 0x04
+#define TONE_RIGHT_PHASE		 0x05
+
+#define TONE_LEFT_CH_ENABLED		 0x01
+#define TONE_RIGHT_CH_ENABLED		 0x02
+#define TONE_LEFT_RIGHT_CH_ENABLED	 (TONE_LEFT_CH_ENABLED\
+						 | TONE_RIGHT_CH_ENABLED)
+
+#define TONE_SCALING_SHIFT		 0x02
+
 /* Transfer */
 enum tavarua_xfr_ctrl_t {
 	RDS_PS_0 = 0x01,
@@ -453,6 +469,7 @@
 	PHY_CONFIG,
 	PHY_TXBLOCK,
 	PHY_TCB,
+	XFR_EXT,
 	XFR_PEEK_MODE = 0x40,
 	XFR_POKE_MODE = 0xC0,
 	TAVARUA_XFR_CTRL_MAX
@@ -503,6 +520,7 @@
 	TWELVE_BYTE,
 	THIRTEEN_BYTE
 };
+
 #define XFR_READ		(0)
 #define XFR_WRITE		(1)
 #define XFR_MODE_OFFSET		(0)
@@ -531,4 +549,28 @@
 	__u8   data[XFR_REG_NUM];
 } __packed;
 
+enum Internal_tone_gen_vals {
+	ONE_KHZ_LR_EQUA_0DBFS = 1,
+	ONE_KHZ_LEFTONLY_EQUA_0DBFS,
+	ONE_KHZ_RIGHTONLY_EQUA_0DBFS,
+	ONE_KHZ_LR_EQUA_l8DBFS,
+	FIFTEEN_KHZ_LR_EQUA_l8DBFS
+};
+
+enum Tone_scaling_indexes {
+	TONE_SCALE_IND_0,
+	TONE_SCALE_IND_1,
+	TONE_SCALE_IND_2,
+	TONE_SCALE_IND_3,
+	TONE_SCALE_IND_4,
+	TONE_SCALE_IND_5,
+	TONE_SCALE_IND_6,
+	TONE_SCALE_IND_7,
+	TONE_SCALE_IND_8,
+	TONE_SCALE_IND_9,
+	TONE_SCALE_IND_10,
+	TONE_SCALE_IND_11,
+	TONE_SCALE_IND_12
+};
+
 #endif /* __LINUX_TAVARUA_H */
diff --git a/include/net/bluetooth/l2cap.h b/include/net/bluetooth/l2cap.h
index 9a03a12..e312ab3 100644
--- a/include/net/bluetooth/l2cap.h
+++ b/include/net/bluetooth/l2cap.h
@@ -534,6 +534,7 @@
 
 	__u16		tx_win;
 	__u16		tx_win_max;
+	__u16		ack_win;
 	__u8		max_tx;
 	__u8		amp_pref;
 	__u16		remote_tx_win;
@@ -653,6 +654,7 @@
 #define L2CAP_ATT_MTU_RSP			0x03
 #define L2CAP_ATT_RESPONSE_BIT			0x01
 #define L2CAP_ATT_INDICATE			0x1D
+#define L2CAP_ATT_CONFIRM			0x1E
 #define L2CAP_ATT_NOT_SUPPORTED			0x06
 
 #define __delta_seq(x, y, pi) ((x) >= (y) ? (x) - (y) : \
diff --git a/include/sound/apr_audio-v2.h b/include/sound/apr_audio-v2.h
index 695fea9..8e8778a 100644
--- a/include/sound/apr_audio-v2.h
+++ b/include/sound/apr_audio-v2.h
@@ -2322,6 +2322,9 @@
 } __packed;
 
 #define ASM_END_POINT_DEVICE_MATRIX     0
+
+#define PCM_CHANNEL_NULL 0
+
 /* Front left channel. */
 #define PCM_CHANNEL_FL    1
 
@@ -2444,7 +2447,7 @@
 } __packed;
 
 struct asm_stream_cmd_set_encdec_param {
-		u32                  param_id;
+	u32                  param_id;
 	/* ID of the parameter. */
 
 	u32                  param_size;
@@ -2573,9 +2576,6 @@
  * - 6 -- 5.1 content
  */
 
-	u16          reserved;
-	/* Reserved. Clients must set this field to zero. */
-
 	u16          total_size_of_PCE_bits;
 /* greater or equal to zero. * -In case of RAW formats and
  * channel config = 0 (PCE), client can send * the bit stream
@@ -2986,6 +2986,8 @@
 	u16          enc_options;
 	/* Options used during encoding. */
 
+	u16          reserved;
+
 } __packed;
 
 #define ASM_MEDIA_FMT_WMA_V8                    0x00010D91
@@ -4495,7 +4497,6 @@
 struct asm_dec_out_chan_map_param {
 	struct apr_hdr hdr;
 	struct asm_stream_cmd_set_encdec_param  encdec;
-	struct asm_enc_cfg_blk_param_v2	encblk;
 	u32                 num_channels;
 /* Number of decoder output channels.
  * Supported values: 0 to #MAX_CHAN_MAP_CHANNELS
diff --git a/include/sound/apr_audio.h b/include/sound/apr_audio.h
index 431dedf..c770f13 100644
--- a/include/sound/apr_audio.h
+++ b/include/sound/apr_audio.h
@@ -1127,6 +1127,7 @@
 #define AC3_DECODER  0x00010BF6
 #define EAC3_DECODER 0x00010C3C
 #define DTS	0x00010D88
+#define DTS_LBR	0x00010DBB
 #define ATRAC	0x00010D89
 #define MAT	0x00010D8A
 #define G711_ALAW_FS 0x00010BF7
@@ -1145,6 +1146,13 @@
 #define ASM_ENCDEC_IMMDIATE_DECODE 0x00010C14
 #define ASM_ENCDEC_CFG_BLK         0x00010C2C
 
+#define ASM_STREAM_CMD_OPEN_READ_COMPRESSED               0x00010D95
+struct asm_stream_cmd_open_read_compressed {
+	struct apr_hdr hdr;
+	u32            uMode;
+	u32            frame_per_buf;
+} __packed;
+
 #define ASM_STREAM_CMD_OPEN_WRITE                        0x00010BCA
 struct asm_stream_cmd_open_write {
 	struct apr_hdr hdr;
@@ -1185,6 +1193,17 @@
 	u16	afe_port_id;
 } __packed;
 
+#define ADM_CMD_CONNECT_AFE_PORT_V2 0x00010332
+
+struct adm_cmd_connect_afe_port_v2 {
+	struct apr_hdr     hdr;
+	u8	mode; /* mode represents whether the interface is RX or TX */
+	u8	session_id; /*ASM session ID*/
+	u16	afe_port_id;
+	u32	num_channels;
+	u32	sampleing_rate;
+} __packed;
+
 #define ASM_STREAM_CMD_SET_ENCDEC_PARAM                  0x00010C10
 #define ASM_STREAM_CMD_GET_ENCDEC_PARAM                  0x00010C11
 #define ASM_ENCDEC_CFG_BLK_ID				 0x00010C2C
diff --git a/include/sound/compress_offload.h b/include/sound/compress_offload.h
index 9769dea..e59d29c 100644
--- a/include/sound/compress_offload.h
+++ b/include/sound/compress_offload.h
@@ -123,6 +123,16 @@
 };
 
 /**
+ * struct snd_compr_audio_info: compressed input audio information
+ * @frame_size: length of the encoded frame with valid data
+ * @reserved: reserved for future use
+ */
+struct snd_compr_audio_info {
+	uint32_t frame_size;
+	uint32_t reserved[15];
+};
+
+/**
  * compress path ioctl definitions
  * SNDRV_COMPRESS_GET_CAPS: Query capability of DSP
  * SNDRV_COMPRESS_GET_CODEC_CAPS: Query capability of a codec
diff --git a/include/sound/compress_params.h b/include/sound/compress_params.h
index 5aa7b09..75558bf 100644
--- a/include/sound/compress_params.h
+++ b/include/sound/compress_params.h
@@ -70,10 +70,14 @@
 #define SND_AUDIOCODEC_IEC61937              ((__u32) 0x0000000B)
 #define SND_AUDIOCODEC_G723_1                ((__u32) 0x0000000C)
 #define SND_AUDIOCODEC_G729                  ((__u32) 0x0000000D)
-#define SND_AUDIOCODEC_AC3		     ((__u32) 0x0000000E)
-#define SND_AUDIOCODEC_DTS		     ((__u32) 0x0000000F)
-#define SND_AUDIOCODEC_AC3_PASS_THROUGH		((__u32) 0x00000010)
-#define SND_AUDIOCODEC_WMA_PRO		     ((__u32) 0x00000011)
+#define SND_AUDIOCODEC_AC3                   ((__u32) 0x0000000E)
+#define SND_AUDIOCODEC_DTS                   ((__u32) 0x0000000F)
+#define SND_AUDIOCODEC_AC3_PASS_THROUGH      ((__u32) 0x00000010)
+#define SND_AUDIOCODEC_WMA_PRO               ((__u32) 0x00000011)
+#define SND_AUDIOCODEC_DTS_PASS_THROUGH      ((__u32) 0x00000012)
+#define SND_AUDIOCODEC_DTS_LBR               ((__u32) 0x00000013)
+#define SND_AUDIOCODEC_DTS_TRANSCODE_LOOPBACK ((__u32) 0x00000014)
+
 /*
  * Profile and modes are listed with bit masks. This allows for a
  * more compact representation of fields that will not evolve
@@ -241,6 +245,8 @@
 	__u32 bits_per_sample;
 	__u32 channelmask;
 	__u32 encodeopt;
+	__u32 encodeopt1;
+	__u32 encodeopt2;
 };
 
 
diff --git a/include/sound/q6adm.h b/include/sound/q6adm.h
index 56594d4..8e15955 100644
--- a/include/sound/q6adm.h
+++ b/include/sound/q6adm.h
@@ -43,6 +43,8 @@
 int adm_connect_afe_port(int mode, int session_id, int port_id);
 int adm_disconnect_afe_port(int mode, int session_id, int port_id);
 
+void adm_ec_ref_rx_id(int  port_id);
+
 #ifdef CONFIG_RTAC
 int adm_get_copp_id(int port_id);
 #endif
diff --git a/include/sound/q6asm-v2.h b/include/sound/q6asm-v2.h
index 7ef15ac..2a555b2 100644
--- a/include/sound/q6asm-v2.h
+++ b/include/sound/q6asm-v2.h
@@ -64,8 +64,11 @@
 /* Enable Sample_Rate/Channel_Mode notification event from Decoder */
 #define SR_CM_NOTIFY_ENABLE	0x0004
 
-#define ASYNC_IO_MODE	0x0002
 #define SYNC_IO_MODE	0x0001
+#define ASYNC_IO_MODE	0x0002
+#define NT_MODE        0x0400
+
+
 #define NO_TIMESTAMP    0xFF00
 #define SET_TIMESTAMP   0x0000
 
@@ -230,6 +233,9 @@
 int q6asm_set_encdec_chan_map(struct audio_client *ac,
 		uint32_t num_channels);
 
+int q6asm_enc_cfg_blk_pcm_native(struct audio_client *ac,
+			uint32_t rate, uint32_t channels);
+
 int q6asm_enable_sbrps(struct audio_client *ac,
 			uint32_t sbr_ps);
 
diff --git a/include/sound/q6asm.h b/include/sound/q6asm.h
index ee90797..84e3150 100644
--- a/include/sound/q6asm.h
+++ b/include/sound/q6asm.h
@@ -48,6 +48,7 @@
 #define FORMAT_ATRAC	0x0016
 #define FORMAT_MAT	0x0017
 #define FORMAT_AAC	0x0018
+#define FORMAT_DTS_LBR 0x0019
 
 #define ENCDEC_SBCBITRATE   0x0001
 #define ENCDEC_IMMEDIATE_DECODE 0x0002
@@ -180,6 +181,8 @@
 
 int q6asm_open_read(struct audio_client *ac, uint32_t format);
 
+int q6asm_open_read_compressed(struct audio_client *ac, uint32_t format);
+
 int q6asm_open_write(struct audio_client *ac, uint32_t format);
 
 int q6asm_open_write_compressed(struct audio_client *ac, uint32_t format);
@@ -239,6 +242,9 @@
 int q6asm_enc_cfg_blk_pcm(struct audio_client *ac,
 			uint32_t rate, uint32_t channels);
 
+int q6asm_enc_cfg_blk_pcm_native(struct audio_client *ac,
+			uint32_t rate, uint32_t channels);
+
 int q6asm_enc_cfg_blk_multi_ch_pcm(struct audio_client *ac,
 			uint32_t rate, uint32_t channels);
 
diff --git a/kernel/irq/irqdomain.c b/kernel/irq/irqdomain.c
index 0aa96d3..0e0ba5f 100644
--- a/kernel/irq/irqdomain.c
+++ b/kernel/irq/irqdomain.c
@@ -1,312 +1,780 @@
+#include <linux/debugfs.h>
+#include <linux/hardirq.h>
+#include <linux/interrupt.h>
 #include <linux/irq.h>
+#include <linux/irqdesc.h>
 #include <linux/irqdomain.h>
 #include <linux/module.h>
 #include <linux/mutex.h>
 #include <linux/of.h>
 #include <linux/of_address.h>
+#include <linux/seq_file.h>
 #include <linux/slab.h>
+#include <linux/smp.h>
+#include <linux/fs.h>
+
+#define IRQ_DOMAIN_MAP_LEGACY 0 /* driver allocated fixed range of irqs.
+				 * ie. legacy 8259, gets irqs 1..15 */
+#define IRQ_DOMAIN_MAP_NOMAP 1 /* no fast reverse mapping */
+#define IRQ_DOMAIN_MAP_LINEAR 2 /* linear map of interrupts */
+#define IRQ_DOMAIN_MAP_TREE 3 /* radix tree */
 
 static LIST_HEAD(irq_domain_list);
 static DEFINE_MUTEX(irq_domain_mutex);
 
-/**
- * irq_domain_add() - Register an irq_domain
- * @domain: ptr to initialized irq_domain structure
- *
- * Adds a irq_domain structure.  The irq_domain must at a minimum be
- * initialized with an ops structure pointer, and either a ->to_irq hook or
- * a valid irq_base value.  The irq range must be mutually exclusive with
- * domains already registered. Everything else is optional.
- */
-int irq_domain_add(struct irq_domain *domain)
-{
-	struct irq_domain *curr;
-	uint32_t d_highirq = domain->irq_base + domain->nr_irq - 1;
+static DEFINE_MUTEX(revmap_trees_mutex);
+static struct irq_domain *irq_default_domain;
 
-	if (!domain->nr_irq)
-		return -EINVAL;
+/**
+ * irq_domain_alloc() - Allocate a new irq_domain data structure
+ * @of_node: optional device-tree node of the interrupt controller
+ * @revmap_type: type of reverse mapping to use
+ * @ops: map/unmap domain callbacks
+ * @host_data: Controller private data pointer
+ *
+ * Allocates and initializes an irq_domain structure.  Caller is expected to
+ * register allocated irq_domain with irq_domain_register().  Returns pointer
+ * to IRQ domain, or NULL on failure.
+ */
+static struct irq_domain *irq_domain_alloc(struct device_node *of_node,
+					   unsigned int revmap_type,
+					   const struct irq_domain_ops *ops,
+					   void *host_data)
+{
+	struct irq_domain *domain;
+
+	domain = kzalloc(sizeof(*domain), GFP_KERNEL);
+	if (WARN_ON(!domain))
+		return NULL;
+
+	/* Fill structure */
+	domain->revmap_type = revmap_type;
+	domain->ops = ops;
+	domain->host_data = host_data;
+	domain->of_node = of_node_get(of_node);
+
+	return domain;
+}
+
+static void irq_domain_add(struct irq_domain *domain)
+{
+	mutex_lock(&irq_domain_mutex);
+	list_add(&domain->link, &irq_domain_list);
+	mutex_unlock(&irq_domain_mutex);
+	pr_debug("irq: Allocated domain of type %d @0x%p\n",
+		 domain->revmap_type, domain);
+}
+
+static unsigned int irq_domain_legacy_revmap(struct irq_domain *domain,
+					     irq_hw_number_t hwirq)
+{
+	irq_hw_number_t first_hwirq = domain->revmap_data.legacy.first_hwirq;
+	int size = domain->revmap_data.legacy.size;
+
+	if (WARN_ON(hwirq < first_hwirq || hwirq >= first_hwirq + size))
+		return 0;
+	return hwirq - first_hwirq + domain->revmap_data.legacy.first_irq;
+}
+
+/**
+ * irq_domain_add_legacy() - Allocate and register a legacy revmap irq_domain.
+ * @of_node: pointer to interrupt controller's device tree node.
+ * @size: total number of irqs in legacy mapping
+ * @first_irq: first number of irq block assigned to the domain
+ * @first_hwirq: first hwirq number to use for the translation. Should normally
+ *               be '0', but a positive integer can be used if the effective
+ *               hwirqs numbering does not begin at zero.
+ * @ops: map/unmap domain callbacks
+ * @host_data: Controller private data pointer
+ *
+ * Note: the map() callback will be called before this function returns
+ * for all legacy interrupts except 0 (which is always the invalid irq for
+ * a legacy controller).
+ */
+struct irq_domain *irq_domain_add_legacy(struct device_node *of_node,
+					 unsigned int size,
+					 unsigned int first_irq,
+					 irq_hw_number_t first_hwirq,
+					 const struct irq_domain_ops *ops,
+					 void *host_data)
+{
+	struct irq_domain *domain;
+	unsigned int i;
+
+	domain = irq_domain_alloc(of_node, IRQ_DOMAIN_MAP_LEGACY, ops, host_data);
+	if (!domain)
+		return NULL;
+
+	domain->revmap_data.legacy.first_irq = first_irq;
+	domain->revmap_data.legacy.first_hwirq = first_hwirq;
+	domain->revmap_data.legacy.size = size;
 
 	mutex_lock(&irq_domain_mutex);
-	/* insert in ascending order of domain->irq_base */
-	list_for_each_entry(curr, &irq_domain_list, list) {
-		uint32_t c_highirq = curr->irq_base + curr->nr_irq - 1;
-		if (domain->irq_base < curr->irq_base &&
-		    d_highirq < curr->irq_base) {
-			break;
-		}
-		if (d_highirq <= c_highirq) {
+	/* Verify that all the irqs are available */
+	for (i = 0; i < size; i++) {
+		int irq = first_irq + i;
+		struct irq_data *irq_data = irq_get_irq_data(irq);
+
+		if (WARN_ON(!irq_data || irq_data->domain)) {
 			mutex_unlock(&irq_domain_mutex);
-			return -EINVAL;
+			of_node_put(domain->of_node);
+			kfree(domain);
+			return NULL;
 		}
 	}
-	list_add_tail(&domain->list, &curr->list);
+
+	/* Claim all of the irqs before registering a legacy domain */
+	for (i = 0; i < size; i++) {
+		struct irq_data *irq_data = irq_get_irq_data(first_irq + i);
+		irq_data->hwirq = first_hwirq + i;
+		irq_data->domain = domain;
+	}
 	mutex_unlock(&irq_domain_mutex);
 
+	for (i = 0; i < size; i++) {
+		int irq = first_irq + i;
+		int hwirq = first_hwirq + i;
+
+		/* IRQ0 gets ignored */
+		if (!irq)
+			continue;
+
+		/* Legacy flags are left to default at this point,
+		 * one can then use irq_create_mapping() to
+		 * explicitly change them
+		 */
+		ops->map(domain, irq, hwirq);
+
+		/* Clear norequest flags */
+		irq_clear_status_flags(irq, IRQ_NOREQUEST);
+	}
+
+	irq_domain_add(domain);
+	return domain;
+}
+
+/**
+ * irq_domain_add_linear() - Allocate and register a linear revmap irq_domain.
+ * @of_node: pointer to interrupt controller's device tree node.
+ * @size: number of interrupts in the linear map
+ * @ops: map/unmap domain callbacks
+ * @host_data: Controller private data pointer
+ */
+struct irq_domain *irq_domain_add_linear(struct device_node *of_node,
+					 unsigned int size,
+					 const struct irq_domain_ops *ops,
+					 void *host_data)
+{
+	struct irq_domain *domain;
+	unsigned int *revmap;
+
+	revmap = kzalloc(sizeof(*revmap) * size, GFP_KERNEL);
+	if (WARN_ON(!revmap))
+		return NULL;
+
+	domain = irq_domain_alloc(of_node, IRQ_DOMAIN_MAP_LINEAR, ops, host_data);
+	if (!domain) {
+		kfree(revmap);
+		return NULL;
+	}
+	domain->revmap_data.linear.size = size;
+	domain->revmap_data.linear.revmap = revmap;
+	irq_domain_add(domain);
+	return domain;
+}
+
+struct irq_domain *irq_domain_add_nomap(struct device_node *of_node,
+					 unsigned int max_irq,
+					 const struct irq_domain_ops *ops,
+					 void *host_data)
+{
+	struct irq_domain *domain = irq_domain_alloc(of_node,
+					IRQ_DOMAIN_MAP_NOMAP, ops, host_data);
+	if (domain) {
+		domain->revmap_data.nomap.max_irq = max_irq ? max_irq : ~0;
+		irq_domain_add(domain);
+	}
+	return domain;
+}
+
+/**
+ * irq_domain_add_tree() - Allocate and register a radix-tree revmap irq_domain.
+ * @of_node: pointer to interrupt controller's device tree node.
+ * @ops: map/unmap domain callbacks
+ * @host_data: Controller private data pointer
+ *
+ * Note: The radix tree will be allocated later during boot automatically
+ * (the reverse mapping will use the slow path until that happens).
+ */
+struct irq_domain *irq_domain_add_tree(struct device_node *of_node,
+					 const struct irq_domain_ops *ops,
+					 void *host_data)
+{
+	struct irq_domain *domain = irq_domain_alloc(of_node,
+					IRQ_DOMAIN_MAP_TREE, ops, host_data);
+	if (domain) {
+		INIT_RADIX_TREE(&domain->revmap_data.tree, GFP_KERNEL);
+		irq_domain_add(domain);
+	}
+	return domain;
+}
+
+/**
+ * irq_find_host() - Locates a domain for a given device node
+ * @node: device-tree node of the interrupt controller
+ */
+struct irq_domain *irq_find_host(struct device_node *node)
+{
+	struct irq_domain *h, *found = NULL;
+	int rc;
+
+	/* We might want to match the legacy controller last since
+	 * it might potentially be set to match all interrupts in
+	 * the absence of a device node. This isn't a problem yet,
+	 * though...
+	 */
+	mutex_lock(&irq_domain_mutex);
+	list_for_each_entry(h, &irq_domain_list, link) {
+		if (h->ops->match)
+			rc = h->ops->match(h, node);
+		else
+			rc = (h->of_node != NULL) && (h->of_node == node);
+
+		if (rc) {
+			found = h;
+			break;
+		}
+	}
+	mutex_unlock(&irq_domain_mutex);
+	return found;
+}
+EXPORT_SYMBOL_GPL(irq_find_host);
+
+/**
+ * irq_set_default_host() - Set a "default" irq domain
+ * @domain: default domain pointer
+ *
+ * For convenience, it's possible to set a "default" domain that will be used
+ * whenever NULL is passed to irq_create_mapping(). It makes life easier for
+ * platforms that want to manipulate a few hard coded interrupt numbers that
+ * aren't properly represented in the device-tree.
+ */
+void irq_set_default_host(struct irq_domain *domain)
+{
+	pr_debug("irq: Default domain set to @0x%p\n", domain);
+
+	irq_default_domain = domain;
+}
+
+static int irq_setup_virq(struct irq_domain *domain, unsigned int virq,
+			    irq_hw_number_t hwirq)
+{
+	struct irq_data *irq_data = irq_get_irq_data(virq);
+
+	irq_data->hwirq = hwirq;
+	irq_data->domain = domain;
+	if (domain->ops->map(domain, virq, hwirq)) {
+		pr_debug("irq: -> mapping failed, freeing\n");
+		irq_data->domain = NULL;
+		irq_data->hwirq = 0;
+		return -1;
+	}
+
+	irq_clear_status_flags(virq, IRQ_NOREQUEST);
+
 	return 0;
 }
 
 /**
- * irq_domain_register() - Register an entire irq_domain
- * @domain: ptr to initialized irq_domain structure
+ * irq_create_direct_mapping() - Allocate an irq for direct mapping
+ * @domain: domain to allocate the irq for or NULL for default domain
  *
- * Registers the entire irq_domain.  The irq_domain must at a minimum be
- * initialized with an ops structure pointer, and either a ->to_irq hook or
- * a valid irq_base value.  Everything else is optional.
+ * This routine is used for irq controllers which can choose the hardware
+ * interrupt numbers they generate. In such a case it's simplest to use
+ * the linux irq as the hardware interrupt number.
  */
-void irq_domain_register(struct irq_domain *domain)
+unsigned int irq_create_direct_mapping(struct irq_domain *domain)
 {
-	struct irq_data *d;
-	int hwirq, irq;
+	unsigned int virq;
 
-	irq_domain_for_each_irq(domain, hwirq, irq) {
-		d = irq_get_irq_data(irq);
-		if (!d) {
-			WARN(1, "error: assigning domain to non existant irq_desc");
-			return;
-		}
-		if (d->domain) {
-			/* things are broken; just report, don't clean up */
-			WARN(1, "error: irq_desc already assigned to a domain");
-			return;
-		}
-		d->domain = domain;
-		d->hwirq = hwirq;
+	if (domain == NULL)
+		domain = irq_default_domain;
+
+	BUG_ON(domain == NULL);
+	WARN_ON(domain->revmap_type != IRQ_DOMAIN_MAP_NOMAP);
+
+	virq = irq_alloc_desc_from(1, 0);
+	if (!virq) {
+		pr_debug("irq: create_direct virq allocation failed\n");
+		return 0;
 	}
+	if (virq >= domain->revmap_data.nomap.max_irq) {
+		pr_err("ERROR: no free irqs available below %i maximum\n",
+			domain->revmap_data.nomap.max_irq);
+		irq_free_desc(virq);
+		return 0;
+	}
+	pr_debug("irq: create_direct obtained virq %d\n", virq);
+
+	if (irq_setup_virq(domain, virq, virq)) {
+		irq_free_desc(virq);
+		return 0;
+	}
+
+	return virq;
 }
 
 /**
- * irq_domain_register_irq() - Register an irq_domain
- * @domain: ptr to initialized irq_domain structure
- * @hwirq: irq_domain hwirq to register
+ * irq_create_mapping() - Map a hardware interrupt into linux irq space
+ * @domain: domain owning this hardware interrupt or NULL for default domain
+ * @hwirq: hardware irq number in that domain space
  *
- * Registers a specific hwirq within the irq_domain.  The irq_domain
- * must at a minimum be initialized with an ops structure pointer, and
- * either a ->to_irq hook or a valid irq_base value.  Everything else is
- * optional.
+ * Only one mapping per hardware interrupt is permitted. Returns a linux
+ * irq number.
+ * If the sense/trigger is to be specified, set_irq_type() should be called
+ * on the number returned from that call.
  */
-void irq_domain_register_irq(struct irq_domain *domain, int hwirq)
+unsigned int irq_create_mapping(struct irq_domain *domain,
+				irq_hw_number_t hwirq)
 {
-	struct irq_data *d;
+	unsigned int hint;
+	int virq;
 
-	d = irq_get_irq_data(irq_domain_to_irq(domain, hwirq));
-	if (!d) {
-		WARN(1, "error: assigning domain to non existant irq_desc");
-		return;
+	pr_debug("irq: irq_create_mapping(0x%p, 0x%lx)\n", domain, hwirq);
+
+	/* Look for default domain if necessary */
+	if (domain == NULL)
+		domain = irq_default_domain;
+	if (domain == NULL) {
+		printk(KERN_WARNING "irq_create_mapping called for"
+		       " NULL domain, hwirq=%lx\n", hwirq);
+		WARN_ON(1);
+		return 0;
 	}
-	if (d->domain) {
-		/* things are broken; just report, don't clean up */
-		WARN(1, "error: irq_desc already assigned to a domain");
-		return;
+	pr_debug("irq: -> using domain @%p\n", domain);
+
+	/* Check if mapping already exists */
+	virq = irq_find_mapping(domain, hwirq);
+	if (virq) {
+		pr_debug("irq: -> existing mapping on virq %d\n", virq);
+		return virq;
 	}
-	d->domain = domain;
-	d->hwirq = hwirq;
-}
 
-/**
- * irq_domain_del() - Removes a irq_domain from the system
- * @domain: ptr to registered irq_domain.
- */
-void irq_domain_del(struct irq_domain *domain)
-{
-	mutex_lock(&irq_domain_mutex);
-	list_del(&domain->list);
-	mutex_unlock(&irq_domain_mutex);
-}
+	/* Get a virtual interrupt number */
+	if (domain->revmap_type == IRQ_DOMAIN_MAP_LEGACY)
+		return irq_domain_legacy_revmap(domain, hwirq);
 
-/**
- * irq_domain_unregister() - Unregister an irq_domain
- * @domain: ptr to registered irq_domain.
- */
-void irq_domain_unregister(struct irq_domain *domain)
-{
-	struct irq_data *d;
-	int hwirq, irq;
-
-	/* Clear the irq_domain assignments */
-	irq_domain_for_each_irq(domain, hwirq, irq) {
-		d = irq_get_irq_data(irq);
-		d->domain = NULL;
+	/* Allocate a virtual interrupt number */
+	hint = hwirq % nr_irqs;
+	if (hint == 0)
+		hint++;
+	virq = irq_alloc_desc_from(hint, 0);
+	if (virq <= 0)
+		virq = irq_alloc_desc_from(1, 0);
+	if (virq <= 0) {
+		pr_debug("irq: -> virq allocation failed\n");
+		return 0;
 	}
-}
 
-/**
- * irq_domain_unregister_irq() - Unregister a hwirq within a irq_domain
- * @domain: ptr to registered irq_domain.
- * @hwirq: irq_domain hwirq to unregister.
- */
-void irq_domain_unregister_irq(struct irq_domain *domain, int hwirq)
-{
-	struct irq_data *d;
-
-	/* Clear the irq_domain assignment */
-	d = irq_get_irq_data(irq_domain_to_irq(domain, hwirq));
-	d->domain = NULL;
-}
-
-/**
- * irq_domain_find_free_range() - Find an available irq range
- * @from: lowest logical irq number to request from
- * @cnt: number of interrupts to search for
- *
- * Finds an available logical irq range from the domains specified
- * on the system. The from parameter can be used to allocate a range
- * at least as great as the specified irq number.
- */
-int irq_domain_find_free_range(unsigned int from, unsigned int cnt)
-{
-	struct irq_domain *curr, *prev = NULL;
-
-	if (list_empty(&irq_domain_list))
-		return from;
-
-	list_for_each_entry(curr, &irq_domain_list, list) {
-		if (prev == NULL) {
-			if ((from + cnt - 1) < curr->irq_base)
-				return from;
-		} else {
-			uint32_t p_next_irq = prev->irq_base + prev->nr_irq;
-			uint32_t start_irq;
-			if (from >= curr->irq_base)
-				continue;
-			if (from < p_next_irq)
-				start_irq = p_next_irq;
-			else
-				start_irq = from;
-			if ((curr->irq_base - start_irq) >= cnt)
-				return p_next_irq;
-		}
-		prev = curr;
+	if (irq_setup_virq(domain, virq, hwirq)) {
+		if (domain->revmap_type != IRQ_DOMAIN_MAP_LEGACY)
+			irq_free_desc(virq);
+		return 0;
 	}
-	curr = list_entry(curr->list.prev, struct irq_domain, list);
 
-	return from > curr->irq_base + curr->nr_irq ?
-	       from : curr->irq_base + curr->nr_irq;
+	pr_debug("irq: irq %lu on domain %s mapped to virtual irq %u\n",
+		hwirq, domain->of_node ? domain->of_node->full_name : "null", virq);
+
+	return virq;
 }
+EXPORT_SYMBOL_GPL(irq_create_mapping);
 
-#if defined(CONFIG_OF_IRQ)
-/**
- * irq_create_of_mapping() - Map a linux irq number from a DT interrupt spec
- *
- * Used by the device tree interrupt mapping code to translate a device tree
- * interrupt specifier to a valid linux irq number.  Returns either a valid
- * linux IRQ number or 0.
- *
- * When the caller no longer need the irq number returned by this function it
- * should arrange to call irq_dispose_mapping().
- */
 unsigned int irq_create_of_mapping(struct device_node *controller,
 				   const u32 *intspec, unsigned int intsize)
 {
 	struct irq_domain *domain;
-	unsigned long hwirq;
-	unsigned int irq, type;
-	int rc = -EINVAL;
+	irq_hw_number_t hwirq;
+	unsigned int type = IRQ_TYPE_NONE;
+	unsigned int virq;
 
-	/* Find a domain which can translate the irq spec */
-	mutex_lock(&irq_domain_mutex);
-	list_for_each_entry(domain, &irq_domain_list, list) {
-		if (!domain->ops->dt_translate)
-			continue;
-
-		rc = domain->ops->dt_translate(domain, controller,
-					intspec, intsize, &hwirq, &type);
-		if (rc == 0)
-			break;
-	}
-	mutex_unlock(&irq_domain_mutex);
-
-	if (rc != 0)
+	domain = controller ? irq_find_host(controller) : irq_default_domain;
+	if (!domain) {
+#ifdef CONFIG_MIPS
+		/*
+		 * Workaround to avoid breaking interrupt controller drivers
+		 * that don't yet register an irq_domain.  This is temporary
+		 * code. ~~~gcl, Feb 24, 2012
+		 *
+		 * Scheduled for removal in Linux v3.6.  That should be enough
+		 * time.
+		 */
+		if (intsize > 0)
+			return intspec[0];
+#endif
+		printk(KERN_WARNING "irq: no irq domain found for %s !\n",
+		       controller->full_name);
 		return 0;
+	}
 
-	irq = irq_domain_to_irq(domain, hwirq);
-	if (type != IRQ_TYPE_NONE)
-		irq_set_irq_type(irq, type);
-	pr_debug("%s: mapped hwirq=%i to irq=%i, flags=%x\n",
-		 controller->full_name, (int)hwirq, irq, type);
-	return irq;
+	/* If domain has no translation, then we assume interrupt line */
+	if (domain->ops->xlate == NULL)
+		hwirq = intspec[0];
+	else {
+		if (domain->ops->xlate(domain, controller, intspec, intsize,
+				     &hwirq, &type))
+			return 0;
+	}
+
+	/* Create mapping */
+	virq = irq_create_mapping(domain, hwirq);
+	if (!virq)
+		return virq;
+
+	/* Set type if specified and different than the current one */
+	if (type != IRQ_TYPE_NONE &&
+	    type != (irqd_get_trigger_type(irq_get_irq_data(virq))))
+		irq_set_irq_type(virq, type);
+	return virq;
 }
 EXPORT_SYMBOL_GPL(irq_create_of_mapping);
 
 /**
- * irq_dispose_mapping() - Discard a mapping created by irq_create_of_mapping()
- * @irq: linux irq number to be discarded
- *
- * Calling this function indicates the caller no longer needs a reference to
- * the linux irq number returned by a prior call to irq_create_of_mapping().
+ * irq_dispose_mapping() - Unmap an interrupt
+ * @virq: linux irq number of the interrupt to unmap
  */
-void irq_dispose_mapping(unsigned int irq)
+void irq_dispose_mapping(unsigned int virq)
 {
-	/*
-	 * nothing yet; will be filled when support for dynamic allocation of
-	 * irq_descs is added to irq_domain
-	 */
+	struct irq_data *irq_data = irq_get_irq_data(virq);
+	struct irq_domain *domain;
+	irq_hw_number_t hwirq;
+
+	if (!virq || !irq_data)
+		return;
+
+	domain = irq_data->domain;
+	if (WARN_ON(domain == NULL))
+		return;
+
+	/* Never unmap legacy interrupts */
+	if (domain->revmap_type == IRQ_DOMAIN_MAP_LEGACY)
+		return;
+
+	irq_set_status_flags(virq, IRQ_NOREQUEST);
+
+	/* remove chip and handler */
+	irq_set_chip_and_handler(virq, NULL, NULL);
+
+	/* Make sure it's completed */
+	synchronize_irq(virq);
+
+	/* Tell the PIC about it */
+	if (domain->ops->unmap)
+		domain->ops->unmap(domain, virq);
+	smp_mb();
+
+	/* Clear reverse map */
+	hwirq = irq_data->hwirq;
+	switch(domain->revmap_type) {
+	case IRQ_DOMAIN_MAP_LINEAR:
+		if (hwirq < domain->revmap_data.linear.size)
+			domain->revmap_data.linear.revmap[hwirq] = 0;
+		break;
+	case IRQ_DOMAIN_MAP_TREE:
+		mutex_lock(&revmap_trees_mutex);
+		radix_tree_delete(&domain->revmap_data.tree, hwirq);
+		mutex_unlock(&revmap_trees_mutex);
+		break;
+	}
+
+	irq_free_desc(virq);
 }
 EXPORT_SYMBOL_GPL(irq_dispose_mapping);
 
-int irq_domain_simple_dt_translate(struct irq_domain *d,
-			    struct device_node *controller,
-			    const u32 *intspec, unsigned int intsize,
-			    unsigned long *out_hwirq, unsigned int *out_type)
+/**
+ * irq_find_mapping() - Find a linux irq from an hw irq number.
+ * @domain: domain owning this hardware interrupt
+ * @hwirq: hardware irq number in that domain space
+ *
+ * This is a slow path, for use by generic code. It's expected that an
+ * irq controller implementation directly calls the appropriate low level
+ * mapping function.
+ */
+unsigned int irq_find_mapping(struct irq_domain *domain,
+			      irq_hw_number_t hwirq)
 {
-	if (d->of_node != controller)
-		return -EINVAL;
-	if (intsize < 1)
-		return -EINVAL;
-	if (d->nr_irq && ((intspec[0] < d->hwirq_base) ||
-	    (intspec[0] >= d->hwirq_base + d->nr_irq)))
-		return -EINVAL;
+	unsigned int i;
+	unsigned int hint = hwirq % nr_irqs;
 
-	*out_hwirq = intspec[0];
-	*out_type = IRQ_TYPE_NONE;
-	if (intsize > 1)
-		*out_type = intspec[1] & IRQ_TYPE_SENSE_MASK;
+	/* Look for default domain if necessary */
+	if (domain == NULL)
+		domain = irq_default_domain;
+	if (domain == NULL)
+		return 0;
+
+	/* legacy -> bail early */
+	if (domain->revmap_type == IRQ_DOMAIN_MAP_LEGACY)
+		return irq_domain_legacy_revmap(domain, hwirq);
+
+	/* Slow path does a linear search of the map */
+	if (hint == 0)
+		hint = 1;
+	i = hint;
+	do {
+		struct irq_data *data = irq_get_irq_data(i);
+		if (data && (data->domain == domain) && (data->hwirq == hwirq))
+			return i;
+		i++;
+		if (i >= nr_irqs)
+			i = 1;
+	} while(i != hint);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(irq_find_mapping);
+
+/**
+ * irq_radix_revmap_lookup() - Find a linux irq from a hw irq number.
+ * @domain: domain owning this hardware interrupt
+ * @hwirq: hardware irq number in that domain space
+ *
+ * This is a fast path, for use by irq controller code that uses radix tree
+ * revmaps
+ */
+unsigned int irq_radix_revmap_lookup(struct irq_domain *domain,
+				     irq_hw_number_t hwirq)
+{
+	struct irq_data *irq_data;
+
+	if (WARN_ON_ONCE(domain->revmap_type != IRQ_DOMAIN_MAP_TREE))
+		return irq_find_mapping(domain, hwirq);
+
+	/*
+	 * Freeing an irq can delete nodes along the path to
+	 * do the lookup via call_rcu.
+	 */
+	rcu_read_lock();
+	irq_data = radix_tree_lookup(&domain->revmap_data.tree, hwirq);
+	rcu_read_unlock();
+
+	/*
+	 * If found in radix tree, then fine.
+	 * Else fall back to a linear lookup - this should not happen in practice
+	 * as it means that we failed to insert the node in the radix tree.
+	 */
+	return irq_data ? irq_data->irq : irq_find_mapping(domain, hwirq);
+}
+
+/**
+ * irq_radix_revmap_insert() - Insert a hw irq to linux irq number mapping.
+ * @domain: domain owning this hardware interrupt
+ * @virq: linux irq number
+ * @hwirq: hardware irq number in that domain space
+ *
+ * This is for use by irq controllers that use a radix tree reverse
+ * mapping for fast lookup.
+ */
+void irq_radix_revmap_insert(struct irq_domain *domain, unsigned int virq,
+			     irq_hw_number_t hwirq)
+{
+	struct irq_data *irq_data = irq_get_irq_data(virq);
+
+	if (WARN_ON(domain->revmap_type != IRQ_DOMAIN_MAP_TREE))
+		return;
+
+	if (virq) {
+		mutex_lock(&revmap_trees_mutex);
+		radix_tree_insert(&domain->revmap_data.tree, hwirq, irq_data);
+		mutex_unlock(&revmap_trees_mutex);
+	}
+}
+
+/**
+ * irq_linear_revmap() - Find a linux irq from a hw irq number.
+ * @domain: domain owning this hardware interrupt
+ * @hwirq: hardware irq number in that domain space
+ *
+ * This is a fast path, for use by irq controller code that uses linear
+ * revmaps. It falls back to the slow path if the revmap doesn't exist
+ * yet and will create the revmap entry with appropriate locking
+ */
+unsigned int irq_linear_revmap(struct irq_domain *domain,
+			       irq_hw_number_t hwirq)
+{
+	unsigned int *revmap;
+
+	if (WARN_ON_ONCE(domain->revmap_type != IRQ_DOMAIN_MAP_LINEAR))
+		return irq_find_mapping(domain, hwirq);
+
+	/* Check revmap bounds */
+	if (unlikely(hwirq >= domain->revmap_data.linear.size))
+		return irq_find_mapping(domain, hwirq);
+
+	/* Check if revmap was allocated */
+	revmap = domain->revmap_data.linear.revmap;
+	if (unlikely(revmap == NULL))
+		return irq_find_mapping(domain, hwirq);
+
+	/* Fill up revmap with slow path if no mapping found */
+	if (unlikely(!revmap[hwirq]))
+		revmap[hwirq] = irq_find_mapping(domain, hwirq);
+
+	return revmap[hwirq];
+}
+
+#ifdef CONFIG_IRQ_DOMAIN_DEBUG
+static int virq_debug_show(struct seq_file *m, void *private)
+{
+	unsigned long flags;
+	struct irq_desc *desc;
+	const char *p;
+	static const char none[] = "none";
+	void *data;
+	int i;
+
+	seq_printf(m, "%-5s  %-7s  %-15s  %-*s  %s\n", "irq", "hwirq",
+		      "chip name", (int)(2 * sizeof(void *) + 2), "chip data",
+		      "domain name");
+
+	for (i = 1; i < nr_irqs; i++) {
+		desc = irq_to_desc(i);
+		if (!desc)
+			continue;
+
+		raw_spin_lock_irqsave(&desc->lock, flags);
+
+		if (desc->action && desc->action->handler) {
+			struct irq_chip *chip;
+
+			seq_printf(m, "%5d  ", i);
+			seq_printf(m, "0x%05lx  ", desc->irq_data.hwirq);
+
+			chip = irq_desc_get_chip(desc);
+			if (chip && chip->name)
+				p = chip->name;
+			else
+				p = none;
+			seq_printf(m, "%-15s  ", p);
+
+			data = irq_desc_get_chip_data(desc);
+			seq_printf(m, data ? "0x%p  " : "  %p  ", data);
+
+			if (desc->irq_data.domain && desc->irq_data.domain->of_node)
+				p = desc->irq_data.domain->of_node->full_name;
+			else
+				p = none;
+			seq_printf(m, "%s\n", p);
+		}
+
+		raw_spin_unlock_irqrestore(&desc->lock, flags);
+	}
+
+	return 0;
+}
+
+static int virq_debug_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, virq_debug_show, inode->i_private);
+}
+
+static const struct file_operations virq_debug_fops = {
+	.open = virq_debug_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
+static int __init irq_debugfs_init(void)
+{
+	if (debugfs_create_file("irq_domain_mapping", S_IRUGO, NULL,
+				 NULL, &virq_debug_fops) == NULL)
+		return -ENOMEM;
+
+	return 0;
+}
+__initcall(irq_debugfs_init);
+#endif /* CONFIG_IRQ_DOMAIN_DEBUG */
+
+int irq_domain_simple_map(struct irq_domain *d, unsigned int irq,
+			  irq_hw_number_t hwirq)
+{
 	return 0;
 }
 
 /**
- * irq_domain_create_simple() - Set up a 'simple' translation range
+ * irq_domain_xlate_onecell() - Generic xlate for direct one cell bindings
+ *
+ * Device Tree IRQ specifier translation function which works with one cell
+ * bindings where the cell value maps directly to the hwirq number.
  */
-void irq_domain_add_simple(struct device_node *controller, int irq_base)
+int irq_domain_xlate_onecell(struct irq_domain *d, struct device_node *ctrlr,
+			     const u32 *intspec, unsigned int intsize,
+			     unsigned long *out_hwirq, unsigned int *out_type)
 {
-	struct irq_domain *domain;
-	int rc;
-
-	domain = kzalloc(sizeof(*domain), GFP_KERNEL);
-	if (!domain) {
-		WARN_ON(1);
-		return;
-	}
-
-	domain->irq_base = irq_base;
-	domain->of_node = of_node_get(controller);
-	domain->ops = &irq_domain_simple_ops;
-	rc = irq_domain_add(domain);
-	if (rc) {
-		WARN(1, "Unable to create irq domain\n");
-		return;
-	}
-	irq_domain_register(domain);
+	if (WARN_ON(intsize < 1))
+		return -EINVAL;
+	*out_hwirq = intspec[0];
+	*out_type = IRQ_TYPE_NONE;
+	return 0;
 }
-EXPORT_SYMBOL_GPL(irq_domain_add_simple);
+EXPORT_SYMBOL_GPL(irq_domain_xlate_onecell);
 
+/**
+ * irq_domain_xlate_twocell() - Generic xlate for direct two cell bindings
+ *
+ * Device Tree IRQ specifier translation function which works with two cell
+ * bindings where the cell values map directly to the hwirq number
+ * and linux irq flags.
+ */
+int irq_domain_xlate_twocell(struct irq_domain *d, struct device_node *ctrlr,
+			const u32 *intspec, unsigned int intsize,
+			irq_hw_number_t *out_hwirq, unsigned int *out_type)
+{
+	if (WARN_ON(intsize < 2))
+		return -EINVAL;
+	*out_hwirq = intspec[0];
+	*out_type = intspec[1] & IRQ_TYPE_SENSE_MASK;
+	return 0;
+}
+EXPORT_SYMBOL_GPL(irq_domain_xlate_twocell);
+
+/**
+ * irq_domain_xlate_onetwocell() - Generic xlate for one or two cell bindings
+ *
+ * Device Tree IRQ specifier translation function which works with either one
+ * or two cell bindings where the cell values map directly to the hwirq number
+ * and linux irq flags.
+ *
+ * Note: don't use this function unless your interrupt controller explicitly
+ * supports both one and two cell bindings.  For the majority of controllers
+ * the _onecell() or _twocell() variants above should be used.
+ */
+int irq_domain_xlate_onetwocell(struct irq_domain *d,
+				struct device_node *ctrlr,
+				const u32 *intspec, unsigned int intsize,
+				unsigned long *out_hwirq, unsigned int *out_type)
+{
+	if (WARN_ON(intsize < 1))
+		return -EINVAL;
+	*out_hwirq = intspec[0];
+	*out_type = (intsize > 1) ? intspec[1] : IRQ_TYPE_NONE;
+	return 0;
+}
+EXPORT_SYMBOL_GPL(irq_domain_xlate_onetwocell);
+
+const struct irq_domain_ops irq_domain_simple_ops = {
+	.map = irq_domain_simple_map,
+	.xlate = irq_domain_xlate_onetwocell,
+};
+EXPORT_SYMBOL_GPL(irq_domain_simple_ops);
+
+#ifdef CONFIG_OF_IRQ
 void irq_domain_generate_simple(const struct of_device_id *match,
 				u64 phys_base, unsigned int irq_start)
 {
 	struct device_node *node;
-	pr_info("looking for phys_base=%llx, irq_start=%i\n",
+	pr_debug("looking for phys_base=%llx, irq_start=%i\n",
 		(unsigned long long) phys_base, (int) irq_start);
 	node = of_find_matching_node_by_address(NULL, match, phys_base);
 	if (node)
-		irq_domain_add_simple(node, irq_start);
-	else
-		pr_info("no node found\n");
+		irq_domain_add_legacy(node, 32, irq_start, 0,
+				      &irq_domain_simple_ops, NULL);
 }
 EXPORT_SYMBOL_GPL(irq_domain_generate_simple);
-#endif /* CONFIG_OF_IRQ */
-
-struct irq_domain_ops irq_domain_simple_ops = {
-#ifdef CONFIG_OF_IRQ
-	.dt_translate = irq_domain_simple_dt_translate,
-#endif /* CONFIG_OF_IRQ */
-};
-EXPORT_SYMBOL_GPL(irq_domain_simple_ops);
+#endif
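
Illustration only (not part of this patch): a minimal sketch of how a
hypothetical interrupt controller driver might use the reworked irq_domain
API added above. The foo_* names, the hwirq count of 32 and the choice of
handle_level_irq are assumptions (foo_irq_chip is assumed to be defined
elsewhere); irq_domain_add_linear(), irq_create_mapping() and
irq_domain_xlate_onecell() are taken from the code in this diff.

	static int foo_irq_map(struct irq_domain *d, unsigned int virq,
			       irq_hw_number_t hwirq)
	{
		/* Called by irq_create_mapping(); wire up chip and flow handler */
		irq_set_chip_and_handler(virq, &foo_irq_chip, handle_level_irq);
		return 0;
	}

	static const struct irq_domain_ops foo_irq_ops = {
		.map   = foo_irq_map,
		.xlate = irq_domain_xlate_onecell,	/* one-cell DT bindings */
	};

	static int foo_init(struct device_node *np)
	{
		struct irq_domain *d;
		unsigned int virq;

		/* Linear revmap: one slot per hwirq, descs allocated on demand */
		d = irq_domain_add_linear(np, 32, &foo_irq_ops, NULL);
		if (!d)
			return -ENOMEM;

		/* Map hwirq 5; returns the existing mapping if already created */
		virq = irq_create_mapping(d, 5);
		return virq ? 0 : -EINVAL;
	}
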
diff --git a/kernel/panic.c b/kernel/panic.c
index b47ca87..8c6babc 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -81,6 +81,14 @@
 	int state = 0;
 
 	/*
+	 * Disable local interrupts. This will prevent panic_smp_self_stop
+	 * from deadlocking the first cpu that invokes the panic, since
+	 * there is nothing to prevent an interrupt handler (that runs
+	 * after the panic_lock is acquired) from invoking panic again.
+	 */
+	local_irq_disable();
+
+	/*
 	 * It's possible to come here directly from a panic-assertion and
 	 * not have preempt disabled. Some functions called from here want
 	 * preempt to be disabled. No point enabling it later though...
diff --git a/mm/Kconfig b/mm/Kconfig
index cad244c..84489cd 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -198,7 +198,7 @@
 config MIGRATION
 	bool "Page migration"
 	def_bool y
-	depends on NUMA || ARCH_ENABLE_MEMORY_HOTREMOVE || COMPACTION
+	depends on NUMA || ARCH_ENABLE_MEMORY_HOTREMOVE || COMPACTION || CMA
 	help
 	  Allows the migration of the physical location of pages of processes
 	  while the virtual addresses are not changed. This is useful in
diff --git a/mm/Makefile b/mm/Makefile
index 50ec00e..8aada89 100644
--- a/mm/Makefile
+++ b/mm/Makefile
@@ -13,7 +13,7 @@
 			   readahead.o swap.o truncate.o vmscan.o shmem.o \
 			   prio_tree.o util.o mmzone.o vmstat.o backing-dev.o \
 			   page_isolation.o mm_init.o mmu_context.o percpu.o \
-			   $(mmu-y)
+			   compaction.o $(mmu-y)
 obj-y += init-mm.o
 
 ifdef CONFIG_NO_BOOTMEM
@@ -32,7 +32,6 @@
 obj-$(CONFIG_SPARSEMEM)	+= sparse.o
 obj-$(CONFIG_SPARSEMEM_VMEMMAP) += sparse-vmemmap.o
 obj-$(CONFIG_SLOB) += slob.o
-obj-$(CONFIG_COMPACTION) += compaction.o
 obj-$(CONFIG_MMU_NOTIFIER) += mmu_notifier.o
 obj-$(CONFIG_KSM) += ksm.o
 obj-$(CONFIG_PAGE_POISONING) += debug-pagealloc.o
diff --git a/mm/compaction.c b/mm/compaction.c
index 74a8c82..da7d35e 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -16,30 +16,11 @@
 #include <linux/sysfs.h>
 #include "internal.h"
 
+#if defined CONFIG_COMPACTION || defined CONFIG_CMA
+
 #define CREATE_TRACE_POINTS
 #include <trace/events/compaction.h>
 
-/*
- * compact_control is used to track pages being migrated and the free pages
- * they are being migrated to during memory compaction. The free_pfn starts
- * at the end of a zone and migrate_pfn begins at the start. Movable pages
- * are moved to the end of a zone during a compaction run and the run
- * completes when free_pfn <= migrate_pfn
- */
-struct compact_control {
-	struct list_head freepages;	/* List of free pages to migrate to */
-	struct list_head migratepages;	/* List of pages being migrated */
-	unsigned long nr_freepages;	/* Number of isolated free pages */
-	unsigned long nr_migratepages;	/* Number of pages to migrate */
-	unsigned long free_pfn;		/* isolate_freepages search base */
-	unsigned long migrate_pfn;	/* isolate_migratepages search base */
-	bool sync;			/* Synchronous migration */
-
-	int order;			/* order a direct compactor needs */
-	int migratetype;		/* MOVABLE, RECLAIMABLE etc */
-	struct zone *zone;
-};
-
 static unsigned long release_freepages(struct list_head *freelist)
 {
 	struct page *page, *next;
@@ -54,24 +35,35 @@
 	return count;
 }
 
-/* Isolate free pages onto a private freelist. Must hold zone->lock */
-static unsigned long isolate_freepages_block(struct zone *zone,
-				unsigned long blockpfn,
-				struct list_head *freelist)
+static void map_pages(struct list_head *list)
 {
-	unsigned long zone_end_pfn, end_pfn;
+	struct page *page;
+
+	list_for_each_entry(page, list, lru) {
+		arch_alloc_page(page, 0);
+		kernel_map_pages(page, 1, 1);
+	}
+}
+
+static inline bool migrate_async_suitable(int migratetype)
+{
+	return is_migrate_cma(migratetype) || migratetype == MIGRATE_MOVABLE;
+}
+
+/*
+ * Isolate free pages onto a private freelist. Caller must hold zone->lock.
+ * If @strict is true, will abort returning 0 on any invalid PFNs or non-free
+ * pages inside of the pageblock (even though it may still end up isolating
+ * some pages).
+ */
+static unsigned long isolate_freepages_block(unsigned long blockpfn,
+				unsigned long end_pfn,
+				struct list_head *freelist,
+				bool strict)
+{
 	int nr_scanned = 0, total_isolated = 0;
 	struct page *cursor;
 
-	/* Get the last PFN we should scan for free pages at */
-	zone_end_pfn = zone->zone_start_pfn + zone->spanned_pages;
-	end_pfn = min(blockpfn + pageblock_nr_pages, zone_end_pfn);
-
-	/* Find the first usable PFN in the block to initialse page cursor */
-	for (; blockpfn < end_pfn; blockpfn++) {
-		if (pfn_valid_within(blockpfn))
-			break;
-	}
 	cursor = pfn_to_page(blockpfn);
 
 	/* Isolate free pages. This assumes the block is valid */
@@ -79,15 +71,23 @@
 		int isolated, i;
 		struct page *page = cursor;
 
-		if (!pfn_valid_within(blockpfn))
+		if (!pfn_valid_within(blockpfn)) {
+			if (strict)
+				return 0;
 			continue;
+		}
 		nr_scanned++;
 
-		if (!PageBuddy(page))
+		if (!PageBuddy(page)) {
+			if (strict)
+				return 0;
 			continue;
+		}
 
 		/* Found a free page, break it into order-0 pages */
 		isolated = split_free_page(page);
+		if (!isolated && strict)
+			return 0;
 		total_isolated += isolated;
 		for (i = 0; i < isolated; i++) {
 			list_add(&page->lru, freelist);
@@ -105,114 +105,71 @@
 	return total_isolated;
 }
 
-/* Returns true if the page is within a block suitable for migration to */
-static bool suitable_migration_target(struct page *page)
-{
-
-	int migratetype = get_pageblock_migratetype(page);
-
-	/* Don't interfere with memory hot-remove or the min_free_kbytes blocks */
-	if (migratetype == MIGRATE_ISOLATE || migratetype == MIGRATE_RESERVE)
-		return false;
-
-	/* If the page is a large free page, then allow migration */
-	if (PageBuddy(page) && page_order(page) >= pageblock_order)
-		return true;
-
-	/* If the block is MIGRATE_MOVABLE, allow migration */
-	if (migratetype == MIGRATE_MOVABLE)
-		return true;
-
-	/* Otherwise skip the block */
-	return false;
-}
-
-/*
- * Based on information in the current compact_control, find blocks
- * suitable for isolating free pages from and then isolate them.
+/**
+ * isolate_freepages_range() - isolate free pages.
+ * @start_pfn: The first PFN to start isolating.
+ * @end_pfn:   The one-past-last PFN.
+ *
+ * Non-free pages, invalid PFNs, or zone boundaries within the
+ * [start_pfn, end_pfn) range are considered errors and cause the function
+ * to undo its actions and return zero.
+ *
+ * Otherwise, the function returns the one-past-the-last PFN of the isolated
+ * pages (which may be greater than end_pfn if the end fell in the middle of
+ * a free page).
  */
-static void isolate_freepages(struct zone *zone,
-				struct compact_control *cc)
+unsigned long
+isolate_freepages_range(unsigned long start_pfn, unsigned long end_pfn)
 {
-	struct page *page;
-	unsigned long high_pfn, low_pfn, pfn;
-	unsigned long flags;
-	int nr_freepages = cc->nr_freepages;
-	struct list_head *freelist = &cc->freepages;
+	unsigned long isolated, pfn, block_end_pfn, flags;
+	struct zone *zone = NULL;
+	LIST_HEAD(freelist);
 
-	/*
-	 * Initialise the free scanner. The starting point is where we last
-	 * scanned from (or the end of the zone if starting). The low point
-	 * is the end of the pageblock the migration scanner is using.
-	 */
-	pfn = cc->free_pfn;
-	low_pfn = cc->migrate_pfn + pageblock_nr_pages;
+	if (pfn_valid(start_pfn))
+		zone = page_zone(pfn_to_page(start_pfn));
 
-	/*
-	 * Take care that if the migration scanner is at the end of the zone
-	 * that the free scanner does not accidentally move to the next zone
-	 * in the next isolation cycle.
-	 */
-	high_pfn = min(low_pfn, pfn);
-
-	/*
-	 * Isolate free pages until enough are available to migrate the
-	 * pages on cc->migratepages. We stop searching if the migrate
-	 * and free page scanners meet or enough free pages are isolated.
-	 */
-	for (; pfn > low_pfn && cc->nr_migratepages > nr_freepages;
-					pfn -= pageblock_nr_pages) {
-		unsigned long isolated;
-
-		if (!pfn_valid(pfn))
-			continue;
+	for (pfn = start_pfn; pfn < end_pfn; pfn += isolated) {
+		if (!pfn_valid(pfn) || zone != page_zone(pfn_to_page(pfn)))
+			break;
 
 		/*
-		 * Check for overlapping nodes/zones. It's possible on some
-		 * configurations to have a setup like
-		 * node0 node1 node0
-		 * i.e. it's possible that all pages within a zones range of
-		 * pages do not belong to a single zone.
+		 * On subsequent iterations ALIGN() is actually not needed,
+		 * but we keep it so as not to complicate the code.
 		 */
-		page = pfn_to_page(pfn);
-		if (page_zone(page) != zone)
-			continue;
+		block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);
+		block_end_pfn = min(block_end_pfn, end_pfn);
 
-		/* Check the block is suitable for migration */
-		if (!suitable_migration_target(page))
-			continue;
-
-		/*
-		 * Found a block suitable for isolating free pages from. Now
-		 * we disabled interrupts, double check things are ok and
-		 * isolate the pages. This is to minimise the time IRQs
-		 * are disabled
-		 */
-		isolated = 0;
 		spin_lock_irqsave(&zone->lock, flags);
-		if (suitable_migration_target(page)) {
-			isolated = isolate_freepages_block(zone, pfn, freelist);
-			nr_freepages += isolated;
-		}
+		isolated = isolate_freepages_block(pfn, block_end_pfn,
+						   &freelist, true);
 		spin_unlock_irqrestore(&zone->lock, flags);
 
 		/*
-		 * Record the highest PFN we isolated pages from. When next
-		 * looking for free pages, the search will restart here as
-		 * page migration may have returned some pages to the allocator
+		 * In strict mode, isolate_freepages_block() returns 0 if
+		 * there are any holes in the block (ie. invalid PFNs or
+		 * non-free pages).
 		 */
-		if (isolated)
-			high_pfn = max(high_pfn, pfn);
+		if (!isolated)
+			break;
+
+		/*
+		 * If we managed to isolate pages, it is always (1 << n) *
+		 * pageblock_nr_pages for some non-negative n.  (Max order
+		 * page may span two pageblocks).
+		 */
 	}
 
 	/* split_free_page does not map the pages */
-	list_for_each_entry(page, freelist, lru) {
-		arch_alloc_page(page, 0);
-		kernel_map_pages(page, 1, 1);
+	map_pages(&freelist);
+
+	if (pfn < end_pfn) {
+		/* Loop terminated early, cleanup. */
+		release_freepages(&freelist);
+		return 0;
 	}
 
-	cc->free_pfn = high_pfn;
-	cc->nr_freepages = nr_freepages;
+	/* We don't use freelists for anything. */
+	return pfn;
 }
 
 /* Update the number of anon and file isolated pages in the zone */
@@ -243,38 +200,34 @@
 	return isolated > (inactive + active) / 2;
 }
 
-/* possible outcome of isolate_migratepages */
-typedef enum {
-	ISOLATE_ABORT,		/* Abort compaction now */
-	ISOLATE_NONE,		/* No pages isolated, continue scanning */
-	ISOLATE_SUCCESS,	/* Pages isolated, migrate */
-} isolate_migrate_t;
-
-/*
- * Isolate all pages that can be migrated from the block pointed to by
- * the migrate scanner within compact_control.
+/**
+ * isolate_migratepages_range() - isolate all migrate-able pages in range.
+ * @zone:	Zone pages are in.
+ * @cc:		Compaction control structure.
+ * @low_pfn:	The first PFN of the range.
+ * @end_pfn:	The one-past-the-last PFN of the range.
+ *
+ * Isolate all pages that can be migrated from the range specified by
+ * [low_pfn, end_pfn).  Returns zero if there is a fatal signal
+ * pending, otherwise the PFN of the first page that was not scanned
+ * (which may be less than, equal to, or greater than end_pfn).
+ *
+ * Assumes that cc->migratepages is empty and cc->nr_migratepages is
+ * zero.
+ *
+ * Apart from cc->migratepages and cc->nr_migratepages this function
+ * does not modify any of cc's fields, in particular it does not modify
+ * (or read for that matter) cc->migrate_pfn.
  */
-static isolate_migrate_t isolate_migratepages(struct zone *zone,
-					struct compact_control *cc)
+unsigned long
+isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
+			   unsigned long low_pfn, unsigned long end_pfn)
 {
-	unsigned long low_pfn, end_pfn;
 	unsigned long last_pageblock_nr = 0, pageblock_nr;
 	unsigned long nr_scanned = 0, nr_isolated = 0;
 	struct list_head *migratelist = &cc->migratepages;
 	isolate_mode_t mode = ISOLATE_ACTIVE|ISOLATE_INACTIVE;
 
-	/* Do not scan outside zone boundaries */
-	low_pfn = max(cc->migrate_pfn, zone->zone_start_pfn);
-
-	/* Only scan within a pageblock boundary */
-	end_pfn = ALIGN(low_pfn + pageblock_nr_pages, pageblock_nr_pages);
-
-	/* Do not cross the free scanner or scan within a memory hole */
-	if (end_pfn > cc->free_pfn || !pfn_valid(low_pfn)) {
-		cc->migrate_pfn = end_pfn;
-		return ISOLATE_NONE;
-	}
-
 	/*
 	 * Ensure that there are not too many pages isolated from the LRU
 	 * list by either parallel reclaimers or compaction. If there are,
@@ -283,12 +236,12 @@
 	while (unlikely(too_many_isolated(zone))) {
 		/* async migration should just abort */
 		if (!cc->sync)
-			return ISOLATE_ABORT;
+			return 0;
 
 		congestion_wait(BLK_RW_ASYNC, HZ/10);
 
 		if (fatal_signal_pending(current))
-			return ISOLATE_ABORT;
+			return 0;
 	}
 
 	/* Time to isolate some pages for migration */
@@ -351,7 +304,7 @@
 		 */
 		pageblock_nr = low_pfn >> pageblock_order;
 		if (!cc->sync && last_pageblock_nr != pageblock_nr &&
-				get_pageblock_migratetype(page) != MIGRATE_MOVABLE) {
+		    !migrate_async_suitable(get_pageblock_migratetype(page))) {
 			low_pfn += pageblock_nr_pages;
 			low_pfn = ALIGN(low_pfn, pageblock_nr_pages) - 1;
 			last_pageblock_nr = pageblock_nr;
@@ -396,11 +349,124 @@
 	acct_isolated(zone, cc);
 
 	spin_unlock_irq(&zone->lru_lock);
-	cc->migrate_pfn = low_pfn;
 
 	trace_mm_compaction_isolate_migratepages(nr_scanned, nr_isolated);
 
-	return ISOLATE_SUCCESS;
+	return low_pfn;
+}
+
+#endif /* CONFIG_COMPACTION || CONFIG_CMA */
+#ifdef CONFIG_COMPACTION
+
+/* Returns true if the page is within a block suitable for migration to */
+static bool suitable_migration_target(struct page *page)
+{
+
+	int migratetype = get_pageblock_migratetype(page);
+
+	/* Don't interfere with memory hot-remove or the min_free_kbytes blocks */
+	if (migratetype == MIGRATE_ISOLATE || migratetype == MIGRATE_RESERVE)
+		return false;
+
+	/* If the page is a large free page, then allow migration */
+	if (PageBuddy(page) && page_order(page) >= pageblock_order)
+		return true;
+
+	/* If the block is MIGRATE_MOVABLE or MIGRATE_CMA, allow migration */
+	if (migrate_async_suitable(migratetype))
+		return true;
+
+	/* Otherwise skip the block */
+	return false;
+}
+
+/*
+ * Based on information in the current compact_control, find blocks
+ * suitable for isolating free pages from and then isolate them.
+ */
+static void isolate_freepages(struct zone *zone,
+				struct compact_control *cc)
+{
+	struct page *page;
+	unsigned long high_pfn, low_pfn, pfn, zone_end_pfn, end_pfn;
+	unsigned long flags;
+	int nr_freepages = cc->nr_freepages;
+	struct list_head *freelist = &cc->freepages;
+
+	/*
+	 * Initialise the free scanner. The starting point is where we last
+	 * scanned from (or the end of the zone if starting). The low point
+	 * is the end of the pageblock the migration scanner is using.
+	 */
+	pfn = cc->free_pfn;
+	low_pfn = cc->migrate_pfn + pageblock_nr_pages;
+
+	/*
+	 * Take care that if the migration scanner is at the end of the zone
+	 * that the free scanner does not accidentally move to the next zone
+	 * in the next isolation cycle.
+	 */
+	high_pfn = min(low_pfn, pfn);
+
+	zone_end_pfn = zone->zone_start_pfn + zone->spanned_pages;
+
+	/*
+	 * Isolate free pages until enough are available to migrate the
+	 * pages on cc->migratepages. We stop searching if the migrate
+	 * and free page scanners meet or enough free pages are isolated.
+	 */
+	for (; pfn > low_pfn && cc->nr_migratepages > nr_freepages;
+					pfn -= pageblock_nr_pages) {
+		unsigned long isolated;
+
+		if (!pfn_valid(pfn))
+			continue;
+
+		/*
+		 * Check for overlapping nodes/zones. It's possible on some
+		 * configurations to have a setup like
+		 * node0 node1 node0
+		 * i.e. it's possible that all pages within a zones range of
+		 * pages do not belong to a single zone.
+		 */
+		page = pfn_to_page(pfn);
+		if (page_zone(page) != zone)
+			continue;
+
+		/* Check the block is suitable for migration */
+		if (!suitable_migration_target(page))
+			continue;
+
+		/*
+		 * Found a block suitable for isolating free pages from. Now
+		 * we disabled interrupts, double check things are ok and
+		 * isolate the pages. This is to minimise the time IRQs
+		 * are disabled
+		 */
+		isolated = 0;
+		spin_lock_irqsave(&zone->lock, flags);
+		if (suitable_migration_target(page)) {
+			end_pfn = min(pfn + pageblock_nr_pages, zone_end_pfn);
+			isolated = isolate_freepages_block(pfn, end_pfn,
+							   freelist, false);
+			nr_freepages += isolated;
+		}
+		spin_unlock_irqrestore(&zone->lock, flags);
+
+		/*
+		 * Record the highest PFN we isolated pages from. When next
+		 * looking for free pages, the search will restart here as
+		 * page migration may have returned some pages to the allocator
+		 */
+		if (isolated)
+			high_pfn = max(high_pfn, pfn);
+	}
+
+	/* split_free_page does not map the pages */
+	map_pages(freelist);
+
+	cc->free_pfn = high_pfn;
+	cc->nr_freepages = nr_freepages;
 }
 
 /*
@@ -449,6 +515,44 @@
 	cc->nr_freepages = nr_freepages;
 }
 
+/* possible outcome of isolate_migratepages */
+typedef enum {
+	ISOLATE_ABORT,		/* Abort compaction now */
+	ISOLATE_NONE,		/* No pages isolated, continue scanning */
+	ISOLATE_SUCCESS,	/* Pages isolated, migrate */
+} isolate_migrate_t;
+
+/*
+ * Isolate all pages that can be migrated from the block pointed to by
+ * the migrate scanner within compact_control.
+ */
+static isolate_migrate_t isolate_migratepages(struct zone *zone,
+					struct compact_control *cc)
+{
+	unsigned long low_pfn, end_pfn;
+
+	/* Do not scan outside zone boundaries */
+	low_pfn = max(cc->migrate_pfn, zone->zone_start_pfn);
+
+	/* Only scan within a pageblock boundary */
+	end_pfn = ALIGN(low_pfn + pageblock_nr_pages, pageblock_nr_pages);
+
+	/* Do not cross the free scanner or scan within a memory hole */
+	if (end_pfn > cc->free_pfn || !pfn_valid(low_pfn)) {
+		cc->migrate_pfn = end_pfn;
+		return ISOLATE_NONE;
+	}
+
+	/* Perform the isolation */
+	low_pfn = isolate_migratepages_range(zone, cc, low_pfn, end_pfn);
+	if (!low_pfn)
+		return ISOLATE_ABORT;
+
+	cc->migrate_pfn = low_pfn;
+
+	return ISOLATE_SUCCESS;
+}
+
 static int compact_finished(struct zone *zone,
 			    struct compact_control *cc)
 {
@@ -795,3 +899,5 @@
 	return device_remove_file(&node->dev, &dev_attr_compact);
 }
 #endif /* CONFIG_SYSFS && CONFIG_NUMA */
+
+#endif /* CONFIG_COMPACTION */
diff --git a/mm/internal.h b/mm/internal.h
index 2189af4..aee4761 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -100,6 +100,39 @@
 extern bool is_free_buddy_page(struct page *page);
 #endif
 
+#if defined CONFIG_COMPACTION || defined CONFIG_CMA
+
+/*
+ * in mm/compaction.c
+ */
+/*
+ * compact_control is used to track pages being migrated and the free pages
+ * they are being migrated to during memory compaction. The free_pfn starts
+ * at the end of a zone and migrate_pfn begins at the start. Movable pages
+ * are moved to the end of a zone during a compaction run and the run
+ * completes when free_pfn <= migrate_pfn
+ */
+struct compact_control {
+	struct list_head freepages;	/* List of free pages to migrate to */
+	struct list_head migratepages;	/* List of pages being migrated */
+	unsigned long nr_freepages;	/* Number of isolated free pages */
+	unsigned long nr_migratepages;	/* Number of pages to migrate */
+	unsigned long free_pfn;		/* isolate_freepages search base */
+	unsigned long migrate_pfn;	/* isolate_migratepages search base */
+	bool sync;			/* Synchronous migration */
+
+	int order;			/* order a direct compactor needs */
+	int migratetype;		/* MOVABLE, RECLAIMABLE etc */
+	struct zone *zone;
+};
+
+unsigned long
+isolate_freepages_range(unsigned long start_pfn, unsigned long end_pfn);
+unsigned long
+isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
+			   unsigned long low_pfn, unsigned long end_pfn);
+
+#endif
 
 /*
  * function for dealing with page's order in buddy system.
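
Illustration only (not part of this patch): a minimal sketch, under the
assumption of a CMA-style caller, of how compact_control and the newly
exported isolate_migratepages_range() are meant to be driven together with
migrate_pages(). The patch's own __alloc_contig_migrate_range() in
mm/page_alloc.c below follows the same pattern with a retry loop; this
single-pass version, the sketch_* names and the error handling are
assumptions. The caller is assumed to have isolated the pageblocks and to
pass a range within a single zone, as alloc_contig_range() does.

	/* Hypothetical allocation callback for migrate_pages() */
	static struct page *sketch_new_page(struct page *page,
					    unsigned long private, int **resultp)
	{
		return alloc_page(GFP_HIGHUSER_MOVABLE);
	}

	/* Move movable pages out of [start, end); single pass for brevity */
	static int sketch_migrate_range(unsigned long start, unsigned long end)
	{
		struct compact_control cc = {
			.order = -1,		/* not a direct compactor */
			.sync = true,
			.zone = page_zone(pfn_to_page(start)),
		};
		unsigned long pfn;
		int ret;

		INIT_LIST_HEAD(&cc.migratepages);
		migrate_prep_local();

		/* Returns 0 on a fatal signal, else the next PFN to scan */
		pfn = isolate_migratepages_range(cc.zone, &cc, start, end);
		if (!pfn)
			return -EINTR;

		ret = migrate_pages(&cc.migratepages, sketch_new_page, 0,
				    false, true);
		/* Put back anything that could not be migrated */
		putback_lru_pages(&cc.migratepages);
		return ret > 0 ? -EBUSY : ret;
	}
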
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 97cc273..c99ad4e 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -1404,7 +1404,7 @@
 		/* Not a free page */
 		ret = 1;
 	}
-	unset_migratetype_isolate(p);
+	unset_migratetype_isolate(p, MIGRATE_MOVABLE);
 	unlock_memory_hotplug();
 	return ret;
 }
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index b031f96..a0e1819 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -959,7 +959,7 @@
 	nr_pages = end_pfn - start_pfn;
 
 	/* set above range as isolated */
-	ret = start_isolate_page_range(start_pfn, end_pfn);
+	ret = start_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
 	if (ret)
 		goto out;
 
@@ -1024,7 +1024,7 @@
 	   We cannot do rollback at this point. */
 	offline_isolated_pages(start_pfn, end_pfn);
 	/* reset pagetype flags and makes migrate type to be MOVABLE */
-	undo_isolate_page_range(start_pfn, end_pfn);
+	undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
 	/* removal success */
 	if (offlined_pages > zone->present_pages)
 		zone->present_pages = 0;
@@ -1056,7 +1056,7 @@
 		start_pfn, end_pfn);
 	memory_notify(MEM_CANCEL_OFFLINE, &arg);
 	/* pushback to free area */
-	undo_isolate_page_range(start_pfn, end_pfn);
+	undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
 
 out:
 	unlock_memory_hotplug();
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index d5d8541..c34cbbd 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -57,6 +57,7 @@
 #include <linux/ftrace_event.h>
 #include <linux/memcontrol.h>
 #include <linux/prefetch.h>
+#include <linux/migrate.h>
 #include <linux/page-debug-flags.h>
 
 #include <asm/tlbflush.h>
@@ -520,10 +521,10 @@
  * free pages of length of (1 << order) and marked with _mapcount -2. Page's
  * order is recorded in page_private(page) field.
  * So when we are allocating or freeing one, we can derive the state of the
- * other.  That is, if we allocate a small block, and both were   
- * free, the remainder of the region must be split into blocks.   
+ * other.  That is, if we allocate a small block, and both were
+ * free, the remainder of the region must be split into blocks.
  * If a block is freed, and its buddy is also free, then this
- * triggers coalescing into a block of larger size.            
+ * triggers coalescing into a block of larger size.
  *
  * -- wli
  */
@@ -756,6 +757,24 @@
 	__free_pages(page, order);
 }
 
+#ifdef CONFIG_CMA
+/* Free whole pageblock and set its migration type to MIGRATE_CMA. */
+void __init init_cma_reserved_pageblock(struct page *page)
+{
+	unsigned i = pageblock_nr_pages;
+	struct page *p = page;
+
+	do {
+		__ClearPageReserved(p);
+		set_page_count(p, 0);
+	} while (++p, --i);
+
+	set_page_refcounted(page);
+	set_pageblock_migratetype(page, MIGRATE_CMA);
+	__free_pages(page, pageblock_order);
+	totalram_pages += pageblock_nr_pages;
+}
+#endif
 
 /*
  * The order of subdivision here is critical for the IO subsystem.
@@ -881,11 +900,17 @@
  * This array describes the order lists are fallen back to when
  * the free lists for the desirable migrate type are depleted
  */
-static int fallbacks[MIGRATE_TYPES][MIGRATE_TYPES-1] = {
-	[MIGRATE_UNMOVABLE]   = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE,   MIGRATE_RESERVE },
-	[MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE,   MIGRATE_MOVABLE,   MIGRATE_RESERVE },
-	[MIGRATE_MOVABLE]     = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_RESERVE },
-	[MIGRATE_RESERVE]     = { MIGRATE_RESERVE,     MIGRATE_RESERVE,   MIGRATE_RESERVE }, /* Never used */
+static int fallbacks[MIGRATE_TYPES][4] = {
+	[MIGRATE_UNMOVABLE]   = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE,     MIGRATE_RESERVE },
+	[MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE,   MIGRATE_MOVABLE,     MIGRATE_RESERVE },
+#ifdef CONFIG_CMA
+	[MIGRATE_MOVABLE]     = { MIGRATE_CMA,         MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_RESERVE },
+	[MIGRATE_CMA]         = { MIGRATE_RESERVE }, /* Never used */
+#else
+	[MIGRATE_MOVABLE]     = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE,   MIGRATE_RESERVE },
+#endif
+	[MIGRATE_RESERVE]     = { MIGRATE_RESERVE }, /* Never used */
+	[MIGRATE_ISOLATE]     = { MIGRATE_RESERVE }, /* Never used */
 };
 
 /*
@@ -980,12 +1005,12 @@
 	/* Find the largest possible block of pages in the other list */
 	for (current_order = MAX_ORDER-1; current_order >= order;
 						--current_order) {
-		for (i = 0; i < MIGRATE_TYPES - 1; i++) {
+		for (i = 0;; i++) {
 			migratetype = fallbacks[start_migratetype][i];
 
 			/* MIGRATE_RESERVE handled later if necessary */
 			if (migratetype == MIGRATE_RESERVE)
-				continue;
+				break;
 
 			area = &(zone->free_area[current_order]);
 			if (list_empty(&area->free_list[migratetype]))
@@ -1000,11 +1025,18 @@
 			 * pages to the preferred allocation list. If falling
 			 * back for a reclaimable kernel allocation, be more
 			 * aggressive about taking ownership of free pages
+			 *
+			 * On the other hand, never change migration
+			 * type of MIGRATE_CMA pageblocks nor move CMA
+			 * pages to different free lists. We don't
+			 * want unmovable pages to be allocated from
+			 * MIGRATE_CMA areas.
 			 */
-			if (unlikely(current_order >= (pageblock_order >> 1)) ||
-					start_migratetype == MIGRATE_RECLAIMABLE ||
-					page_group_by_mobility_disabled) {
-				unsigned long pages;
+			if (!is_migrate_cma(migratetype) &&
+			    (unlikely(current_order >= pageblock_order / 2) ||
+			     start_migratetype == MIGRATE_RECLAIMABLE ||
+			     page_group_by_mobility_disabled)) {
+				int pages;
 				pages = move_freepages_block(zone, page,
 								start_migratetype);
 
@@ -1022,11 +1054,14 @@
 			rmv_page_order(page);
 
 			/* Take ownership for orders >= pageblock_order */
-			if (current_order >= pageblock_order)
+			if (current_order >= pageblock_order &&
+			    !is_migrate_cma(migratetype))
 				change_pageblock_range(page, current_order,
 							start_migratetype);
 
-			expand(zone, page, order, current_order, area, migratetype);
+			expand(zone, page, order, current_order, area,
+			       is_migrate_cma(migratetype)
+			     ? migratetype : start_migratetype);
 
 			trace_mm_page_alloc_extfrag(page, order, current_order,
 				start_migratetype, migratetype);
@@ -1068,17 +1103,17 @@
 	return page;
 }
 
-/* 
+/*
  * Obtain a specified number of elements from the buddy allocator, all under
  * a single hold of the lock, for efficiency.  Add them to the supplied list.
  * Returns the number of new pages which were placed at *list.
  */
-static int rmqueue_bulk(struct zone *zone, unsigned int order, 
+static int rmqueue_bulk(struct zone *zone, unsigned int order,
 			unsigned long count, struct list_head *list,
 			int migratetype, int cold)
 {
-	int i;
-	
+	int mt = migratetype, i;
+
 	spin_lock(&zone->lock);
 	for (i = 0; i < count; ++i) {
 		struct page *page = __rmqueue(zone, order, migratetype);
@@ -1098,7 +1133,12 @@
 			list_add(&page->lru, list);
 		else
 			list_add_tail(&page->lru, list);
-		set_page_private(page, migratetype);
+		if (IS_ENABLED(CONFIG_CMA)) {
+			mt = get_pageblock_migratetype(page);
+			if (!is_migrate_cma(mt) && mt != MIGRATE_ISOLATE)
+				mt = migratetype;
+		}
+		set_page_private(page, mt);
 		list = &page->lru;
 	}
 	__mod_zone_page_state(zone, NR_FREE_PAGES, -(i << order));
@@ -1378,8 +1418,12 @@
 
 	if (order >= pageblock_order - 1) {
 		struct page *endpage = page + (1 << order) - 1;
-		for (; page < endpage; page += pageblock_nr_pages)
-			set_pageblock_migratetype(page, MIGRATE_MOVABLE);
+		for (; page < endpage; page += pageblock_nr_pages) {
+			int mt = get_pageblock_migratetype(page);
+			if (mt != MIGRATE_ISOLATE && !is_migrate_cma(mt))
+				set_pageblock_migratetype(page,
+							  MIGRATE_MOVABLE);
+		}
 	}
 
 	return 1 << order;
@@ -2094,16 +2138,13 @@
 }
 #endif /* CONFIG_COMPACTION */
 
-/* The really slow allocator path where we enter direct reclaim */
-static inline struct page *
-__alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
-	struct zonelist *zonelist, enum zone_type high_zoneidx,
-	nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone,
-	int migratetype, unsigned long *did_some_progress)
+/* Perform direct synchronous page reclaim */
+static int
+__perform_reclaim(gfp_t gfp_mask, unsigned int order, struct zonelist *zonelist,
+		  nodemask_t *nodemask)
 {
-	struct page *page = NULL;
 	struct reclaim_state reclaim_state;
-	bool drained = false;
+	int progress;
 
 	cond_resched();
 
@@ -2114,7 +2155,7 @@
 	reclaim_state.reclaimed_slab = 0;
 	current->reclaim_state = &reclaim_state;
 
-	*did_some_progress = try_to_free_pages(zonelist, order, gfp_mask, nodemask);
+	progress = try_to_free_pages(zonelist, order, gfp_mask, nodemask);
 
 	current->reclaim_state = NULL;
 	lockdep_clear_current_reclaim_state();
@@ -2122,6 +2163,21 @@
 
 	cond_resched();
 
+	return progress;
+}
+
+/* The really slow allocator path where we enter direct reclaim */
+static inline struct page *
+__alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
+	struct zonelist *zonelist, enum zone_type high_zoneidx,
+	nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone,
+	int migratetype, unsigned long *did_some_progress)
+{
+	struct page *page = NULL;
+	bool drained = false;
+
+	*did_some_progress = __perform_reclaim(gfp_mask, order, zonelist,
+					       nodemask);
 	if (unlikely(!(*did_some_progress)))
 		return NULL;
 
@@ -4309,7 +4365,7 @@
 	init_waitqueue_head(&pgdat->kswapd_wait);
 	pgdat->kswapd_max_order = 0;
 	pgdat_page_cgroup_init(pgdat);
-	
+
 	for (j = 0; j < MAX_NR_ZONES; j++) {
 		struct zone *zone = pgdat->node_zones + j;
 		unsigned long size, realsize, memmap_pages;
@@ -4987,14 +5043,7 @@
 	calculate_totalreserve_pages();
 }
 
-/**
- * setup_per_zone_wmarks - called when min_free_kbytes changes
- * or when memory is hot-{added|removed}
- *
- * Ensures that the watermark[min,low,high] values for each zone are set
- * correctly with respect to min_free_kbytes.
- */
-void setup_per_zone_wmarks(void)
+static void __setup_per_zone_wmarks(void)
 {
 	unsigned long pages_min = min_free_kbytes >> (PAGE_SHIFT - 10);
 	unsigned long lowmem_pages = 0;
@@ -5041,6 +5090,11 @@
 
 		zone->watermark[WMARK_LOW]  = min_wmark_pages(zone) + (tmp >> 2);
 		zone->watermark[WMARK_HIGH] = min_wmark_pages(zone) + (tmp >> 1);
+
+		zone->watermark[WMARK_MIN] += cma_wmark_pages(zone);
+		zone->watermark[WMARK_LOW] += cma_wmark_pages(zone);
+		zone->watermark[WMARK_HIGH] += cma_wmark_pages(zone);
+
 		setup_zone_migrate_reserve(zone);
 		spin_unlock_irqrestore(&zone->lock, flags);
 	}
@@ -5049,6 +5103,20 @@
 	calculate_totalreserve_pages();
 }
 
+/**
+ * setup_per_zone_wmarks - called when min_free_kbytes changes
+ * or when memory is hot-{added|removed}
+ *
+ * Ensures that the watermark[min,low,high] values for each zone are set
+ * correctly with respect to min_free_kbytes.
+ */
+void setup_per_zone_wmarks(void)
+{
+	mutex_lock(&zonelists_mutex);
+	__setup_per_zone_wmarks();
+	mutex_unlock(&zonelists_mutex);
+}
+
 /*
  * The inactive anon list should be small enough that the VM never has to
  * do too much work, but large enough that each inactive page has a chance
@@ -5423,14 +5491,16 @@
 __count_immobile_pages(struct zone *zone, struct page *page, int count)
 {
 	unsigned long pfn, iter, found;
+	int mt;
+
 	/*
 	 * For avoiding noise data, lru_add_drain_all() should be called
 	 * If ZONE_MOVABLE, the zone never contains immobile pages
 	 */
 	if (zone_idx(zone) == ZONE_MOVABLE)
 		return true;
-
-	if (get_pageblock_migratetype(page) == MIGRATE_MOVABLE)
+	mt = get_pageblock_migratetype(page);
+	if (mt == MIGRATE_MOVABLE || is_migrate_cma(mt))
 		return true;
 
 	pfn = page_to_pfn(page);
@@ -5547,7 +5617,7 @@
 	return ret;
 }
 
-void unset_migratetype_isolate(struct page *page)
+void unset_migratetype_isolate(struct page *page, unsigned migratetype)
 {
 	struct zone *zone;
 	unsigned long flags;
@@ -5555,12 +5625,259 @@
 	spin_lock_irqsave(&zone->lock, flags);
 	if (get_pageblock_migratetype(page) != MIGRATE_ISOLATE)
 		goto out;
-	set_pageblock_migratetype(page, MIGRATE_MOVABLE);
-	move_freepages_block(zone, page, MIGRATE_MOVABLE);
+	set_pageblock_migratetype(page, migratetype);
+	move_freepages_block(zone, page, migratetype);
 out:
 	spin_unlock_irqrestore(&zone->lock, flags);
 }
 
+#ifdef CONFIG_CMA
+
+static unsigned long pfn_max_align_down(unsigned long pfn)
+{
+	return pfn & ~(max_t(unsigned long, MAX_ORDER_NR_PAGES,
+			     pageblock_nr_pages) - 1);
+}
+
+static unsigned long pfn_max_align_up(unsigned long pfn)
+{
+	return ALIGN(pfn, max_t(unsigned long, MAX_ORDER_NR_PAGES,
+				pageblock_nr_pages));
+}
+
+static struct page *
+__alloc_contig_migrate_alloc(struct page *page, unsigned long private,
+			     int **resultp)
+{
+	return alloc_page(GFP_HIGHUSER_MOVABLE);
+}
+
+/* [start, end) must belong to a single zone. */
+static int __alloc_contig_migrate_range(unsigned long start, unsigned long end)
+{
+	/* This function is based on compact_zone() from compaction.c. */
+
+	unsigned long pfn = start;
+	unsigned int tries = 0;
+	int ret = 0;
+
+	struct compact_control cc = {
+		.nr_migratepages = 0,
+		.order = -1,
+		.zone = page_zone(pfn_to_page(start)),
+		.sync = true,
+	};
+	INIT_LIST_HEAD(&cc.migratepages);
+
+	migrate_prep_local();
+
+	while (pfn < end || !list_empty(&cc.migratepages)) {
+		if (fatal_signal_pending(current)) {
+			ret = -EINTR;
+			break;
+		}
+
+		if (list_empty(&cc.migratepages)) {
+			cc.nr_migratepages = 0;
+			pfn = isolate_migratepages_range(cc.zone, &cc,
+							 pfn, end);
+			if (!pfn) {
+				ret = -EINTR;
+				break;
+			}
+			tries = 0;
+		} else if (++tries == 5) {
+			ret = ret < 0 ? ret : -EBUSY;
+			break;
+		}
+
+		ret = migrate_pages(&cc.migratepages,
+				    __alloc_contig_migrate_alloc,
+				    0, false, true);
+	}
+
+	putback_lru_pages(&cc.migratepages);
+	return ret > 0 ? 0 : ret;
+}
+
+/*
+ * Update zone's cma pages counter used for watermark level calculation.
+ */
+static inline void __update_cma_watermarks(struct zone *zone, int count)
+{
+	unsigned long flags;
+	spin_lock_irqsave(&zone->lock, flags);
+	zone->min_cma_pages += count;
+	spin_unlock_irqrestore(&zone->lock, flags);
+	setup_per_zone_wmarks();
+}
+
+/*
+ * Trigger memory pressure bump to reclaim some pages in order to be able to
+ * allocate 'count' pages in single page units. Does similar work to the
+ * __alloc_pages_slowpath() function.
+ */
+static int __reclaim_pages(struct zone *zone, gfp_t gfp_mask, int count)
+{
+	enum zone_type high_zoneidx = gfp_zone(gfp_mask);
+	struct zonelist *zonelist = node_zonelist(0, gfp_mask);
+	int did_some_progress = 0;
+	int order = 1;
+
+	/*
+	 * Increase the watermark levels to force kswapd to do its job and
+	 * stabilise free memory at the new watermark level.
+	 */
+	__update_cma_watermarks(zone, count);
+
+	/* Obey watermarks as if the page was being allocated */
+	while (!zone_watermark_ok(zone, 0, low_wmark_pages(zone), 0, 0)) {
+		wake_all_kswapd(order, zonelist, high_zoneidx, zone_idx(zone));
+
+		did_some_progress = __perform_reclaim(gfp_mask, order, zonelist,
+						      NULL);
+		if (!did_some_progress) {
+			/* Exhausted what can be done so it's blamo time */
+			out_of_memory(zonelist, gfp_mask, order, NULL, false);
+		}
+	}
+
+	/* Restore original watermark levels. */
+	__update_cma_watermarks(zone, -count);
+
+	return count;
+}
+
+/**
+ * alloc_contig_range() -- tries to allocate given range of pages
+ * @start:	start PFN to allocate
+ * @end:	one-past-the-last PFN to allocate
+ * @migratetype:	migratetype of the underlying pageblocks (either
+ *			#MIGRATE_MOVABLE or #MIGRATE_CMA).  All pageblocks
+ *			in range must have the same migratetype and it must
+ *			be either of the two.
+ *
+ * The PFN range does not have to be pageblock or MAX_ORDER_NR_PAGES
+ * aligned, however it's the caller's responsibility to guarantee that
+ * we are the only thread that changes migrate type of pageblocks the
+ * pages fall in.
+ *
+ * The PFN range must belong to a single zone.
+ *
+ * Returns zero on success or negative error code.  On success all
+ * pages whose PFN is in [start, end) are allocated for the caller and
+ * need to be freed with free_contig_range().
+ */
+int alloc_contig_range(unsigned long start, unsigned long end,
+		       unsigned migratetype)
+{
+	struct zone *zone = page_zone(pfn_to_page(start));
+	unsigned long outer_start, outer_end;
+	int ret = 0, order;
+
+	/*
+	 * What we do here is we mark all pageblocks in range as
+	 * MIGRATE_ISOLATE.  Because pageblock and max order pages may
+	 * have different sizes, and due to the way the page allocator
+	 * works, we align the range to the bigger of the two sizes so
+	 * that the page allocator won't try to merge buddies from
+	 * different pageblocks and change MIGRATE_ISOLATE to some
+	 * other migration type.
+	 *
+	 * Once the pageblocks are marked as MIGRATE_ISOLATE, we
+	 * migrate the pages from an unaligned range (ie. pages that
+	 * we are interested in).  This will put all the pages in
+	 * range back to page allocator as MIGRATE_ISOLATE.
+	 *
+	 * When this is done, we take the pages in range from page
+	 * allocator removing them from the buddy system.  This way
+	 * page allocator will never consider using them.
+	 *
+	 * This lets us mark the pageblocks back as
+	 * MIGRATE_CMA/MIGRATE_MOVABLE so that free pages in the
+	 * aligned range but not in the unaligned, original range are
+	 * put back to page allocator so that buddy can use them.
+	 */
+
+	ret = start_isolate_page_range(pfn_max_align_down(start),
+				       pfn_max_align_up(end), migratetype);
+	if (ret)
+		goto done;
+
+	ret = __alloc_contig_migrate_range(start, end);
+	if (ret)
+		goto done;
+
+	/*
+	 * Pages from [start, end) are within a MAX_ORDER_NR_PAGES
+	 * aligned blocks that are marked as MIGRATE_ISOLATE.  What's
+	 * more, all pages in [start, end) are free in page allocator.
+	 * What we are going to do is to allocate all pages from
+	 * [start, end) (that is remove them from page allocator).
+	 *
+	 * The only problem is that pages at the beginning and at the
+	 * end of the interesting range may not be aligned with pages that
+	 * the page allocator holds, i.e. they can be part of higher-order
+	 * pages.  Because of this, we reserve the bigger range and,
+	 * once this is done, free the pages we are not interested in.
+	 *
+	 * We don't have to hold zone->lock here because the pages are
+	 * isolated and thus won't get removed from the buddy allocator.
+	 */
+
+	lru_add_drain_all();
+	drain_all_pages();
+
+	order = 0;
+	outer_start = start;
+	while (!PageBuddy(pfn_to_page(outer_start))) {
+		if (++order >= MAX_ORDER) {
+			ret = -EBUSY;
+			goto done;
+		}
+		outer_start &= ~0UL << order;
+	}
+
+	/* Make sure the range is really isolated. */
+	if (test_pages_isolated(outer_start, end)) {
+		pr_warn("alloc_contig_range test_pages_isolated(%lx, %lx) failed\n",
+		       outer_start, end);
+		ret = -EBUSY;
+		goto done;
+	}
+
+	/*
+	 * Reclaim enough pages to make sure that contiguous allocation
+	 * will not starve the system.
+	 */
+	__reclaim_pages(zone, GFP_HIGHUSER_MOVABLE, end-start);
+
+	/* Grab isolated pages from freelists. */
+	outer_end = isolate_freepages_range(outer_start, end);
+	if (!outer_end) {
+		ret = -EBUSY;
+		goto done;
+	}
+
+	/* Free head and tail (if any) */
+	if (start != outer_start)
+		free_contig_range(outer_start, start - outer_start);
+	if (end != outer_end)
+		free_contig_range(end, outer_end - end);
+
+done:
+	undo_isolate_page_range(pfn_max_align_down(start),
+				pfn_max_align_up(end), migratetype);
+	return ret;
+}
+
+void free_contig_range(unsigned long pfn, unsigned nr_pages)
+{
+	for (; nr_pages--; ++pfn)
+		__free_page(pfn_to_page(pfn));
+}
+#endif
+
 #ifdef CONFIG_MEMORY_HOTREMOVE
 /*
  * All pages in the range must be isolated before calling this.
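The kernel-doc above pins down the contract, so a minimal, hypothetical caller can be sketched (not part of this patch; example_grab_contig, base_pfn and nr_pages are made-up names, and the pageblocks covering the range are assumed to have been marked MIGRATE_CMA beforehand):

	static int example_grab_contig(unsigned long base_pfn,
				       unsigned long nr_pages)
	{
		int ret;

		/* Claim [base_pfn, base_pfn + nr_pages) for this caller. */
		ret = alloc_contig_range(base_pfn, base_pfn + nr_pages,
					 MIGRATE_CMA);
		if (ret)
			return ret;	/* e.g. -EINTR or -EBUSY */

		/* ... use the physically contiguous pages ... */

		/* Hand every page back to the buddy allocator. */
		free_contig_range(base_pfn, nr_pages);
		return 0;
	}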
diff --git a/mm/page_isolation.c b/mm/page_isolation.c
index 4ae42bb..c9f0477 100644
--- a/mm/page_isolation.c
+++ b/mm/page_isolation.c
@@ -24,6 +24,7 @@
  * to be MIGRATE_ISOLATE.
  * @start_pfn: The lower PFN of the range to be isolated.
  * @end_pfn: The upper PFN of the range to be isolated.
+ * @migratetype: migrate type to set in error recovery.
  *
  * Making page-allocation-type to be MIGRATE_ISOLATE means free pages in
  * the range will never be allocated. Any free pages and pages freed in the
@@ -32,8 +33,8 @@
  * start_pfn/end_pfn must be aligned to pageblock_order.
  * Returns 0 on success and -EBUSY if any part of range cannot be isolated.
  */
-int
-start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn)
+int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
+			     unsigned migratetype)
 {
 	unsigned long pfn;
 	unsigned long undo_pfn;
@@ -56,7 +57,7 @@
 	for (pfn = start_pfn;
 	     pfn < undo_pfn;
 	     pfn += pageblock_nr_pages)
-		unset_migratetype_isolate(pfn_to_page(pfn));
+		unset_migratetype_isolate(pfn_to_page(pfn), migratetype);
 
 	return -EBUSY;
 }
@@ -64,8 +65,8 @@
 /*
  * Make isolated pages available again.
  */
-int
-undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn)
+int undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
+			    unsigned migratetype)
 {
 	unsigned long pfn;
 	struct page *page;
@@ -77,7 +78,7 @@
 		page = __first_valid_page(pfn, pageblock_nr_pages);
 		if (!page || get_pageblock_migratetype(page) != MIGRATE_ISOLATE)
 			continue;
-		unset_migratetype_isolate(page);
+		unset_migratetype_isolate(page, migratetype);
 	}
 	return 0;
 }
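With the new migratetype argument, callers are expected to undo an isolation with the same type they passed when starting it; a condensed sketch of that pairing, mirroring the alloc_contig_range() caller in mm/page_alloc.c:

	if (start_isolate_page_range(start_pfn, end_pfn, MIGRATE_CMA))
		return -EBUSY;

	/* ... migrate pages away and claim the now-free range ... */

	/* Restores each pageblock to MIGRATE_CMA, not MIGRATE_MOVABLE. */
	undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_CMA);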
@@ -86,7 +87,7 @@
  * all pages in [start_pfn...end_pfn) must be in the same zone.
  * zone->lock must be held before call this.
  *
- * Returns 1 if all pages in the range is isolated.
+ * Returns 1 if all pages in the range are isolated.
  */
 static int
 __test_page_isolated_in_pageblock(unsigned long pfn, unsigned long end_pfn)
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 7db1b9b..0dad31dc 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -613,6 +613,9 @@
 	"Reclaimable",
 	"Movable",
 	"Reserve",
+#ifdef CONFIG_CMA
+	"CMA",
+#endif
 	"Isolate",
 };
 
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
index 980b846..733dc22 100644
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
@@ -1993,10 +1993,10 @@
 				frames_to_ack = 0;
 		}
 
-		/* Ack now if the tx window is 3/4ths full.
+		/* Ack now if the window is 3/4ths full.
 		 * Calculate without mul or div
 		 */
-		threshold = pi->tx_win;
+		threshold = pi->ack_win;
 		threshold += threshold << 1;
 		threshold >>= 2;
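As a quick check of the shift arithmetic above: for ack_win = 10 this computes 10 + (10 << 1) = 30 and 30 >> 2 = 7, i.e. floor(3 * 10 / 4), so an ack is sent once roughly three quarters of the window is outstanding. The same computation as a standalone helper (illustration only):

	static u16 three_quarters(u16 win)
	{
		return (win + (win << 1)) >> 2;	/* 3 * win / 4 without mul/div */
	}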
 
@@ -3105,6 +3105,7 @@
 		pi->tx_win_max = L2CAP_TX_WIN_MAX_ENHANCED;
 		pi->extended_control = 0;
 	}
+	pi->ack_win = pi->tx_win;
 }
 
 static void l2cap_aggregate_fs(struct hci_ext_fs *cur,
@@ -3290,7 +3291,7 @@
 	struct l2cap_conf_rfc rfc = { .mode = pi->mode };
 	void *ptr = req->data;
 
-	BT_DBG("sk %p", sk);
+	BT_DBG("sk %p mode %d", sk, pi->mode);
 
 	if (pi->num_conf_req || pi->num_conf_rsp)
 		goto done;
@@ -3316,7 +3317,6 @@
 		if (!(pi->conn->feat_mask & L2CAP_FEAT_ERTM) &&
 				!(pi->conn->feat_mask & L2CAP_FEAT_STREAMING))
 			break;
-
 		rfc.txwin_size      = 0;
 		rfc.max_transmit    = 0;
 		rfc.retrans_timeout = 0;
@@ -3466,6 +3466,9 @@
 
 	BT_DBG("sk %p", sk);
 
+	if (pi->omtu > mtu)
+		mtu = pi->omtu;
+
 	while (len >= L2CAP_CONF_OPT_SIZE) {
 		len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
 
@@ -3567,6 +3570,8 @@
 	if (pi->mode != rfc.mode) {
 		result = L2CAP_CONF_UNACCEPT;
 		rfc.mode = pi->mode;
+		if (mtu > L2CAP_DEFAULT_MTU)
+			pi->omtu = mtu;
 
 		if (pi->num_conf_rsp == 1)
 			return -ECONNREFUSED;
@@ -3844,10 +3849,7 @@
 			break;
 
 		case L2CAP_CONF_EXT_WINDOW:
-			pi->tx_win = val;
-
-			if (pi->tx_win > L2CAP_TX_WIN_MAX_ENHANCED)
-				pi->tx_win = L2CAP_TX_WIN_MAX_ENHANCED;
+			pi->ack_win = min_t(u16, val, pi->ack_win);
 
 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EXT_WINDOW,
 					2, pi->tx_win);
@@ -3869,6 +3871,10 @@
 			pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
 			pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
 			pi->mps    = le16_to_cpu(rfc.max_pdu_size);
+			if (!pi->extended_control) {
+				pi->ack_win = min_t(u16, pi->ack_win,
+						    rfc.txwin_size);
+			}
 			break;
 		case L2CAP_MODE_STREAMING:
 			pi->mps    = le16_to_cpu(rfc.max_pdu_size);
@@ -3901,6 +3907,7 @@
 	int type, olen;
 	unsigned long val;
 	struct l2cap_conf_rfc rfc;
+	u16 txwin_ext = pi->ack_win;
 
 	BT_DBG("sk %p, rsp %p, len %d", sk, rsp, len);
 
@@ -3909,6 +3916,7 @@
 	rfc.retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
 	rfc.monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
 	rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
+	rfc.txwin_size = min_t(u16, pi->ack_win, L2CAP_DEFAULT_TX_WINDOW);
 
 	if ((pi->mode != L2CAP_MODE_ERTM) && (pi->mode != L2CAP_MODE_STREAMING))
 		return;
@@ -3920,16 +3928,22 @@
 		case L2CAP_CONF_RFC:
 			if (olen == sizeof(rfc))
 				memcpy(&rfc, (void *)val, olen);
-			goto done;
+			break;
+		case L2CAP_CONF_EXT_WINDOW:
+			txwin_ext = val;
+			break;
 		}
 	}
 
-done:
 	switch (rfc.mode) {
 	case L2CAP_MODE_ERTM:
 		pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
 		pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
 		pi->mps    = le16_to_cpu(rfc.max_pdu_size);
+		if (pi->extended_control)
+			pi->ack_win = min_t(u16, pi->ack_win, txwin_ext);
+		else
+			pi->ack_win = min_t(u16, pi->ack_win, rfc.txwin_size);
 		break;
 	case L2CAP_MODE_STREAMING:
 		pi->mps    = le16_to_cpu(rfc.max_pdu_size);
@@ -7249,14 +7263,31 @@
 static inline int l2cap_att_channel(struct l2cap_conn *conn, __le16 cid,
 							struct sk_buff *skb)
 {
-	struct sock *sk;
+	struct sock *sk = NULL;
 	struct sk_buff *skb_rsp;
 	struct l2cap_hdr *lh;
 	int dir;
-	u8 mtu_rsp[] = {L2CAP_ATT_MTU_RSP, 23, 0};
 	u8 err_rsp[] = {L2CAP_ATT_ERROR, 0x00, 0x00, 0x00,
 						L2CAP_ATT_NOT_SUPPORTED};
 
+	if (skb->data[0] == L2CAP_ATT_MTU_REQ) {
+		u8 mtu_rsp[] = {L2CAP_ATT_MTU_RSP, 23, 0};
+
+		skb_rsp = bt_skb_alloc(sizeof(mtu_rsp) + L2CAP_HDR_SIZE,
+								GFP_ATOMIC);
+		if (!skb_rsp)
+			goto drop;
+
+		lh = (struct l2cap_hdr *) skb_put(skb_rsp, L2CAP_HDR_SIZE);
+		lh->len = cpu_to_le16(sizeof(mtu_rsp));
+		lh->cid = cpu_to_le16(L2CAP_CID_LE_DATA);
+		memcpy(skb_put(skb_rsp, sizeof(mtu_rsp)), mtu_rsp,
+							sizeof(mtu_rsp));
+		hci_send_acl(conn->hcon, NULL, skb_rsp, 0);
+
+		goto free_skb;
+	}
+
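For reference, the canned reply above decodes as follows (mtu_rsp comes straight from this hunk; the 16-bit MTU is sent little-endian and 23 is the default LE ATT_MTU):

	u8 mtu_rsp[] = {L2CAP_ATT_MTU_RSP, 23, 0};	/* opcode, MTU low, MTU high */
	u16 mtu = mtu_rsp[1] | (mtu_rsp[2] << 8);	/* == 23 */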
 	dir = (skb->data[0] & L2CAP_ATT_RESPONSE_BIT) ? 0 : 1;
 
 	sk = l2cap_find_sock_by_fixed_cid_and_dir(cid, conn->src,
@@ -7277,28 +7308,30 @@
 	if (l2cap_pi(sk)->imtu < skb->len)
 		goto drop;
 
-	if (skb->data[0] == L2CAP_ATT_MTU_REQ) {
-		skb_rsp = bt_skb_alloc(sizeof(mtu_rsp) + L2CAP_HDR_SIZE,
-								GFP_ATOMIC);
-		if (!skb_rsp)
-			goto drop;
-
-		lh = (struct l2cap_hdr *) skb_put(skb_rsp, L2CAP_HDR_SIZE);
-		lh->len = cpu_to_le16(sizeof(mtu_rsp));
-		lh->cid = cpu_to_le16(L2CAP_CID_LE_DATA);
-		memcpy(skb_put(skb_rsp, sizeof(mtu_rsp)), mtu_rsp,
-							sizeof(mtu_rsp));
-		hci_send_acl(conn->hcon, NULL, skb_rsp, 0);
-
-		goto free_skb;
-	}
-
 	if (!sock_queue_rcv_skb(sk, skb))
 		goto done;
 
 drop:
-	if (skb->data[0] & L2CAP_ATT_RESPONSE_BIT &&
-			skb->data[0] != L2CAP_ATT_INDICATE)
+	if (skb->data[0] != L2CAP_ATT_INDICATE)
+		goto not_indicate;
+
+	/* If this is an incoming Indication, we are required to confirm */
+
+	skb_rsp = bt_skb_alloc(sizeof(u8) + L2CAP_HDR_SIZE, GFP_ATOMIC);
+	if (!skb_rsp)
+		goto free_skb;
+
+	lh = (struct l2cap_hdr *) skb_put(skb_rsp, L2CAP_HDR_SIZE);
+	lh->len = cpu_to_le16(sizeof(u8));
+	lh->cid = cpu_to_le16(L2CAP_CID_LE_DATA);
+	err_rsp[0] = L2CAP_ATT_CONFIRM;
+	memcpy(skb_put(skb_rsp, sizeof(u8)), err_rsp, sizeof(u8));
+	hci_send_acl(conn->hcon, NULL, skb_rsp, 0);
+	goto free_skb;
+
+not_indicate:
+	if (skb->data[0] & L2CAP_ATT_RESPONSE_BIT ||
+			skb->data[0] == L2CAP_ATT_CONFIRM)
 		goto free_skb;
 
 	/* If this is an incoming PDU that requires a response, respond with
diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
index 0ad1633..3fa4a02 100644
--- a/net/bluetooth/l2cap_sock.c
+++ b/net/bluetooth/l2cap_sock.c
@@ -1174,7 +1174,7 @@
 static int l2cap_sock_release(struct socket *sock)
 {
 	struct sock *sk = sock->sk;
-	struct sock *srv_sk = NULL;
+	struct sock *sk2 = NULL;
 	int err;
 
 	BT_DBG("sock %p, sk %p", sock, sk);
@@ -1182,15 +1182,16 @@
 	if (!sk)
 		return 0;
 
-	/* If this is an ATT Client socket, find the matching Server */
-	if (l2cap_pi(sk)->scid == L2CAP_CID_LE_DATA && !l2cap_pi(sk)->incoming)
-		srv_sk = l2cap_find_sock_by_fixed_cid_and_dir(L2CAP_CID_LE_DATA,
-					&bt_sk(sk)->src, &bt_sk(sk)->dst, 1);
+	/* If this is an ATT socket, find its matching server/client */
+	if (l2cap_pi(sk)->scid == L2CAP_CID_LE_DATA)
+		sk2 = l2cap_find_sock_by_fixed_cid_and_dir(L2CAP_CID_LE_DATA,
+					&bt_sk(sk)->src, &bt_sk(sk)->dst,
+					l2cap_pi(sk)->incoming ? 0 : 1);
 
-	/* If server socket found, request tear down */
-	BT_DBG("client:%p server:%p", sk, srv_sk);
-	if (srv_sk)
-		l2cap_sock_set_timer(srv_sk, 1);
+	/* If matching socket found, request tear down */
+	BT_DBG("sock:%p companion:%p", sk, sk2);
+	if (sk2)
+		l2cap_sock_set_timer(sk2, 1);
 
 	err = l2cap_sock_shutdown(sock, 2);
 
@@ -1270,6 +1271,7 @@
 	pi->scid = 0;
 	pi->dcid = 0;
 	pi->tx_win_max = L2CAP_TX_WIN_MAX_ENHANCED;
+	pi->ack_win = pi->tx_win;
 	pi->extended_control = 0;
 
 	pi->local_conf.fcs = pi->fcs;
diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
index ac85423..dce8491 100644
--- a/net/bluetooth/mgmt.c
+++ b/net/bluetooth/mgmt.c
@@ -418,6 +418,7 @@
 static int update_class(struct hci_dev *hdev)
 {
 	u8 cod[3];
+	int err = 0;
 
 	BT_DBG("%s", hdev->name);
 
@@ -431,7 +432,12 @@
 	if (memcmp(cod, hdev->dev_class, 3) == 0)
 		return 0;
 
-	return hci_send_cmd(hdev, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
+	err = hci_send_cmd(hdev, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
+
+	if (err == 0)
+		memcpy(hdev->dev_class, cod, 3);
+
+	return err;
 }
 
 static int set_limited_discoverable(struct sock *sk, u16 index,
@@ -1000,12 +1006,12 @@
 	hdev->major_class |= cp->major & MGMT_MAJOR_CLASS_MASK;
 	hdev->minor_class = cp->minor;
 
-	if (test_bit(HCI_UP, &hdev->flags))
+	if (test_bit(HCI_UP, &hdev->flags)) {
 		err = update_class(hdev);
-	else
-		err = 0;
-
-	if (err == 0)
+		if (err == 0)
+			err = cmd_complete(sk, index, MGMT_OP_SET_DEV_CLASS,
+					   hdev->dev_class, sizeof(u8) * 3);
+	} else
 		err = cmd_complete(sk, index, MGMT_OP_SET_DEV_CLASS, NULL, 0);
 
 	hci_dev_unlock_bh(hdev);
@@ -2131,9 +2137,10 @@
 
 	err = hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
 
-	if (err < 0)
+	if (err < 0) {
 		mgmt_pending_remove(cmd);
-	else if (lmp_le_capable(hdev)) {
+		hdev->disco_state = SCAN_IDLE;
+	} else if (lmp_le_capable(hdev)) {
 		cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, index);
 		if (!cmd)
 			mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, index,
@@ -2145,7 +2152,8 @@
 		del_timer(&hdev->disco_timer);
 		mod_timer(&hdev->disco_timer,
 				jiffies + msecs_to_jiffies(20000));
-	}
+	} else
+		hdev->disco_state = SCAN_BR;
 
 failed:
 	hci_dev_unlock_bh(hdev);
@@ -2189,9 +2197,7 @@
 			err = cmd_complete(sk, index, MGMT_OP_STOP_DISCOVERY,
 								NULL, 0);
 		}
-	}
-
-	if (err < 0)
+	} else if (state == SCAN_BR)
 		err = hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
 
 	cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, index);
diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
index 5034393..947bd85 100644
--- a/sound/core/pcm_native.c
+++ b/sound/core/pcm_native.c
@@ -2779,7 +2779,7 @@
 
 	pcm_file = file->private_data;
 
-	if (((cmd >> 8) & 0xff) != 'A')
+	if ((((cmd >> 8) & 0xff) != 'A') && (((cmd >> 8) & 0xff) != 'C'))
 		return -ENOTTY;
 
 	return snd_pcm_capture_ioctl1(file, pcm_file->substream, cmd,
diff --git a/sound/soc/codecs/msm_stub.c b/sound/soc/codecs/msm_stub.c
index 0a3157f..7e603b4 100644
--- a/sound/soc/codecs/msm_stub.c
+++ b/sound/soc/codecs/msm_stub.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+/* Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -12,6 +12,7 @@
 #include <linux/platform_device.h>
 #include <linux/slab.h>
 #include <linux/module.h>
+#include <linux/of_device.h>
 #include <sound/core.h>
 #include <sound/pcm.h>
 #include <sound/soc.h>
@@ -44,6 +45,11 @@
 
 static int __devinit msm_stub_dev_probe(struct platform_device *pdev)
 {
+	if (pdev->dev.of_node)
+		dev_set_name(&pdev->dev, "%s.%d", "msm-stub-codec", 1);
+
+	dev_dbg(&pdev->dev, "dev name %s\n", dev_name(&pdev->dev));
+
 	return snd_soc_register_codec(&pdev->dev,
 	&soc_msm_stub, msm_stub_dais, ARRAY_SIZE(msm_stub_dais));
 }
@@ -53,11 +59,16 @@
 	snd_soc_unregister_codec(&pdev->dev);
 	return 0;
 }
+static const struct of_device_id msm_stub_codec_dt_match[] = {
+	{ .compatible = "qcom,msm-stub-codec", },
+	{}
+};
 
 static struct platform_driver msm_stub_driver = {
 	.driver = {
 		.name = "msm-stub-codec",
 		.owner = THIS_MODULE,
+		.of_match_table = msm_stub_codec_dt_match,
 	},
 	.probe = msm_stub_dev_probe,
 	.remove = __devexit_p(msm_stub_dev_remove),
diff --git a/sound/soc/codecs/wcd9304.c b/sound/soc/codecs/wcd9304.c
index be31953..d9a8ae0 100644
--- a/sound/soc/codecs/wcd9304.c
+++ b/sound/soc/codecs/wcd9304.c
@@ -283,12 +283,16 @@
 
 	ear_pa_gain = snd_soc_read(codec, SITAR_A_RX_EAR_GAIN);
 
-	ear_pa_gain = ear_pa_gain >> 5;
+	ear_pa_gain &= 0xE0;
 
 	if (ear_pa_gain == 0x00) {
 		ucontrol->value.integer.value[0] = 0;
-	} else if (ear_pa_gain == 0x04) {
+	} else if (ear_pa_gain == 0x80) {
 		ucontrol->value.integer.value[0] = 1;
+	} else if (ear_pa_gain == 0xA0) {
+		ucontrol->value.integer.value[0] = 2;
+	} else if (ear_pa_gain == 0xE0) {
+		ucontrol->value.integer.value[0] = 3;
 	} else  {
 		pr_err("%s: ERROR: Unsupported Ear Gain = 0x%x\n",
 				__func__, ear_pa_gain);
@@ -316,11 +320,17 @@
 	case 1:
 		ear_pa_gain = 0x80;
 		break;
+	case 2:
+		ear_pa_gain = 0xA0;
+		break;
+	case 3:
+		ear_pa_gain = 0xE0;
+		break;
 	default:
 		return -EINVAL;
 	}
 
-	snd_soc_write(codec, SITAR_A_RX_EAR_GAIN, ear_pa_gain);
+	snd_soc_update_bits(codec, SITAR_A_RX_EAR_GAIN, 0xE0, ear_pa_gain);
 	return 0;
 }
 
@@ -491,9 +501,11 @@
 	return 0;
 }
 
-static const char *sitar_ear_pa_gain_text[] = {"POS_6_DB", "POS_2_DB"};
+static const char * const sitar_ear_pa_gain_text[] = {"POS_6_DB",
+					"POS_2_DB", "NEG_2P5_DB", "NEG_12_DB"};
+
 static const struct soc_enum sitar_ear_pa_gain_enum[] = {
-		SOC_ENUM_SINGLE_EXT(2, sitar_ear_pa_gain_text),
+		SOC_ENUM_SINGLE_EXT(4, sitar_ear_pa_gain_text),
 };
 
 /*cut of frequency for high pass filter*/
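Reading the updated get/put handlers together with the enum text, the control now spans four gain steps in the SITAR_A_RX_EAR_GAIN[7:5] field (values taken directly from the code above):

	index 0  ->  0x00  ->  POS_6_DB
	index 1  ->  0x80  ->  POS_2_DB
	index 2  ->  0xA0  ->  NEG_2P5_DB
	index 3  ->  0xE0  ->  NEG_12_DB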
@@ -1445,7 +1457,8 @@
 	struct snd_kcontrol *kcontrol, int event)
 {
 	struct snd_soc_codec *codec = w->codec;
-	u16 dec_reset_reg;
+	u16 dec_reset_reg, gain_reg;
+	u8 current_gain;
 
 	pr_debug("%s %d\n", __func__, event);
 
@@ -1462,6 +1475,12 @@
 			1 << w->shift);
 		snd_soc_update_bits(codec, dec_reset_reg, 1 << w->shift, 0x0);
 		break;
+	case SND_SOC_DAPM_POST_PMU:
+		/* Reprogram the digital gain after power up of Decimator */
+		gain_reg = SITAR_A_CDC_TX1_VOL_CTL_GAIN + (8 * w->shift);
+		current_gain = snd_soc_read(codec, gain_reg);
+		snd_soc_write(codec, gain_reg, current_gain);
+		break;
 	}
 	return 0;
 }
@@ -1470,6 +1489,8 @@
 	struct snd_kcontrol *kcontrol, int event)
 {
 	struct snd_soc_codec *codec = w->codec;
+	u16 gain_reg;
+	u8 current_gain;
 
 	pr_debug("%s %d %s\n", __func__, event, w->name);
 
@@ -1480,6 +1501,11 @@
 		snd_soc_update_bits(codec, SITAR_A_CDC_CLK_RX_RESET_CTL,
 			1 << w->shift, 0x0);
 		break;
+	case SND_SOC_DAPM_POST_PMU:
+		/* Reprogram gain after power up interpolator */
+		gain_reg = SITAR_A_CDC_RX1_VOL_CTL_B2_CTL + (8 * w->shift);
+		current_gain = snd_soc_read(codec, gain_reg);
+		snd_soc_write(codec, gain_reg, current_gain);
 	}
 	return 0;
 }
@@ -1792,11 +1818,14 @@
 			SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
 
 	SND_SOC_DAPM_MIXER_E("RX1 MIX1", SITAR_A_CDC_CLK_RX_B1_CTL, 0, 0, NULL,
-		0, sitar_codec_reset_interpolator, SND_SOC_DAPM_PRE_PMU),
+		0, sitar_codec_reset_interpolator,
+		SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU),
 	SND_SOC_DAPM_MIXER_E("RX2 MIX1", SITAR_A_CDC_CLK_RX_B1_CTL, 1, 0, NULL,
-		0, sitar_codec_reset_interpolator, SND_SOC_DAPM_PRE_PMU),
+		0, sitar_codec_reset_interpolator,
+		SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU),
 	SND_SOC_DAPM_MIXER_E("RX3 MIX1", SITAR_A_CDC_CLK_RX_B1_CTL, 2, 0, NULL,
-		0, sitar_codec_reset_interpolator, SND_SOC_DAPM_PRE_PMU),
+		0, sitar_codec_reset_interpolator,
+		SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU),
 
 	SND_SOC_DAPM_MUX("DAC1 MUX", SND_SOC_NOPM, 0, 0,
 		&rx_dac1_mux),
@@ -1868,13 +1897,17 @@
 		SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
 
 	SND_SOC_DAPM_MUX_E("DEC1 MUX", SITAR_A_CDC_CLK_TX_CLK_EN_B1_CTL, 0, 0,
-		&dec1_mux, sitar_codec_enable_dec, SND_SOC_DAPM_PRE_PMU),
+		&dec1_mux, sitar_codec_enable_dec,
+		SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU),
 	SND_SOC_DAPM_MUX_E("DEC2 MUX", SITAR_A_CDC_CLK_TX_CLK_EN_B1_CTL, 1, 0,
-		&dec2_mux, sitar_codec_enable_dec, SND_SOC_DAPM_PRE_PMU),
+		&dec2_mux, sitar_codec_enable_dec,
+		SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU),
 	SND_SOC_DAPM_MUX_E("DEC3 MUX", SITAR_A_CDC_CLK_TX_CLK_EN_B1_CTL, 2, 0,
-		&dec3_mux, sitar_codec_enable_dec, SND_SOC_DAPM_PRE_PMU),
+		&dec3_mux, sitar_codec_enable_dec,
+		SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU),
 	SND_SOC_DAPM_MUX_E("DEC4 MUX", SITAR_A_CDC_CLK_TX_CLK_EN_B1_CTL, 3, 0,
-		&dec4_mux, sitar_codec_enable_dec, SND_SOC_DAPM_PRE_PMU),
+		&dec4_mux, sitar_codec_enable_dec,
+		SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU),
 
 	SND_SOC_DAPM_MUX("ANC1 MUX", SND_SOC_NOPM, 0, 0, &anc1_mux),
 	SND_SOC_DAPM_MUX("ANC2 MUX", SND_SOC_NOPM, 0, 0, &anc2_mux),
@@ -2118,10 +2151,11 @@
 
 static int sitar_volatile(struct snd_soc_codec *ssc, unsigned int reg)
 {
+	int i;
+
 	/* Registers lower than 0x100 are top level registers which can be
 	* written by the Sitar core driver.
 	*/
-
 	if ((reg >= SITAR_A_CDC_MBHC_EN_CTL) || (reg < 0x100))
 		return 1;
 
@@ -2130,6 +2164,15 @@
 		(reg <= SITAR_A_CDC_IIR1_COEF_B5_CTL))
 		return 1;
 
+	for (i = 0; i < NUM_DECIMATORS; i++) {
+		if (reg == SITAR_A_CDC_TX1_VOL_CTL_GAIN + (8 * i))
+			return 1;
+	}
+
+	for (i = 0; i < NUM_INTERPOLATORS; i++) {
+		if (reg == SITAR_A_CDC_RX1_VOL_CTL_B2_CTL + (8 * i))
+			return 1;
+	}
 	return 0;
 }
 
diff --git a/sound/soc/codecs/wcd9310.c b/sound/soc/codecs/wcd9310.c
index e85e9f5..74ae595 100644
--- a/sound/soc/codecs/wcd9310.c
+++ b/sound/soc/codecs/wcd9310.c
@@ -18,6 +18,7 @@
 #include <linux/printk.h>
 #include <linux/ratelimit.h>
 #include <linux/debugfs.h>
+#include <linux/wait.h>
 #include <linux/mfd/wcd9xxx/core.h>
 #include <linux/mfd/wcd9xxx/wcd9xxx_registers.h>
 #include <linux/mfd/wcd9xxx/wcd9310_registers.h>
@@ -47,6 +48,8 @@
 #define MBHC_FW_READ_ATTEMPTS 15
 #define MBHC_FW_READ_TIMEOUT 2000000
 
+#define SLIM_CLOSE_TIMEOUT 1000
+
 enum {
 	MBHC_USE_HPHL_TRIGGER = 1,
 	MBHC_USE_MB_TRIGGER = 2
@@ -78,6 +81,8 @@
 	u32 *ch_num;
 	u32 ch_act;
 	u32 ch_tot;
+	u32 ch_mask;
+	wait_queue_head_t dai_wait;
 };
 
 #define TABLA_MCLK_RATE_12288KHZ 12288000
@@ -114,6 +119,11 @@
 static const DECLARE_TLV_DB_SCALE(analog_gain, 0, 25, 1);
 static struct snd_soc_dai_driver tabla_dai[];
 static const DECLARE_TLV_DB_SCALE(aux_pga_gain, 0, 2, 0);
+static int tabla_codec_enable_slimrx(struct snd_soc_dapm_widget *w,
+	struct snd_kcontrol *kcontrol, int event);
+static int tabla_codec_enable_slimtx(struct snd_soc_dapm_widget *w,
+	struct snd_kcontrol *kcontrol, int event);
+
 
 enum tabla_bandgap_type {
 	TABLA_BANDGAP_OFF = 0,
@@ -319,6 +329,12 @@
 	 */
 	struct mutex codec_resource_lock;
 
+	/* Work to perform polling on microphone voltage
+	 * in order to correct plug type once plug type
+	 * is detected as headphone
+	 */
+	struct work_struct hs_correct_plug_work_nogpio;
+
 #ifdef CONFIG_DEBUG_FS
 	struct dentry *debugfs_poke;
 	struct dentry *debugfs_mbhc;
@@ -2192,8 +2208,17 @@
 		}
 		release_firmware(fw);
 
+		TABLA_ACQUIRE_LOCK(tabla->codec_resource_lock);
+		/* if MBHC polling is active, set TX7_MBHC_EN bit 7 */
+		if (tabla->mbhc_polling_active)
+			snd_soc_update_bits(codec, TABLA_A_TX_7_MBHC_EN, 0x80,
+					    0x80);
+		TABLA_RELEASE_LOCK(tabla->codec_resource_lock);
 		break;
 	case SND_SOC_DAPM_POST_PMD:
+		/* unset TX7_MBHC_EN bit 7 */
+		snd_soc_update_bits(codec, TABLA_A_TX_7_MBHC_EN, 0x80, 0x00);
+
 		snd_soc_write(codec, TABLA_A_CDC_CLK_ANC_RESET_CTL, 0xFF);
 		snd_soc_write(codec, TABLA_A_CDC_CLK_ANC_CLK_EN_CTL, 0);
 		break;
@@ -3931,50 +3956,277 @@
 	return 0;
 }
 
+
+static struct snd_soc_dapm_widget tabla_dapm_aif_in_widgets[] = {
+
+	SND_SOC_DAPM_AIF_IN_E("SLIM RX1", "AIF1 Playback", 0, SND_SOC_NOPM, 1,
+				0, tabla_codec_enable_slimrx,
+				SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
+
+	SND_SOC_DAPM_AIF_IN_E("SLIM RX2", "AIF1 Playback", 0, SND_SOC_NOPM, 2,
+				0, tabla_codec_enable_slimrx,
+				SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
+
+	SND_SOC_DAPM_AIF_IN_E("SLIM RX3", "AIF1 Playback", 0, SND_SOC_NOPM, 3,
+				0, tabla_codec_enable_slimrx,
+				SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
+
+	SND_SOC_DAPM_AIF_IN_E("SLIM RX4", "AIF3 Playback", 0, SND_SOC_NOPM, 4,
+				0, tabla_codec_enable_slimrx,
+				SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
+
+	SND_SOC_DAPM_AIF_IN_E("SLIM RX5", "AIF3 Playback", 0, SND_SOC_NOPM, 5,
+				0, tabla_codec_enable_slimrx,
+				SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
+
+	SND_SOC_DAPM_AIF_IN_E("SLIM RX6", "AIF2 Playback", 0, SND_SOC_NOPM, 6,
+				0, tabla_codec_enable_slimrx,
+				SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
+
+	SND_SOC_DAPM_AIF_IN_E("SLIM RX7", "AIF2 Playback", 0, SND_SOC_NOPM, 7,
+				0, tabla_codec_enable_slimrx,
+				SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
+};
+
+static struct snd_soc_dapm_widget tabla_dapm_aif_out_widgets[] = {
+
+	SND_SOC_DAPM_AIF_OUT_E("SLIM TX1", "AIF2 Capture", 0, SND_SOC_NOPM, 1,
+				0, tabla_codec_enable_slimtx,
+				SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
+
+	SND_SOC_DAPM_AIF_OUT_E("SLIM TX2", "AIF2 Capture", 0, SND_SOC_NOPM, 2,
+				0, tabla_codec_enable_slimtx,
+				SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
+
+	SND_SOC_DAPM_AIF_OUT_E("SLIM TX3", "AIF3 Capture", 0, SND_SOC_NOPM, 3,
+				0, tabla_codec_enable_slimtx,
+				SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
+
+	SND_SOC_DAPM_AIF_OUT_E("SLIM TX4", "AIF2 Capture", 0, SND_SOC_NOPM, 4,
+				0, tabla_codec_enable_slimtx,
+				SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
+
+	SND_SOC_DAPM_AIF_OUT_E("SLIM TX5", "AIF3 Capture", 0, SND_SOC_NOPM, 5,
+				0, tabla_codec_enable_slimtx,
+				SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
+
+	SND_SOC_DAPM_AIF_OUT_E("SLIM TX6", "AIF2 Capture", 0, SND_SOC_NOPM, 6,
+				0, tabla_codec_enable_slimtx,
+				SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
+
+	SND_SOC_DAPM_AIF_OUT_E("SLIM TX7", "AIF1 Capture", 0, SND_SOC_NOPM, 7,
+				0, tabla_codec_enable_slimtx,
+				SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
+
+	SND_SOC_DAPM_AIF_OUT_E("SLIM TX8", "AIF1 Capture", 0, SND_SOC_NOPM, 8,
+				0, tabla_codec_enable_slimtx,
+				SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
+
+	SND_SOC_DAPM_AIF_OUT_E("SLIM TX9", "AIF1 Capture", 0, SND_SOC_NOPM, 9,
+				0, tabla_codec_enable_slimtx,
+				SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
+
+	SND_SOC_DAPM_AIF_OUT_E("SLIM TX10", "AIF1 Capture", 0, SND_SOC_NOPM, 10,
+				0, tabla_codec_enable_slimtx,
+				SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
+};
+
+static int tabla_set_interpolator_rate(struct snd_soc_dai *dai,
+	u8 rx_fs_rate_reg_val, u32 compander_fs, u32 sample_rate)
+{
+	u32 i, j;
+	u8 rx_mix1_inp;
+	u16 rx_mix_1_reg_1, rx_mix_1_reg_2;
+	u16 rx_fs_reg;
+	u8 rx_mix_1_reg_1_val, rx_mix_1_reg_2_val;
+	struct snd_soc_codec *codec = dai->codec;
+	struct tabla_priv *tabla = snd_soc_codec_get_drvdata(codec);
+	struct snd_soc_dapm_widget *w = tabla_dapm_aif_in_widgets;
+
+	for (i = 0; i < ARRAY_SIZE(tabla_dapm_aif_in_widgets); i++) {
+
+		if (strncmp(dai->driver->playback.stream_name, w[i].sname, 13))
+			continue;
+
+		rx_mix1_inp = w[i].shift + 4;
+
+		if ((rx_mix1_inp < 0x5) || (rx_mix1_inp > 0xB)) {
+
+			pr_err("%s: Invalid SLIM RX%u port.  widget = %s\n",
+				__func__, rx_mix1_inp - 4, w[i].name);
+			return -EINVAL;
+		}
+
+		rx_mix_1_reg_1 = TABLA_A_CDC_CONN_RX1_B1_CTL;
+
+		for (j = 0; j < NUM_INTERPOLATORS; j++) {
+
+			rx_mix_1_reg_2 = rx_mix_1_reg_1 + 1;
+
+			rx_mix_1_reg_1_val = snd_soc_read(codec,
+					rx_mix_1_reg_1);
+			rx_mix_1_reg_2_val = snd_soc_read(codec,
+					rx_mix_1_reg_2);
+
+			if (((rx_mix_1_reg_1_val & 0x0F) == rx_mix1_inp) ||
+			   (((rx_mix_1_reg_1_val >> 4) & 0x0F) == rx_mix1_inp)
+			   || ((rx_mix_1_reg_2_val & 0x0F) == rx_mix1_inp)) {
+
+				rx_fs_reg = TABLA_A_CDC_RX1_B5_CTL + 8 * j;
+
+				pr_debug("%s: %s connected to RX%u\n", __func__,
+					w[i].name, j + 1);
+
+				pr_debug("%s: set RX%u sample rate to %u\n",
+					__func__, j + 1, sample_rate);
+
+				snd_soc_update_bits(codec, rx_fs_reg,
+						0xE0, rx_fs_rate_reg_val);
+
+				if (comp_rx_path[j] < COMPANDER_MAX)
+					tabla->comp_fs[comp_rx_path[j]]
+					= compander_fs;
+			}
+			if (j <= 2)
+				rx_mix_1_reg_1 += 3;
+			else
+				rx_mix_1_reg_1 += 2;
+		}
+	}
+	return 0;
+}
+
+static int tabla_set_decimator_rate(struct snd_soc_dai *dai,
+	u8 tx_fs_rate_reg_val, u32 sample_rate)
+{
+	struct snd_soc_codec *codec = dai->codec;
+	struct snd_soc_dapm_widget *w = tabla_dapm_aif_out_widgets;
+
+	u32 i, tx_port;
+	u16 tx_port_reg, tx_fs_reg;
+	u8 tx_port_reg_val;
+	s8 decimator;
+
+	for (i = 0; i < ARRAY_SIZE(tabla_dapm_aif_out_widgets); i++) {
+
+		if (strncmp(dai->driver->capture.stream_name, w[i].sname, 12))
+			continue;
+
+		tx_port = w[i].shift;
+
+		if ((tx_port < 1) || (tx_port > NUM_DECIMATORS)) {
+			pr_err("%s: Invalid SLIM TX%u port.  widget = %s\n",
+				__func__, tx_port, w[i].name);
+			return -EINVAL;
+		}
+
+		tx_port_reg = TABLA_A_CDC_CONN_TX_SB_B1_CTL + (tx_port - 1);
+		tx_port_reg_val =  snd_soc_read(codec, tx_port_reg);
+
+		decimator = 0;
+
+		if ((tx_port >= 1) && (tx_port <= 6)) {
+
+			tx_port_reg_val =  tx_port_reg_val & 0x0F;
+			if (tx_port_reg_val == 0x8)
+				decimator = tx_port;
+
+		} else if ((tx_port >= 7) && (tx_port <= NUM_DECIMATORS)) {
+
+			tx_port_reg_val =  tx_port_reg_val & 0x1F;
+
+			if ((tx_port_reg_val >= 0x8) &&
+			    (tx_port_reg_val <= 0x11)) {
+
+				decimator = (tx_port_reg_val - 0x8) + 1;
+			}
+		}
+
+		if (decimator) { /* SLIM_TX port has a DEC as input */
+
+			tx_fs_reg = TABLA_A_CDC_TX1_CLK_FS_CTL +
+				8 * (decimator - 1);
+
+			pr_debug("%s: set DEC%u (-> SLIM_TX%u) rate to %u\n",
+				__func__, decimator, tx_port, sample_rate);
+
+			snd_soc_update_bits(codec, tx_fs_reg, 0x07,
+					tx_fs_rate_reg_val);
+
+		} else {
+			if ((tx_port_reg_val >= 0x1) &&
+					(tx_port_reg_val <= 0x7)) {
+
+				pr_debug("%s: RMIX%u going to SLIM TX%u\n",
+					__func__, tx_port_reg_val, tx_port);
+
+			} else if  ((tx_port_reg_val >= 0x8) &&
+					(tx_port_reg_val <= 0x11)) {
+
+				pr_err("%s: ERROR: Should not be here\n",
+						__func__);
+				pr_err("%s: ERROR: DEC connected to SLIM TX%u\n"
+						, __func__, tx_port);
+				return -EINVAL;
+
+			} else if (tx_port_reg_val == 0) {
+				pr_debug("%s: no signal to SLIM TX%u\n",
+						__func__, tx_port);
+			} else {
+				pr_err("%s: ERROR: wrong signal to SLIM TX%u\n"
+						, __func__, tx_port);
+				pr_err("%s: ERROR: wrong signal = %u\n"
+						, __func__, tx_port_reg_val);
+				return -EINVAL;
+			}
+		}
+	}
+	return 0;
+}
+
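Both helpers above rely on the same 8-register stride between channels; a hypothetical pair of inlines (names made up, bases and stride taken from the code above) makes the addressing explicit:

	/* Sample-rate register of interpolator j (0-based). */
	static inline u16 tabla_rx_fs_reg(u32 j)
	{
		return TABLA_A_CDC_RX1_B5_CTL + 8 * j;
	}

	/* Sample-rate register of decimator d (1-based). */
	static inline u16 tabla_tx_fs_reg(u32 d)
	{
		return TABLA_A_CDC_TX1_CLK_FS_CTL + 8 * (d - 1);
	}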
 static int tabla_hw_params(struct snd_pcm_substream *substream,
-			    struct snd_pcm_hw_params *params,
-			    struct snd_soc_dai *dai)
+		struct snd_pcm_hw_params *params,
+		struct snd_soc_dai *dai)
 {
 	struct snd_soc_codec *codec = dai->codec;
 	struct tabla_priv *tabla = snd_soc_codec_get_drvdata(dai->codec);
-	u8 path, shift;
-	u16 tx_fs_reg, rx_fs_reg;
-	u8 tx_fs_rate, rx_fs_rate, rx_state, tx_state;
+	u8 tx_fs_rate_reg_val, rx_fs_rate_reg_val;
 	u32 compander_fs;
+	int ret;
 
 	pr_debug("%s: dai_name = %s DAI-ID %x rate %d num_ch %d\n", __func__,
-		 dai->name, dai->id, params_rate(params),
-		 params_channels(params));
+			dai->name, dai->id, params_rate(params),
+			params_channels(params));
 
 	switch (params_rate(params)) {
 	case 8000:
-		tx_fs_rate = 0x00;
-		rx_fs_rate = 0x00;
+		tx_fs_rate_reg_val = 0x00;
+		rx_fs_rate_reg_val = 0x00;
 		compander_fs = COMPANDER_FS_8KHZ;
 		break;
 	case 16000:
-		tx_fs_rate = 0x01;
-		rx_fs_rate = 0x20;
+		tx_fs_rate_reg_val = 0x01;
+		rx_fs_rate_reg_val = 0x20;
 		compander_fs = COMPANDER_FS_16KHZ;
 		break;
 	case 32000:
-		tx_fs_rate = 0x02;
-		rx_fs_rate = 0x40;
+		tx_fs_rate_reg_val = 0x02;
+		rx_fs_rate_reg_val = 0x40;
 		compander_fs = COMPANDER_FS_32KHZ;
 		break;
 	case 48000:
-		tx_fs_rate = 0x03;
-		rx_fs_rate = 0x60;
+		tx_fs_rate_reg_val = 0x03;
+		rx_fs_rate_reg_val = 0x60;
 		compander_fs = COMPANDER_FS_48KHZ;
 		break;
 	case 96000:
-		tx_fs_rate = 0x04;
-		rx_fs_rate = 0x80;
+		tx_fs_rate_reg_val = 0x04;
+		rx_fs_rate_reg_val = 0x80;
 		compander_fs = COMPANDER_FS_96KHZ;
 		break;
 	case 192000:
-		tx_fs_rate = 0x05;
-		rx_fs_rate = 0xA0;
+		tx_fs_rate_reg_val = 0x05;
+		rx_fs_rate_reg_val = 0xA0;
 		compander_fs = COMPANDER_FS_192KHZ;
 		break;
 	default:
@@ -3983,105 +4235,76 @@
 		return -EINVAL;
 	}
 
+	switch (substream->stream) {
+	case SNDRV_PCM_STREAM_CAPTURE:
 
-	/**
-	 * If current dai is a tx dai, set sample rate to
-	 * all the txfe paths that are currently not active
-	 */
-	if ((dai->id == AIF1_CAP) || (dai->id == AIF2_CAP) ||
-	    (dai->id == AIF3_CAP)) {
-
-		tx_state = snd_soc_read(codec,
-				TABLA_A_CDC_CLK_TX_CLK_EN_B1_CTL);
-
-		for (path = 1, shift = 0;
-				path <= NUM_DECIMATORS; path++, shift++) {
-
-			if (path == BITS_PER_REG + 1) {
-				shift = 0;
-				tx_state = snd_soc_read(codec,
-					TABLA_A_CDC_CLK_TX_CLK_EN_B2_CTL);
-			}
-
-			if (!(tx_state & (1 << shift))) {
-				tx_fs_reg = TABLA_A_CDC_TX1_CLK_FS_CTL
-						+ (BITS_PER_REG*(path-1));
-				snd_soc_update_bits(codec, tx_fs_reg,
-							0x07, tx_fs_rate);
-			}
+		ret = tabla_set_decimator_rate(dai, tx_fs_rate_reg_val,
+				params_rate(params));
+		if (ret < 0) {
+			pr_err("%s: set decimator rate failed %d\n", __func__,
+					ret);
+			return ret;
 		}
+
 		if (tabla->intf_type == WCD9XXX_INTERFACE_TYPE_I2C) {
 			switch (params_format(params)) {
 			case SNDRV_PCM_FORMAT_S16_LE:
 				snd_soc_update_bits(codec,
-					TABLA_A_CDC_CLK_TX_I2S_CTL,
-					0x20, 0x20);
+					TABLA_A_CDC_CLK_TX_I2S_CTL, 0x20, 0x20);
 				break;
 			case SNDRV_PCM_FORMAT_S32_LE:
 				snd_soc_update_bits(codec,
-					TABLA_A_CDC_CLK_TX_I2S_CTL,
-					0x20, 0x00);
+					TABLA_A_CDC_CLK_TX_I2S_CTL, 0x20, 0x00);
 				break;
 			default:
-				pr_err("invalid format\n");
-				break;
+				pr_err("%s: invalid TX format %u\n", __func__,
+						params_format(params));
+				return -EINVAL;
 			}
 			snd_soc_update_bits(codec, TABLA_A_CDC_CLK_TX_I2S_CTL,
-						0x07, tx_fs_rate);
+					0x07, tx_fs_rate_reg_val);
 		} else {
 			tabla->dai[dai->id - 1].rate   = params_rate(params);
 		}
-	}
-	/**
-	 * TODO: Need to handle case where same RX chain takes 2 or more inputs
-	 * with varying sample rates
-	 */
+		break;
 
-	/**
-	 * If current dai is a rx dai, set sample rate to
-	 * all the rx paths that are currently not active
-	 */
-	if (dai->id == AIF1_PB || dai->id == AIF2_PB || dai->id == AIF3_PB) {
+	case SNDRV_PCM_STREAM_PLAYBACK:
 
-		rx_state = snd_soc_read(codec,
-			TABLA_A_CDC_CLK_RX_B1_CTL);
-
-		for (path = 1, shift = 0;
-				path <= NUM_INTERPOLATORS; path++, shift++) {
-
-			if (!(rx_state & (1 << shift))) {
-				rx_fs_reg = TABLA_A_CDC_RX1_B5_CTL
-						+ (BITS_PER_REG*(path-1));
-				snd_soc_update_bits(codec, rx_fs_reg,
-						0xE0, rx_fs_rate);
-				if (comp_rx_path[shift] < COMPANDER_MAX)
-					tabla->comp_fs[comp_rx_path[shift]]
-					= compander_fs;
-			}
+		ret = tabla_set_interpolator_rate(dai, rx_fs_rate_reg_val,
+				compander_fs, params_rate(params));
+		if (ret < 0) {
+			pr_err("%s: set interpolator rate failed %d\n",
+					__func__, ret);
+			return ret;
 		}
+
 		if (tabla->intf_type == WCD9XXX_INTERFACE_TYPE_I2C) {
 			switch (params_format(params)) {
 			case SNDRV_PCM_FORMAT_S16_LE:
 				snd_soc_update_bits(codec,
-					TABLA_A_CDC_CLK_RX_I2S_CTL,
-					0x20, 0x20);
+					TABLA_A_CDC_CLK_RX_I2S_CTL, 0x20, 0x20);
 				break;
 			case SNDRV_PCM_FORMAT_S32_LE:
 				snd_soc_update_bits(codec,
-					TABLA_A_CDC_CLK_RX_I2S_CTL,
-					0x20, 0x00);
+					TABLA_A_CDC_CLK_RX_I2S_CTL, 0x20, 0x00);
 				break;
 			default:
-				pr_err("invalid format\n");
-				break;
+				pr_err("%s: invalid RX format %u\n", __func__,
+						params_format(params));
+				return -EINVAL;
 			}
 			snd_soc_update_bits(codec, TABLA_A_CDC_CLK_RX_I2S_CTL,
-						0x03, (rx_fs_rate >> 0x05));
+					0x03, (rx_fs_rate_reg_val >> 0x05));
 		} else {
 			tabla->dai[dai->id - 1].rate   = params_rate(params);
 		}
-	}
+		break;
 
+	default:
+		pr_err("%s: Invalid stream type %d\n", __func__,
+				substream->stream);
+		return -EINVAL;
+	}
 	return 0;
 }
 
@@ -4213,6 +4436,41 @@
 	},
 };
 
+static int tabla_codec_enable_chmask(struct tabla_priv *tabla_p,
+	int event, int index)
+{
+	int  ret = 0;
+	u32 k = 0;
+	switch (event) {
+	case SND_SOC_DAPM_POST_PMU:
+		for (k = 0; k < tabla_p->dai[index].ch_tot; k++) {
+			ret = wcd9xxx_get_slave_port(
+					tabla_p->dai[index].ch_num[k]);
+			if (ret < 0) {
+				pr_err("%s: Invalid slave port ID: %d\n",
+					__func__, ret);
+				ret = -EINVAL;
+				break;
+			}
+			tabla_p->dai[index].ch_mask |= 1 << ret;
+		}
+		ret = 0;
+		break;
+	case SND_SOC_DAPM_POST_PMD:
+		ret = wait_event_timeout(tabla_p->dai[index].dai_wait,
+					(tabla_p->dai[index].ch_mask == 0),
+				msecs_to_jiffies(SLIM_CLOSE_TIMEOUT));
+		if (!ret) {
+			pr_err("%s: Slim close tx/rx wait timeout\n",
+				__func__);
+			ret = -EINVAL;
+		}
+		ret = 0;
+		break;
+	}
+	return ret;
+}
+
 static int tabla_codec_enable_slimrx(struct snd_soc_dapm_widget *w,
 	struct snd_kcontrol *kcontrol, int event)
 {
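tabla_codec_enable_chmask() and the SLIMbus port interrupt handler further down in this patch form a simple bitmask handshake; a condensed sketch of the protocol (locking and error handling stripped, everything shown exists in this patch):

	/* DAPM POST_PMU: mark every slave port about to be opened. */
	tabla_p->dai[j].ch_mask |= 1 << wcd9xxx_get_slave_port(ch_num[k]);

	/* Port-disconnect IRQ: clear the bit and wake any waiter. */
	tabla_p->dai[j].ch_mask &= ~(1 << port_id);
	if (!tabla_p->dai[j].ch_mask)
		wake_up(&tabla_p->dai[j].dai_wait);

	/* DAPM POST_PMD: wait until the hardware reports all ports closed. */
	wait_event_timeout(tabla_p->dai[j].dai_wait,
			   tabla_p->dai[j].ch_mask == 0,
			   msecs_to_jiffies(SLIM_CLOSE_TIMEOUT));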
@@ -4242,11 +4500,15 @@
 				break;
 			}
 		}
-		if (tabla_p->dai[j].ch_act == tabla_p->dai[j].ch_tot)
+		if (tabla_p->dai[j].ch_act == tabla_p->dai[j].ch_tot) {
+			ret = tabla_codec_enable_chmask(tabla_p,
+							SND_SOC_DAPM_POST_PMU,
+							j);
 			ret = wcd9xxx_cfg_slim_sch_rx(tabla,
 					tabla_p->dai[j].ch_num,
 					tabla_p->dai[j].ch_tot,
 					tabla_p->dai[j].rate);
+		}
 		break;
 	case SND_SOC_DAPM_POST_PMD:
 		for (j = 0; j < ARRAY_SIZE(tabla_dai); j++) {
@@ -4265,11 +4527,13 @@
 			ret = wcd9xxx_close_slim_sch_rx(tabla,
 						tabla_p->dai[j].ch_num,
 						tabla_p->dai[j].ch_tot);
-			usleep_range(15000, 15000);
 			tabla_p->dai[j].rate = 0;
 			memset(tabla_p->dai[j].ch_num, 0, (sizeof(u32)*
 					tabla_p->dai[j].ch_tot));
 			tabla_p->dai[j].ch_tot = 0;
+			ret = tabla_codec_enable_chmask(tabla_p,
+							SND_SOC_DAPM_POST_PMD,
+							j);
 		}
 	}
 	return ret;
@@ -4307,11 +4571,15 @@
 				break;
 			}
 		}
-		if (tabla_p->dai[j].ch_act == tabla_p->dai[j].ch_tot)
+		if (tabla_p->dai[j].ch_act == tabla_p->dai[j].ch_tot) {
+			ret = tabla_codec_enable_chmask(tabla_p,
+							SND_SOC_DAPM_POST_PMU,
+							j);
 			ret = wcd9xxx_cfg_slim_sch_tx(tabla,
 						tabla_p->dai[j].ch_num,
 						tabla_p->dai[j].ch_tot,
 						tabla_p->dai[j].rate);
+		}
 		break;
 	case SND_SOC_DAPM_POST_PMD:
 		for (j = 0; j < ARRAY_SIZE(tabla_dai); j++) {
@@ -4333,6 +4601,9 @@
 			memset(tabla_p->dai[j].ch_num, 0, (sizeof(u32)*
 					tabla_p->dai[j].ch_tot));
 			tabla_p->dai[j].ch_tot = 0;
+			ret = tabla_codec_enable_chmask(tabla_p,
+							SND_SOC_DAPM_POST_PMD,
+							j);
 		}
 	}
 	return ret;
@@ -4350,30 +4621,6 @@
 	SND_SOC_DAPM_MIXER("DAC1", TABLA_A_RX_EAR_EN, 6, 0, dac1_switch,
 		ARRAY_SIZE(dac1_switch)),
 
-	SND_SOC_DAPM_AIF_IN_E("SLIM RX1", "AIF1 Playback", 0, SND_SOC_NOPM, 0,
-				0, tabla_codec_enable_slimrx,
-				SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
-	SND_SOC_DAPM_AIF_IN_E("SLIM RX2", "AIF1 Playback", 0, SND_SOC_NOPM, 0,
-				0, tabla_codec_enable_slimrx,
-				SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
-	SND_SOC_DAPM_AIF_IN_E("SLIM RX3", "AIF1 Playback", 0, SND_SOC_NOPM, 0,
-				0, tabla_codec_enable_slimrx,
-				SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
-
-	SND_SOC_DAPM_AIF_IN_E("SLIM RX4", "AIF3 Playback", 0, SND_SOC_NOPM, 0,
-				0, tabla_codec_enable_slimrx,
-				SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
-	SND_SOC_DAPM_AIF_IN_E("SLIM RX5", "AIF3 Playback", 0, SND_SOC_NOPM, 0,
-				0, tabla_codec_enable_slimrx,
-				SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
-
-	SND_SOC_DAPM_AIF_IN_E("SLIM RX6", "AIF2 Playback", 0, SND_SOC_NOPM, 0,
-				0, tabla_codec_enable_slimrx,
-				SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
-	SND_SOC_DAPM_AIF_IN_E("SLIM RX7", "AIF2 Playback", 0, SND_SOC_NOPM, 0,
-				0, tabla_codec_enable_slimrx,
-				SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
-
 	/* Headphone */
 	SND_SOC_DAPM_OUTPUT("HEADPHONE"),
 	SND_SOC_DAPM_PGA_E("HPHL", TABLA_A_RX_HPH_CNP_EN, 5, 0, NULL, 0,
@@ -4654,54 +4901,15 @@
 		SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
 
 	SND_SOC_DAPM_MUX("SLIM TX1 MUX", SND_SOC_NOPM, 0, 0, &sb_tx1_mux),
-	SND_SOC_DAPM_AIF_OUT_E("SLIM TX1", "AIF2 Capture", 0, SND_SOC_NOPM, 0,
-				0, tabla_codec_enable_slimtx,
-				SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
-
 	SND_SOC_DAPM_MUX("SLIM TX2 MUX", SND_SOC_NOPM, 0, 0, &sb_tx2_mux),
-	SND_SOC_DAPM_AIF_OUT_E("SLIM TX2", "AIF2 Capture", 0, SND_SOC_NOPM, 0,
-				0, tabla_codec_enable_slimtx,
-				SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
-
 	SND_SOC_DAPM_MUX("SLIM TX3 MUX", SND_SOC_NOPM, 0, 0, &sb_tx3_mux),
-	SND_SOC_DAPM_AIF_OUT_E("SLIM TX3", "AIF3 Capture", 0, SND_SOC_NOPM, 0,
-				0, tabla_codec_enable_slimtx,
-				SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
-
 	SND_SOC_DAPM_MUX("SLIM TX4 MUX", SND_SOC_NOPM, 0, 0, &sb_tx4_mux),
-	SND_SOC_DAPM_AIF_OUT_E("SLIM TX4", "AIF2 Capture", 0, SND_SOC_NOPM, 0,
-				0, tabla_codec_enable_slimtx,
-				SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
-
 	SND_SOC_DAPM_MUX("SLIM TX5 MUX", SND_SOC_NOPM, 0, 0, &sb_tx5_mux),
-	SND_SOC_DAPM_AIF_OUT_E("SLIM TX5", "AIF3 Capture", 0, SND_SOC_NOPM, 0,
-				0, tabla_codec_enable_slimtx,
-				SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
-
 	SND_SOC_DAPM_MUX("SLIM TX6 MUX", SND_SOC_NOPM, 0, 0, &sb_tx6_mux),
-	SND_SOC_DAPM_AIF_OUT_E("SLIM TX6", "AIF2 Capture", 0, SND_SOC_NOPM, 0,
-				0, tabla_codec_enable_slimtx,
-				SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
-
 	SND_SOC_DAPM_MUX("SLIM TX7 MUX", SND_SOC_NOPM, 0, 0, &sb_tx7_mux),
-	SND_SOC_DAPM_AIF_OUT_E("SLIM TX7", "AIF1 Capture", 0, SND_SOC_NOPM, 0,
-				0, tabla_codec_enable_slimtx,
-				SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
-
 	SND_SOC_DAPM_MUX("SLIM TX8 MUX", SND_SOC_NOPM, 0, 0, &sb_tx8_mux),
-	SND_SOC_DAPM_AIF_OUT_E("SLIM TX8", "AIF1 Capture", 0, SND_SOC_NOPM, 0,
-				0, tabla_codec_enable_slimtx,
-				SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
-
 	SND_SOC_DAPM_MUX("SLIM TX9 MUX", SND_SOC_NOPM, 0, 0, &sb_tx9_mux),
-	SND_SOC_DAPM_AIF_OUT_E("SLIM TX9", "AIF1 Capture", NULL, SND_SOC_NOPM,
-			0, 0, tabla_codec_enable_slimtx,
-			SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
-
 	SND_SOC_DAPM_MUX("SLIM TX10 MUX", SND_SOC_NOPM, 0, 0, &sb_tx10_mux),
-	SND_SOC_DAPM_AIF_OUT_E("SLIM TX10", "AIF1 Capture", NULL, SND_SOC_NOPM,
-			0, 0, tabla_codec_enable_slimtx,
-			SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
 
 	/* Digital Mic Inputs */
 	SND_SOC_DAPM_ADC_E("DMIC1", NULL, SND_SOC_NOPM, 0, 0,
@@ -6085,22 +6293,24 @@
 }
 
 /* should be called under interrupt context that hold suspend */
-static void tabla_schedule_hs_detect_plug(struct tabla_priv *tabla)
+static void tabla_schedule_hs_detect_plug(struct tabla_priv *tabla,
+	struct work_struct *correct_plug_work)
 {
 	pr_debug("%s: scheduling tabla_hs_correct_gpio_plug\n", __func__);
 	tabla->hs_detect_work_stop = false;
 	wcd9xxx_lock_sleep(tabla->codec->control_data);
-	schedule_work(&tabla->hs_correct_plug_work);
+	schedule_work(correct_plug_work);
 }
 
 /* called under codec_resource_lock acquisition */
-static void tabla_cancel_hs_detect_plug(struct tabla_priv *tabla)
+static void tabla_cancel_hs_detect_plug(struct tabla_priv *tabla,
+		struct work_struct *correct_plug_work)
 {
 	pr_debug("%s: canceling hs_correct_plug_work\n", __func__);
 	tabla->hs_detect_work_stop = true;
 	wmb();
 	TABLA_RELEASE_LOCK(tabla->codec_resource_lock);
-	if (cancel_work_sync(&tabla->hs_correct_plug_work)) {
+	if (cancel_work_sync(correct_plug_work)) {
 		pr_debug("%s: hs_correct_plug_work is canceled\n", __func__);
 		wcd9xxx_unlock_sleep(tabla->codec->control_data);
 	}
@@ -6372,11 +6582,13 @@
 
 	if (plug_type == PLUG_TYPE_INVALID ||
 	    plug_type == PLUG_TYPE_GND_MIC_SWAP) {
-		tabla_schedule_hs_detect_plug(tabla);
+		tabla_schedule_hs_detect_plug(tabla,
+					&tabla->hs_correct_plug_work);
 	} else if (plug_type == PLUG_TYPE_HEADPHONE) {
 		tabla_codec_report_plug(codec, 1, SND_JACK_HEADPHONE);
 
-		tabla_schedule_hs_detect_plug(tabla);
+		tabla_schedule_hs_detect_plug(tabla,
+					&tabla->hs_correct_plug_work);
 	} else {
 		pr_debug("%s: Valid plug found, determine plug type %d\n",
 			 __func__, plug_type);
@@ -6433,6 +6645,8 @@
 		tabla_codec_report_plug(codec, 1, SND_JACK_HEADPHONE);
 		tabla_codec_cleanup_hs_polling(codec);
 		tabla_codec_enable_hs_detect(codec, 0, 0, false);
+		tabla_schedule_hs_detect_plug(tabla,
+					&tabla->hs_correct_plug_work_nogpio);
 	} else if (plug_type == PLUG_TYPE_HEADSET) {
 		pr_debug("%s: Headset detected\n", __func__);
 		tabla_codec_report_plug(codec, 1, SND_JACK_HEADSET);
@@ -6479,10 +6693,13 @@
 	int ret;
 	struct snd_soc_codec *codec = priv->codec;
 	struct wcd9xxx *core = dev_get_drvdata(priv->codec->dev->parent);
+	struct tabla_priv *tabla = snd_soc_codec_get_drvdata(codec);
+
+	/* Cancel possibly running hs_detect_work */
+	tabla_cancel_hs_detect_plug(tabla,
+			&tabla->hs_correct_plug_work_nogpio);
 
 	if (is_removal) {
-		/* cancel possiblely running hs detect work */
-		tabla_cancel_hs_detect_plug(priv);
 
 		/*
 		 * If headphone is removed while playback is in progress,
@@ -6696,8 +6913,9 @@
 	} while (min_us > 0);
 
 	if (removed) {
-		/* cancel possiblely running hs detect work */
-		tabla_cancel_hs_detect_plug(priv);
+		/* Cancel possibly running hs_detect_work */
+		tabla_cancel_hs_detect_plug(priv,
+					&priv->hs_correct_plug_work_nogpio);
 		/*
 		 * If this removal is not false, first check the micbias
 		 * switch status and switch it to LDOH if it is already
@@ -6792,7 +7010,8 @@
 		wmb();
 
 		/* cancel detect plug */
-		tabla_cancel_hs_detect_plug(tabla);
+		tabla_cancel_hs_detect_plug(tabla,
+					&tabla->hs_correct_plug_work);
 
 		/* Disable Mic Bias pull down and HPH Switch to GND */
 		snd_soc_update_bits(codec, tabla->mbhc_bias_regs.ctl_reg, 0x01,
@@ -6804,7 +7023,8 @@
 		wmb();
 
 		/* cancel detect plug */
-		tabla_cancel_hs_detect_plug(tabla);
+		tabla_cancel_hs_detect_plug(tabla,
+					&tabla->hs_correct_plug_work);
 
 		if (tabla->current_plug == PLUG_TYPE_HEADPHONE) {
 			tabla_codec_report_plug(codec, 0, SND_JACK_HEADPHONE);
@@ -6864,6 +7084,76 @@
 	return r;
 }
 
+static void tabla_hs_correct_plug_nogpio(struct work_struct *work)
+{
+	struct tabla_priv *tabla;
+	struct snd_soc_codec *codec;
+	unsigned long timeout;
+	int retry = 0;
+	enum tabla_mbhc_plug_type plug_type;
+	bool is_headset = false;
+
+	pr_debug("%s(): Poll Microphone voltage for %d seconds\n",
+			 __func__, TABLA_HS_DETECT_PLUG_TIME_MS / 1000);
+
+	tabla = container_of(work, struct tabla_priv,
+						 hs_correct_plug_work_nogpio);
+	codec = tabla->codec;
+
+	/* Make sure the MBHC mux is connected to MIC Path */
+	snd_soc_write(codec, TABLA_A_MBHC_SCALING_MUX_1, 0x84);
+
+	/* setup for microphone polling */
+	tabla_turn_onoff_override(codec, true);
+	tabla->mbhc_cfg.mclk_cb_fn(codec, 1, false);
+
+	timeout = jiffies + msecs_to_jiffies(TABLA_HS_DETECT_PLUG_TIME_MS);
+	while (!time_after(jiffies, timeout)) {
+		++retry;
+
+		msleep(TABLA_HS_DETECT_PLUG_INERVAL_MS);
+		TABLA_ACQUIRE_LOCK(tabla->codec_resource_lock);
+		plug_type = tabla_codec_get_plug_type(codec, false);
+		TABLA_RELEASE_LOCK(tabla->codec_resource_lock);
+
+		if (plug_type == PLUG_TYPE_HIGH_HPH
+			|| plug_type == PLUG_TYPE_INVALID) {
+
+			/* This means the plug was removed.
+			 * End microphone polling and set up
+			 * low-power removal detection.
+			 */
+			pr_debug("%s(): Plug may be removed, setup removal\n",
+					 __func__);
+			break;
+		} else if (plug_type == PLUG_TYPE_HEADSET) {
+			/* Plug is corrected from headphone to headset,
+			 * report headset and end the polling
+			 */
+			is_headset = true;
+			TABLA_ACQUIRE_LOCK(tabla->codec_resource_lock);
+			tabla_turn_onoff_override(codec, false);
+			tabla_codec_report_plug(codec, 1, SND_JACK_HEADSET);
+			tabla_codec_start_hs_polling(codec);
+			TABLA_RELEASE_LOCK(tabla->codec_resource_lock);
+			pr_debug("%s(): corrected from headphone to headset\n",
+					 __func__);
+			break;
+		}
+	}
+
+	/* Undo the microphone polling setup depending on
+	 * the result of the polling.
+	 */
+	tabla->mbhc_cfg.mclk_cb_fn(codec, 0, false);
+	if (!is_headset) {
+		tabla_turn_onoff_override(codec, false);
+		tabla_codec_cleanup_hs_polling(codec);
+		tabla_codec_enable_hs_detect(codec, 0, 0, false);
+	}
+	wcd9xxx_unlock_sleep(codec->control_data);
+}
+
 static int tabla_mbhc_init_and_calibrate(struct tabla_priv *tabla)
 {
 	int ret = 0;
@@ -6876,6 +7166,8 @@
 	tabla->mbhc_cfg.mclk_cb_fn(codec, 0, false);
 	tabla_codec_calibrate_hs_polling(codec);
 	if (!tabla->mbhc_cfg.gpio) {
+		INIT_WORK(&tabla->hs_correct_plug_work_nogpio,
+				  tabla_hs_correct_plug_nogpio);
 		ret = tabla_codec_enable_hs_detect(codec, 1,
 						   MBHC_USE_MB_TRIGGER |
 						   MBHC_USE_HPHL_TRIGGER,
@@ -7014,7 +7306,8 @@
 {
 	struct tabla_priv *priv = data;
 	struct snd_soc_codec *codec = priv->codec;
-	int i, j;
+	struct tabla_priv *tabla_p = snd_soc_codec_get_drvdata(codec);
+	int i, j, port_id, k, ch_mask_temp;
 	u8 val;
 
 	for (i = 0; i < WCD9XXX_SLIM_NUM_PORT_REG; i++) {
@@ -7029,6 +7322,22 @@
 			if (val & 0x2)
 				pr_err_ratelimited("underflow error on port %x,"
 					" value %x\n", i*8 + j, val);
+			if (val & 0x4) {
+				pr_debug("%s: port %x disconnect value %x\n",
+					__func__, i*8 + j, val);
+				port_id = i*8 + j;
+				for (k = 0; k < ARRAY_SIZE(tabla_dai); k++) {
+					ch_mask_temp = 1 << port_id;
+					if (ch_mask_temp &
+						tabla_p->dai[k].ch_mask) {
+						tabla_p->dai[k].ch_mask &=
+								~ch_mask_temp;
+						if (!tabla_p->dai[k].ch_mask)
+							wake_up(
+						&tabla_p->dai[k].dai_wait);
+					}
+				}
+			}
 		}
 		wcd9xxx_interface_reg_write(codec->control_data,
 			TABLA_SLIM_PGD_PORT_INT_CLR0 + i, 0xFF);
@@ -7560,6 +7869,13 @@
 
 //	snd_soc_dapm_new_controls(dapm, tabla_dapm_widgets,
 //				  ARRAY_SIZE(tabla_dapm_widgets));
+
+	snd_soc_dapm_new_controls(dapm, tabla_dapm_aif_in_widgets,
+				  ARRAY_SIZE(tabla_dapm_aif_in_widgets));
+
+	snd_soc_dapm_new_controls(dapm, tabla_dapm_aif_out_widgets,
+				  ARRAY_SIZE(tabla_dapm_aif_out_widgets));
+
 	if (TABLA_IS_1_X(control->version))
 		snd_soc_dapm_new_controls(dapm, tabla_1_x_dapm_widgets,
 					  ARRAY_SIZE(tabla_1_x_dapm_widgets));
@@ -7678,6 +7994,7 @@
 		}
 		tabla->dai[i].ch_num = kzalloc((sizeof(unsigned int)*
 					ch_cnt), GFP_KERNEL);
+		init_waitqueue_head(&tabla->dai[i].dai_wait);
 	}
 
 #ifdef CONFIG_DEBUG_FS
diff --git a/sound/soc/msm/Kconfig b/sound/soc/msm/Kconfig
index 1125d20..9b60a56 100644
--- a/sound/soc/msm/Kconfig
+++ b/sound/soc/msm/Kconfig
@@ -106,10 +106,10 @@
 	 To add support for SoC audio on MSM8960.
 
 config SND_SOC_MSM_QDSP6V2_INTF
-	bool "SoC Q6 audio driver for MSMCOPPER"
+	bool "SoC Q6 audio driver for MSM8974"
 	depends on MSM_QDSP6_APR
 	help
-	 To add support for SoC audio on MSMCOPPER.
+	 To add support for SoC audio on MSM8974.
 	 This will enable all the platform specific
 	 interactions towards DSP. It includes asm,
 	 adm and afe interfaces on the DSP.
@@ -154,14 +154,14 @@
 	 To add support for SoC audio on MSM8960 and APQ8064 boards
 
 config SND_SOC_MSM8974
-	tristate "SoC Machine driver for MSMCOPPER boards"
-	depends on ARCH_MSMCOPPER
+	tristate "SoC Machine driver for MSM8974 boards"
+	depends on ARCH_MSM8974
 	select SND_SOC_QDSP6V2
 	select SND_SOC_MSM_STUB
 	select SND_SOC_MSM_HOSTLESS_PCM
 	select SND_DYNAMIC_MINORS
 	help
-	 To add support for SoC audio on MSMCOPPER.
+	 To add support for SoC audio on MSM8974.
 	 This will enable sound soc drivers which
 	 interfaces with DSP, also it will enable
 	 the machine drivers and the corresponding
diff --git a/sound/soc/msm/mdm9615.c b/sound/soc/msm/mdm9615.c
index 90d8723..dbe5d00 100644
--- a/sound/soc/msm/mdm9615.c
+++ b/sound/soc/msm/mdm9615.c
@@ -188,9 +188,6 @@
 #define LPAIF_SPARE_MUX_CTL_PRI_MUX_SEL_BMSK	0x3
 #define LPAIF_SPARE_MUX_CTL_PRI_MUX_SEL_SHFT		0x0
 
-static u32 spare_shadow;
-static u32 sif_shadow;
-
 static atomic_t msm9615_auxpcm_ref;
 static atomic_t msm9615_sec_auxpcm_ref;
 
@@ -1066,30 +1063,26 @@
 {
 	struct msm_i2s_ctl *pintf = &msm9x15_i2s_ctl;
 	if (i2s_intf == MSM_INTF_PRIM) {
-		if (i2s_dir == MSM_DIR_RX)
-			gpio_free(GPIO_PRIM_I2S_DOUT);
-		if (i2s_dir == MSM_DIR_TX)
-			gpio_free(GPIO_PRIM_I2S_DIN);
 		if (pintf->intf_status[i2s_intf][MSM_DIR_TX] == 0 &&
 			pintf->intf_status[i2s_intf][MSM_DIR_RX] == 0) {
+			gpio_free(GPIO_PRIM_I2S_DIN);
+			gpio_free(GPIO_PRIM_I2S_DOUT);
 			gpio_free(GPIO_PRIM_I2S_SCK);
 			gpio_free(GPIO_PRIM_I2S_WS);
 		}
 	} else if (i2s_intf == MSM_INTF_SECN) {
-		if (i2s_dir == MSM_DIR_RX)
-			gpio_free(GPIO_SEC_I2S_DOUT);
-		if (i2s_dir == MSM_DIR_TX)
-			gpio_free(GPIO_SEC_I2S_DIN);
 		if (pintf->intf_status[i2s_intf][MSM_DIR_TX] == 0 &&
 			pintf->intf_status[i2s_intf][MSM_DIR_RX] == 0) {
+			gpio_free(GPIO_SEC_I2S_DOUT);
 			gpio_free(GPIO_SEC_I2S_WS);
+			gpio_free(GPIO_SEC_I2S_DIN);
 			gpio_free(GPIO_SEC_I2S_SCK);
 		}
 	}
 	return 0;
 }
 
-int msm9615_i2s_intf_dir_sel(const char *cpu_dai_name,
+static int msm9615_i2s_intf_dir_sel(const char *cpu_dai_name,
 			     u8 *i2s_intf, u8 *i2s_dir)
 {
 	int ret = 0;
@@ -1117,34 +1110,37 @@
 	return ret;
 }
 
-int msm9615_enable_i2s_gpio(u8 i2s_intf, u8 i2s_dir)
+static int msm9615_enable_i2s_gpio(u8 i2s_intf, u8 i2s_dir)
 {
 	u8 ret = 0;
 	struct msm_i2s_ctl *pintf = &msm9x15_i2s_ctl;
+
 	if (i2s_intf == MSM_INTF_PRIM) {
-		if (i2s_dir == MSM_DIR_TX) {
+		if (pintf->intf_status[i2s_intf][MSM_DIR_TX] == 0 &&
+		    pintf->intf_status[i2s_intf][MSM_DIR_RX] == 0) {
+
+			ret = gpio_request(GPIO_PRIM_I2S_DOUT,
+					   "I2S_PRIM_DOUT");
+			if (ret) {
+				pr_err("%s: Failed to request gpio %d\n",
+					__func__, GPIO_PRIM_I2S_DOUT);
+				goto err;
+			}
+
 			ret = gpio_request(GPIO_PRIM_I2S_DIN, "I2S_PRIM_DIN");
 			if (ret) {
 				pr_err("%s: Failed to request gpio %d\n",
-				       __func__, GPIO_PRIM_I2S_DIN);
+					       __func__, GPIO_PRIM_I2S_DIN);
 				goto err;
 			}
-		} else if (i2s_dir == MSM_DIR_RX) {
-			ret = gpio_request(GPIO_PRIM_I2S_DOUT,
-					       "I2S_PRIM_DOUT");
-			if (ret) {
-				pr_err("%s: Failed to request gpio %d\n",
-				       __func__, GPIO_PRIM_I2S_DOUT);
-				goto err;
-			}
-		} else if (pintf->intf_status[i2s_intf][MSM_DIR_TX] == 0 &&
-			   pintf->intf_status[i2s_intf][MSM_DIR_RX] == 0) {
+
 			ret = gpio_request(GPIO_PRIM_I2S_SCK, "I2S_PRIM_SCK");
 			if (ret) {
 				pr_err("%s: Failed to request gpio %d\n",
 				       __func__, GPIO_PRIM_I2S_SCK);
 				goto err;
 			}
+
 			ret = gpio_request(GPIO_PRIM_I2S_WS, "I2S_PRIM_WS");
 			if (ret) {
 				pr_err("%s: Failed to request gpio %d\n",
@@ -1153,28 +1149,30 @@
 			}
 		}
 	} else if (i2s_intf == MSM_INTF_SECN) {
-		if (i2s_dir == MSM_DIR_RX) {
-			ret = gpio_request(GPIO_SEC_I2S_DOUT, "I2S_SEC_DOUT");
-			if (ret) {
-				pr_err("%s: Failed to request gpio %d\n",
-				       __func__, GPIO_SEC_I2S_DOUT);
-				goto err;
-			}
-		} else if (i2s_dir == MSM_DIR_TX) {
+		if (pintf->intf_status[i2s_intf][MSM_DIR_TX] == 0 &&
+		    pintf->intf_status[i2s_intf][MSM_DIR_RX] == 0) {
+
 			ret = gpio_request(GPIO_SEC_I2S_DIN, "I2S_SEC_DIN");
 			if (ret) {
 				pr_err("%s: Failed to request gpio %d\n",
 				       __func__, GPIO_SEC_I2S_DIN);
 				goto err;
 			}
-		} else if (pintf->intf_status[i2s_intf][MSM_DIR_TX] == 0 &&
-			   pintf->intf_status[i2s_intf][MSM_DIR_RX] == 0) {
+
+			ret = gpio_request(GPIO_SEC_I2S_DOUT, "I2S_SEC_DOUT");
+			if (ret) {
+				pr_err("%s: Failed to request gpio %d\n",
+				       __func__, GPIO_SEC_I2S_DOUT);
+				goto err;
+			}
+
 			ret = gpio_request(GPIO_SEC_I2S_SCK, "I2S_SEC_SCK");
 			if (ret) {
 				pr_err("%s: Failed to request gpio %d\n",
 				       __func__, GPIO_SEC_I2S_SCK);
 				goto err;
 			}
+
 			ret = gpio_request(GPIO_SEC_I2S_WS, "I2S_SEC_WS");
 			if (ret) {
 				pr_err("%s: Failed to request gpio %d\n",
@@ -1283,20 +1281,33 @@
 	return ret;
 }
 
-void msm9615_config_i2s_sif_mux(u8 value)
+static void msm9615_config_i2s_sif_mux(u8 value)
 {
 	struct msm_i2s_ctl *pintf = &msm9x15_i2s_ctl;
-	sif_shadow  = 0x00000;
+	u32 sif_shadow  = 0x0000;
+	/* Make this variable global if both the secondary and
+	 * primary interfaces need to be supported. This is
+	 * required to retain bits in the interface and set only
+	 * specific bits in the register. Also set the Sec Intf
+	 * bits; the secondary interface bits are 0 and 1.
+	 **/
 	sif_shadow = (sif_shadow & LPASS_SIF_MUX_CTL_PRI_MUX_SEL_BMSK) |
 		     (value << LPASS_SIF_MUX_CTL_PRI_MUX_SEL_SHFT);
-	iowrite32(sif_shadow, pintf->sif_virt_addr);
+	if (pintf->sif_virt_addr != NULL)
+		iowrite32(sif_shadow, pintf->sif_virt_addr);
 	/* Dont read SIF register. Device crashes. */
 	pr_debug("%s() SIF Reg = 0x%x\n", __func__, sif_shadow);
 }
 
-void msm9615_config_i2s_spare_mux(u8 value, u8 i2s_intf)
+static void msm9615_config_i2s_spare_mux(u8 value, u8 i2s_intf)
 {
 	struct msm_i2s_ctl *pintf = &msm9x15_i2s_ctl;
+	u32 spare_shadow = 0x0000;
+	/* Make this variable global if both the secondary and
+	 * primary interfaces need to be supported. This is
+	 * required to retain bits in the interface and set only
+	 * specific bits in the register. Also set the Sec Intf bits.
+	 **/
 	if (i2s_intf == MSM_INTF_PRIM) {
 		/* Configure Primary SIF */
 	    spare_shadow = (spare_shadow & LPAIF_SPARE_MUX_CTL_PRI_MUX_SEL_BMSK
@@ -1307,7 +1318,8 @@
 	    spare_shadow = (spare_shadow & LPAIF_SPARE_MUX_CTL_SEC_MUX_SEL_BMSK
 			   ) | (value << LPAIF_SPARE_MUX_CTL_SEC_MUX_SEL_SHFT);
 	}
-	iowrite32(spare_shadow, pintf->spare_virt_addr);
+	if (pintf->spare_virt_addr != NULL)
+		iowrite32(spare_shadow, pintf->spare_virt_addr);
 	/* Dont read SPARE register. Device crashes. */
 	pr_debug("%s( ): SPARE Reg =0x%x\n", __func__, spare_shadow);
 }
@@ -2214,6 +2226,9 @@
 	atomic_set(&msm9615_sec_auxpcm_ref, 0);
 	msm9x15_i2s_ctl.sif_virt_addr = ioremap(LPASS_SIF_MUX_ADDR, 4);
 	msm9x15_i2s_ctl.spare_virt_addr = ioremap(LPAIF_SPARE_ADDR, 4);
+	if (msm9x15_i2s_ctl.spare_virt_addr == NULL ||
+	    msm9x15_i2s_ctl.sif_virt_addr == NULL)
+		pr_err("%s: SIF or SPARE pointer is NULL\n", __func__);
 	sif_virt_addr = ioremap(LPASS_SIF_MUX_ADDR, 4);
 	secpcm_portslc_virt_addr = ioremap(SEC_PCM_PORT_SLC_ADDR, 4);
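
Two patterns repeat through the mdm9615 hunks above: the SIF/SPARE shadow
values become function-local, and every iowrite32() is guarded by a check
that the earlier ioremap() in the probe path actually produced a mapping.
A rough sketch of that guarded MMIO write, using placeholder names and a
made-up register address (MY_MUX_ADDR) rather than the LPASS addresses:

#include <linux/io.h>
#include <linux/errno.h>
#include <linux/types.h>

#define MY_MUX_ADDR	0x28000000	/* placeholder, not a real LPASS address */
#define MY_MUX_SEL_BMSK	0x3
#define MY_MUX_SEL_SHFT	0x0

static void __iomem *my_mux_base;

static int my_mux_map(void)
{
	my_mux_base = ioremap(MY_MUX_ADDR, 4);
	if (my_mux_base == NULL)
		return -ENOMEM;		/* caller logs the failure, as probe does */
	return 0;
}

static void my_mux_select(u8 value)
{
	u32 shadow = 0;			/* local: only one interface is programmed */

	shadow = (shadow & MY_MUX_SEL_BMSK) |
		 (value << MY_MUX_SEL_SHFT);
	if (my_mux_base != NULL)	/* never write through a failed mapping */
		iowrite32(shadow, my_mux_base);
}
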
 
diff --git a/sound/soc/msm/mpq8064.c b/sound/soc/msm/mpq8064.c
index 4ecd8df..6685ce5 100644
--- a/sound/soc/msm/mpq8064.c
+++ b/sound/soc/msm/mpq8064.c
@@ -850,7 +850,7 @@
 static void msm_mi2s_shutdown(struct snd_pcm_substream *substream)
 {
 	if (mi2s_bit_clk) {
-		clk_disable(mi2s_bit_clk);
+		clk_disable_unprepare(mi2s_bit_clk);
 		clk_put(mi2s_bit_clk);
 		mi2s_bit_clk = NULL;
 	}
@@ -892,7 +892,7 @@
 	if (IS_ERR(mi2s_bit_clk))
 		return PTR_ERR(mi2s_bit_clk);
 	clk_set_rate(mi2s_bit_clk, 0);
-	ret = clk_enable(mi2s_bit_clk);
+	ret = clk_prepare_enable(mi2s_bit_clk);
 	if (IS_ERR_VALUE(ret)) {
 		pr_err("Unable to enable mi2s_bit_clk\n");
 		clk_put(mi2s_bit_clk);
@@ -1257,8 +1257,8 @@
 		.cpu_dai_name	= "MultiMedia5",
 		.platform_name  = "msm-multi-ch-pcm-dsp",
 		.dynamic = 1,
-		.trigger = {SND_SOC_DPCM_TRIGGER_BESPOKE,
-					SND_SOC_DPCM_TRIGGER_BESPOKE},
+		.trigger = {SND_SOC_DPCM_TRIGGER_POST,
+					SND_SOC_DPCM_TRIGGER_POST},
 		.codec_dai_name = "snd-soc-dummy-dai",
 		.codec_name = "snd-soc-dummy",
 		.ignore_suspend = 1,
@@ -1271,8 +1271,8 @@
 		.cpu_dai_name	= "MultiMedia6",
 		.platform_name  = "msm-multi-ch-pcm-dsp",
 		.dynamic = 1,
-		.trigger = {SND_SOC_DPCM_TRIGGER_BESPOKE,
-					SND_SOC_DPCM_TRIGGER_BESPOKE},
+		.trigger = {SND_SOC_DPCM_TRIGGER_POST,
+					SND_SOC_DPCM_TRIGGER_POST},
 		.codec_dai_name = "snd-soc-dummy-dai",
 		.codec_name = "snd-soc-dummy",
 		.ignore_suspend = 1,
@@ -1285,8 +1285,8 @@
 		.cpu_dai_name   = "MultiMedia7",
 		.platform_name  = "msm-compr-dsp",
 		.dynamic = 1,
-		.trigger = {SND_SOC_DPCM_TRIGGER_BESPOKE,
-					SND_SOC_DPCM_TRIGGER_BESPOKE},
+		.trigger = {SND_SOC_DPCM_TRIGGER_POST,
+					SND_SOC_DPCM_TRIGGER_POST},
 		.codec_dai_name = "snd-soc-dummy-dai",
 		.codec_name = "snd-soc-dummy",
 		.ignore_suspend = 1,
@@ -1299,8 +1299,8 @@
 		.cpu_dai_name   = "MultiMedia8",
 		.platform_name  = "msm-compr-dsp",
 		.dynamic = 1,
-		.trigger = {SND_SOC_DPCM_TRIGGER_BESPOKE,
-					SND_SOC_DPCM_TRIGGER_BESPOKE},
+		.trigger = {SND_SOC_DPCM_TRIGGER_POST,
+					SND_SOC_DPCM_TRIGGER_POST},
 		.codec_dai_name = "snd-soc-dummy-dai",
 		.codec_name = "snd-soc-dummy",
 		.ignore_suspend = 1,
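
The mpq8064 hunks switch the MI2S bit clock from clk_enable()/clk_disable()
to clk_prepare_enable()/clk_disable_unprepare(), the required pairing once
the clock framework's prepare step may sleep. A sketch of that pairing; the
clock name "bit_clk" and the my_* helpers are assumptions:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

static struct clk *my_bit_clk;

static int my_clk_on(struct device *dev)
{
	int ret;

	my_bit_clk = clk_get(dev, "bit_clk");	/* hypothetical clock name */
	if (IS_ERR(my_bit_clk))
		return PTR_ERR(my_bit_clk);

	ret = clk_prepare_enable(my_bit_clk);	/* prepare + enable in one call */
	if (ret) {
		clk_put(my_bit_clk);
		my_bit_clk = NULL;
	}
	return ret;
}

static void my_clk_off(void)
{
	if (my_bit_clk) {
		clk_disable_unprepare(my_bit_clk);	/* exact reverse of the above */
		clk_put(my_bit_clk);
		my_bit_clk = NULL;
	}
}
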
diff --git a/sound/soc/msm/msm-compr-q6.c b/sound/soc/msm/msm-compr-q6.c
index d4045e1..c894921 100644
--- a/sound/soc/msm/msm-compr-q6.c
+++ b/sound/soc/msm/msm-compr-q6.c
@@ -34,6 +34,13 @@
 #include "msm-compr-q6.h"
 #include "msm-pcm-routing.h"
 
+#define COMPRE_CAPTURE_NUM_PERIODS	16
+/* Allocate the worst case frame size for compressed audio */
+#define COMPRE_CAPTURE_HEADER_SIZE	(sizeof(struct snd_compr_audio_info))
+#define COMPRE_CAPTURE_MAX_FRAME_SIZE	(6144)
+#define COMPRE_CAPTURE_PERIOD_SIZE	(COMPRE_CAPTURE_MAX_FRAME_SIZE + \
+					 COMPRE_CAPTURE_HEADER_SIZE)
+
 struct snd_msm {
 	struct msm_audio *prtd;
 	unsigned volume;
@@ -42,6 +49,27 @@
 
 static struct audio_locks the_locks;
 
+static struct snd_pcm_hardware msm_compr_hardware_capture = {
+	.info =		 (SNDRV_PCM_INFO_MMAP |
+				SNDRV_PCM_INFO_BLOCK_TRANSFER |
+				SNDRV_PCM_INFO_MMAP_VALID |
+				SNDRV_PCM_INFO_INTERLEAVED |
+				SNDRV_PCM_INFO_PAUSE | SNDRV_PCM_INFO_RESUME),
+	.formats =	      SNDRV_PCM_FMTBIT_S16_LE,
+	.rates =		SNDRV_PCM_RATE_8000_48000,
+	.rate_min =	     8000,
+	.rate_max =	     48000,
+	.channels_min =	 1,
+	.channels_max =	 8,
+	.buffer_bytes_max =
+		COMPRE_CAPTURE_PERIOD_SIZE * COMPRE_CAPTURE_NUM_PERIODS,
+	.period_bytes_min =	COMPRE_CAPTURE_PERIOD_SIZE,
+	.period_bytes_max = COMPRE_CAPTURE_PERIOD_SIZE,
+	.periods_min =	  COMPRE_CAPTURE_NUM_PERIODS,
+	.periods_max =	  COMPRE_CAPTURE_NUM_PERIODS,
+	.fifo_size =	    0,
+};
+
 static struct snd_pcm_hardware msm_compr_hardware_playback = {
 	.info =		 (SNDRV_PCM_INFO_MMAP |
 				SNDRV_PCM_INFO_BLOCK_TRANSFER |
@@ -81,7 +109,9 @@
 	struct snd_pcm_substream *substream = prtd->substream;
 	struct snd_pcm_runtime *runtime = substream->runtime;
 	struct audio_aio_write_param param;
+	struct audio_aio_read_param read_param;
 	struct audio_buffer *buf = NULL;
+	uint32_t *ptrmem = (uint32_t *)payload;
 	int i = 0;
 
 	pr_debug("%s opcode =%08x\n", __func__, opcode);
@@ -138,9 +168,53 @@
 		prtd->cmd_ack = 1;
 		wake_up(&the_locks.eos_wait);
 		break;
+	case ASM_DATA_EVENT_READ_DONE: {
+		pr_debug("ASM_DATA_EVENT_READ_DONE\n");
+		pr_debug("buf = %p, data = 0x%X, *data = %p,\n"
+			 "prtd->pcm_irq_pos = %d\n",
+				prtd->audio_client->port[OUT].buf,
+			 *(uint32_t *)prtd->audio_client->port[OUT].buf->data,
+				prtd->audio_client->port[OUT].buf->data,
+				prtd->pcm_irq_pos);
+
+		memcpy(prtd->audio_client->port[OUT].buf->data +
+			   prtd->pcm_irq_pos, (ptrmem + 2),
+			   COMPRE_CAPTURE_HEADER_SIZE);
+		pr_debug("buf = %p, updated data = 0x%X, *data = %p\n",
+				prtd->audio_client->port[OUT].buf,
+			*(uint32_t *)(prtd->audio_client->port[OUT].buf->data +
+				prtd->pcm_irq_pos),
+				prtd->audio_client->port[OUT].buf->data);
+		if (!atomic_read(&prtd->start))
+			break;
+		pr_debug("frame size=%d, buffer = 0x%X\n", ptrmem[2],
+				ptrmem[1]);
+		if (ptrmem[2] > COMPRE_CAPTURE_MAX_FRAME_SIZE) {
+			pr_err("Frame length exceeded the max length");
+			break;
+		}
+		buf = prtd->audio_client->port[OUT].buf;
+		pr_debug("pcm_irq_pos=%d, buf[0].phys = 0x%X\n",
+				prtd->pcm_irq_pos, (uint32_t)buf[0].phys);
+		read_param.len = prtd->pcm_count - COMPRE_CAPTURE_HEADER_SIZE;
+		read_param.paddr = (unsigned long)(buf[0].phys) +
+			prtd->pcm_irq_pos + COMPRE_CAPTURE_HEADER_SIZE;
+		prtd->pcm_irq_pos += prtd->pcm_count;
+
+		if (atomic_read(&prtd->start))
+			snd_pcm_period_elapsed(substream);
+
+		q6asm_async_read(prtd->audio_client, &read_param);
+		break;
+	}
 	case APR_BASIC_RSP_RESULT: {
 		switch (payload[0]) {
 		case ASM_SESSION_CMD_RUN: {
+			if (substream->stream
+				!= SNDRV_PCM_STREAM_PLAYBACK) {
+				atomic_set(&prtd->start, 1);
+				break;
+			}
 			if (!atomic_read(&prtd->pending_buffer))
 				break;
 			pr_debug("%s:writing %d bytes"
@@ -230,12 +304,15 @@
 		pr_debug("compressd playback, no need to send"
 			" the decoder params\n");
 		break;
+	case SND_AUDIOCODEC_DTS_PASS_THROUGH:
+		pr_debug("compressed DTS playback, don't send the decoder params\n");
+		break;
 	case SND_AUDIOCODEC_WMA:
 		pr_debug("SND_AUDIOCODEC_WMA\n");
 		memset(&wma_cfg, 0x0, sizeof(struct asm_wma_cfg));
 		wma_cfg.format_tag = compr->info.codec_param.codec.format;
 		wma_cfg.ch_cfg = runtime->channels;
-		wma_cfg.sample_rate = runtime->rate;
+		wma_cfg.sample_rate = compr->info.codec_param.codec.sample_rate;
 		wma_cfg.avg_bytes_per_sec =
 			compr->info.codec_param.codec.bit_rate/8;
 		wma_cfg.block_align = compr->info.codec_param.codec.align;
@@ -255,7 +332,8 @@
 		memset(&wma_pro_cfg, 0x0, sizeof(struct asm_wmapro_cfg));
 		wma_pro_cfg.format_tag = compr->info.codec_param.codec.format;
 		wma_pro_cfg.ch_cfg = compr->info.codec_param.codec.ch_in;
-		wma_pro_cfg.sample_rate = runtime->rate;
+		wma_pro_cfg.sample_rate =
+			compr->info.codec_param.codec.sample_rate;
 		wma_pro_cfg.avg_bytes_per_sec =
 			compr->info.codec_param.codec.bit_rate/8;
 		wma_pro_cfg.block_align = compr->info.codec_param.codec.align;
@@ -266,11 +344,25 @@
 			compr->info.codec_param.codec.options.wma.channelmask;
 		wma_pro_cfg.encode_opt =
 			compr->info.codec_param.codec.options.wma.encodeopt;
+		wma_pro_cfg.adv_encode_opt =
+			compr->info.codec_param.codec.options.wma.encodeopt1;
+		wma_pro_cfg.adv_encode_opt2 =
+			compr->info.codec_param.codec.options.wma.encodeopt2;
 		ret = q6asm_media_format_block_wmapro(prtd->audio_client,
 				&wma_pro_cfg);
 		if (ret < 0)
 			pr_err("%s: CMD Format block failed\n", __func__);
 		break;
+	case SND_AUDIOCODEC_DTS:
+	case SND_AUDIOCODEC_DTS_LBR:
+		pr_debug("SND_AUDIOCODEC_DTS\n");
+		ret = q6asm_media_format_block(prtd->audio_client,
+				compr->codec);
+		if (ret < 0) {
+			pr_err("%s: CMD Format block failed\n", __func__);
+			return ret;
+		}
+		break;
 	default:
 		return -EINVAL;
 	}
@@ -281,6 +373,44 @@
 	return 0;
 }
 
+static int msm_compr_capture_prepare(struct snd_pcm_substream *substream)
+{
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct compr_audio *compr = runtime->private_data;
+	struct msm_audio *prtd = &compr->prtd;
+	struct audio_buffer *buf = prtd->audio_client->port[OUT].buf;
+	struct audio_aio_read_param read_param;
+	int ret = 0;
+	int i;
+	prtd->pcm_size = snd_pcm_lib_buffer_bytes(substream);
+	prtd->pcm_count = snd_pcm_lib_period_bytes(substream);
+	prtd->pcm_irq_pos = 0;
+
+	/* rate and channels are sent to audio driver */
+	prtd->samp_rate = runtime->rate;
+	prtd->channel_mode = runtime->channels;
+
+	if (prtd->enabled)
+		return ret;
+	read_param.len = prtd->pcm_count - COMPRE_CAPTURE_HEADER_SIZE;
+	pr_debug("%s: Samp_rate = %d, Channel = %d, pcm_size = %d,\n"
+			 "pcm_count = %d, periods = %d\n",
+			 __func__, prtd->samp_rate, prtd->channel_mode,
+			 prtd->pcm_size, prtd->pcm_count, runtime->periods);
+
+	for (i = 0; i < runtime->periods; i++) {
+		read_param.uid = i;
+		read_param.paddr = ((unsigned long)(buf[i].phys) +
+					COMPRE_CAPTURE_HEADER_SIZE);
+		q6asm_async_read(prtd->audio_client, &read_param);
+	}
+	prtd->periods = runtime->periods;
+
+	prtd->enabled = 1;
+
+	return ret;
+}
+
 static int msm_compr_trigger(struct snd_pcm_substream *substream, int cmd)
 {
 	int ret = 0;
@@ -293,8 +423,17 @@
 	switch (cmd) {
 	case SNDRV_PCM_TRIGGER_START:
 		prtd->pcm_irq_pos = 0;
-		if (compr->info.codec_param.codec.id ==
-				SND_AUDIOCODEC_AC3_PASS_THROUGH) {
+		if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+			if (compr->info.codec_param.codec.id ==
+				SND_AUDIOCODEC_AC3_PASS_THROUGH ||
+					compr->info.codec_param.codec.id ==
+					SND_AUDIOCODEC_DTS_PASS_THROUGH) {
+				msm_pcm_routing_reg_psthr_stream(
+					soc_prtd->dai_link->be_id,
+					prtd->session_id, substream->stream,
+					1);
+			}
+		} else if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) {
 			msm_pcm_routing_reg_psthr_stream(
 				soc_prtd->dai_link->be_id,
 				prtd->session_id, substream->stream, 1);
@@ -307,11 +446,19 @@
 		break;
 	case SNDRV_PCM_TRIGGER_STOP:
 		pr_debug("SNDRV_PCM_TRIGGER_STOP\n");
-		if (compr->info.codec_param.codec.id ==
-				SND_AUDIOCODEC_AC3_PASS_THROUGH) {
+		if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+			if (compr->info.codec_param.codec.id ==
+					SND_AUDIOCODEC_AC3_PASS_THROUGH) {
+				msm_pcm_routing_reg_psthr_stream(
+					soc_prtd->dai_link->be_id,
+					prtd->session_id, substream->stream,
+					0);
+			}
+		} else if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) {
 			msm_pcm_routing_reg_psthr_stream(
 				soc_prtd->dai_link->be_id,
-				prtd->session_id, substream->stream, 0);
+				prtd->session_id, substream->stream,
+				0);
 		}
 		atomic_set(&prtd->start, 0);
 		break;
@@ -344,6 +491,9 @@
 	compr->info.compr_cap.codecs[2] = SND_AUDIOCODEC_AC3_PASS_THROUGH;
 	compr->info.compr_cap.codecs[3] = SND_AUDIOCODEC_WMA;
 	compr->info.compr_cap.codecs[4] = SND_AUDIOCODEC_WMA_PRO;
+	compr->info.compr_cap.codecs[5] = SND_AUDIOCODEC_DTS;
+	compr->info.compr_cap.codecs[6] = SND_AUDIOCODEC_DTS_LBR;
+	compr->info.compr_cap.codecs[7] = SND_AUDIOCODEC_DTS_PASS_THROUGH;
 	/* Add new codecs here */
 }
 
@@ -365,10 +515,6 @@
 		.rampingcurve = SOFT_VOLUME_CURVE_LINEAR,
 	};
 
-	/* Capture path */
-	if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
-		return -EINVAL;
-
 	pr_debug("%s\n", __func__);
 	compr = kzalloc(sizeof(struct compr_audio), GFP_KERNEL);
 	if (compr == NULL) {
@@ -384,13 +530,18 @@
 		kfree(prtd);
 		return -ENOMEM;
 	}
-	runtime->hw = msm_compr_hardware_playback;
 
 	pr_info("%s: session ID %d\n", __func__, prtd->audio_client->session);
 
 	prtd->session_id = prtd->audio_client->session;
 
-	prtd->cmd_ack = 1;
+	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+		runtime->hw = msm_compr_hardware_playback;
+		prtd->cmd_ack = 1;
+	} else {
+		runtime->hw = msm_compr_hardware_capture;
+	}
+
 
 	ret = snd_pcm_hw_constraint_list(runtime, 0,
 			SNDRV_PCM_HW_PARAM_RATE,
@@ -405,7 +556,8 @@
 
 	prtd->dsp_cnt = 0;
 	atomic_set(&prtd->pending_buffer, 1);
-	compr->codec = FORMAT_MP3;
+	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+		compr->codec = FORMAT_MP3;
 	populate_codec_list(compr, runtime);
 	runtime->private_data = compr;
 	compressed_audio.prtd =  &compr->prtd;
@@ -468,6 +620,27 @@
 	return 0;
 }
 
+static int msm_compr_capture_close(struct snd_pcm_substream *substream)
+{
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct snd_soc_pcm_runtime *soc_prtd = substream->private_data;
+	struct compr_audio *compr = runtime->private_data;
+	struct msm_audio *prtd = &compr->prtd;
+	int dir = OUT;
+
+	pr_debug("%s\n", __func__);
+	atomic_set(&prtd->pending_buffer, 0);
+	q6asm_cmd(prtd->audio_client, CMD_CLOSE);
+	q6asm_audio_client_buf_free_contiguous(dir,
+				prtd->audio_client);
+	msm_pcm_routing_dereg_phy_stream(soc_prtd->dai_link->be_id,
+				SNDRV_PCM_STREAM_CAPTURE);
+	q6asm_audio_client_free(prtd->audio_client);
+	kfree(prtd);
+
+	return 0;
+}
+
 static int msm_compr_close(struct snd_pcm_substream *substream)
 {
 	int ret = 0;
@@ -475,7 +648,7 @@
 	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
 		ret = msm_compr_playback_close(substream);
 	else if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
-		ret = EINVAL;
+		ret = msm_compr_capture_close(substream);
 	return ret;
 }
 static int msm_compr_prepare(struct snd_pcm_substream *substream)
@@ -485,7 +658,7 @@
 	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
 		ret = msm_compr_playback_prepare(substream);
 	else if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
-		ret = EINVAL;
+		ret = msm_compr_capture_prepare(substream);
 	return ret;
 }
 
@@ -499,7 +672,10 @@
 	if (prtd->pcm_irq_pos >= prtd->pcm_size)
 		prtd->pcm_irq_pos = 0;
 
-	pr_debug("pcm_irq_pos = %d\n", prtd->pcm_irq_pos);
+	pr_debug("%s: pcm_irq_pos = %d, pcm_size = %d, sample_bits = %d,\n"
+			 "frame_bits = %d\n", __func__, prtd->pcm_irq_pos,
+			 prtd->pcm_size, runtime->sample_bits,
+			 runtime->frame_bits);
 	return bytes_to_frames(runtime, (prtd->pcm_irq_pos));
 }
 
@@ -541,28 +717,45 @@
 	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
 		dir = IN;
 	else
-		return -EINVAL;
+		dir = OUT;
 
-	switch (compr->info.codec_param.codec.id) {
-	case SND_AUDIOCODEC_AC3_PASS_THROUGH:
-		ret = q6asm_open_write_compressed(prtd->audio_client,
+
+	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+		switch (compr->info.codec_param.codec.id) {
+		case SND_AUDIOCODEC_AC3_PASS_THROUGH:
+		case SND_AUDIOCODEC_DTS_PASS_THROUGH:
+			ret = q6asm_open_write_compressed(prtd->audio_client,
 					compr->codec);
+
+			if (ret < 0) {
+				pr_err("%s: Session out open failed\n",
+					__func__);
+				return -ENOMEM;
+			}
+			break;
+		default:
+			ret = q6asm_open_write(prtd->audio_client,
+					compr->codec);
+			if (ret < 0) {
+				pr_err("%s: Session out open failed\n",
+					__func__);
+				return -ENOMEM;
+			}
+			msm_pcm_routing_reg_phy_stream(
+				soc_prtd->dai_link->be_id,
+				prtd->session_id, substream->stream);
+
+			break;
+		}
+	} else if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) {
+		ret = q6asm_open_read_compressed(prtd->audio_client,
+					compr->codec);
+
 		if (ret < 0) {
 			pr_err("%s: compressed Session out open failed\n",
-					__func__);
+				__func__);
 			return -ENOMEM;
 		}
-		break;
-	default:
-		ret = q6asm_open_write(prtd->audio_client, compr->codec);
-		if (ret < 0) {
-			pr_err("%s: Session out open failed\n", __func__);
-			return -ENOMEM;
-		}
-		msm_pcm_routing_reg_phy_stream(soc_prtd->dai_link->be_id,
-			prtd->session_id, substream->stream);
-
-		break;
 	}
 	ret = q6asm_set_io_mode(prtd->audio_client, ASYNC_IO_MODE);
 	if (ret < 0) {
@@ -581,13 +774,17 @@
 	}
 	buf = prtd->audio_client->port[dir].buf;
 
-	pr_debug("%s:buf = %p\n", __func__, buf);
 	dma_buf->dev.type = SNDRV_DMA_TYPE_DEV;
 	dma_buf->dev.dev = substream->pcm->card->dev;
 	dma_buf->private_data = NULL;
 	dma_buf->area = buf[0].data;
 	dma_buf->addr =  buf[0].phys;
 	dma_buf->bytes = runtime->hw.buffer_bytes_max;
+
+	pr_debug("%s: buf[%p]dma_buf->area[%p]dma_buf->addr[%p]\n"
+		 "dma_buf->bytes[%d]\n", __func__,
+		 (void *)buf, (void *)dma_buf->area,
+		 (void *)dma_buf->addr, dma_buf->bytes);
 	if (!dma_buf->area)
 		return -ENOMEM;
 
@@ -669,6 +866,18 @@
 			pr_debug("SND_AUDIOCODEC_WMA_PRO\n");
 			compr->codec = FORMAT_WMA_V10PRO;
 			break;
+		case SND_AUDIOCODEC_DTS_PASS_THROUGH:
+			pr_debug("SND_AUDIOCODEC_DTS_PASS_THROUGH\n");
+			compr->codec = FORMAT_DTS;
+			break;
+		case SND_AUDIOCODEC_DTS:
+			pr_debug("SND_AUDIOCODEC_DTS\n");
+			compr->codec = FORMAT_DTS;
+			break;
+		case SND_AUDIOCODEC_DTS_LBR:
+			pr_debug("SND_AUDIOCODEC_DTS\n");
+			compr->codec = FORMAT_DTS_LBR;
+			break;
 		default:
 			pr_debug("FORMAT_LINEAR_PCM\n");
 			compr->codec = FORMAT_LINEAR_PCM;
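
The msm-compr-q6.c changes add a compressed-capture path: prepare() queues
one asynchronous read per period, each offset past a metadata header, and
the READ_DONE handler copies that header in front of the payload before
re-queuing the buffer. A simplified sketch of the priming loop; the my_*
names, the contiguous buf_phys + i * period_bytes layout, and the 64-byte
header are assumptions, not the driver's actual buffer geometry:

#include <linux/types.h>
#include <linux/printk.h>

#define MY_HDR_SIZE	64	/* placeholder per-frame metadata header size */

struct my_read_param {
	u32 len;
	u32 uid;
	unsigned long paddr;
};

/* stand-in for the DSP's asynchronous read command */
static int my_queue_read(void *client, struct my_read_param *p)
{
	pr_debug("queue read uid=%u paddr=0x%lx len=%u\n",
		 p->uid, p->paddr, p->len);
	return 0;
}

/* prime one read per period; the payload lands just past the header */
static void my_prime_capture(void *client, unsigned long buf_phys,
			     u32 period_bytes, int periods)
{
	struct my_read_param rp;
	int i;

	rp.len = period_bytes - MY_HDR_SIZE;
	for (i = 0; i < periods; i++) {
		rp.uid = i;
		rp.paddr = buf_phys + i * period_bytes + MY_HDR_SIZE;
		my_queue_read(client, &rp);
	}
}
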
diff --git a/sound/soc/msm/msm-dai-fe.c b/sound/soc/msm/msm-dai-fe.c
index 56e83d5..4cd4a2c 100644
--- a/sound/soc/msm/msm-dai-fe.c
+++ b/sound/soc/msm/msm-dai-fe.c
@@ -15,6 +15,7 @@
 #include <linux/module.h>
 #include <linux/device.h>
 #include <linux/platform_device.h>
+#include <linux/of_device.h>
 #include <sound/core.h>
 #include <sound/pcm.h>
 #include <sound/soc.h>
@@ -92,7 +93,7 @@
 					SNDRV_PCM_RATE_KNOT),
 			.formats = SNDRV_PCM_FMTBIT_S16_LE,
 			.channels_min = 1,
-			.channels_max = 2,
+			.channels_max = 8,
 			.rate_min =     8000,
 			.rate_max =	48000,
 		},
@@ -176,12 +177,24 @@
 			.rate_min =	8000,
 			.rate_max = 48000,
 		},
+		.capture = {
+			.stream_name = "MultiMedia4 Capture",
+			.aif_name = "MM_UL4",
+			.rates = (SNDRV_PCM_RATE_8000_48000|
+					SNDRV_PCM_RATE_KNOT),
+			.formats = SNDRV_PCM_FMTBIT_S16_LE,
+			.channels_min = 1,
+			.channels_max = 8,
+			.rate_min =     8000,
+			.rate_max =	48000,
+		},
 		.ops = &msm_fe_Multimedia_dai_ops,
 		.name = "MultiMedia4",
 	},
 	{
 		.playback = {
 			.stream_name = "MultiMedia5 Playback",
+			.aif_name = "MM_DL5",
 			.rates = (SNDRV_PCM_RATE_8000_48000 |
 					SNDRV_PCM_RATE_KNOT),
 			.formats = SNDRV_PCM_FMTBIT_S16_LE,
@@ -196,6 +209,7 @@
 	{
 		.playback = {
 			.stream_name = "MultiMedia6 Playback",
+			.aif_name = "MM_DL6",
 			.rates = (SNDRV_PCM_RATE_8000_48000 |
 					SNDRV_PCM_RATE_KNOT),
 			.formats = SNDRV_PCM_FMTBIT_S16_LE,
@@ -210,6 +224,7 @@
 	{
 		.playback = {
 			.stream_name = "MultiMedia7 Playback",
+			.aif_name = "MM_DL7",
 			.rates = (SNDRV_PCM_RATE_8000_48000 |
 					SNDRV_PCM_RATE_KNOT),
 			.formats = SNDRV_PCM_FMTBIT_S16_LE,
@@ -224,6 +239,7 @@
 	{
 		.playback = {
 			.stream_name = "MultiMedia8 Playback",
+			.aif_name = "MM_DL8",
 			.rates = (SNDRV_PCM_RATE_8000_48000 |
 					SNDRV_PCM_RATE_KNOT),
 			.formats = SNDRV_PCM_FMTBIT_S16_LE,
@@ -454,8 +470,11 @@
 
 static __devinit int msm_fe_dai_dev_probe(struct platform_device *pdev)
 {
+	if (pdev->dev.of_node)
+		dev_set_name(&pdev->dev, "%s", "msm-dai-fe");
+
 	dev_dbg(&pdev->dev, "%s: dev name %s\n", __func__,
-	dev_name(&pdev->dev));
+		dev_name(&pdev->dev));
 	return snd_soc_register_dais(&pdev->dev, msm_fe_dais,
 		ARRAY_SIZE(msm_fe_dais));
 }
@@ -466,12 +485,18 @@
 	return 0;
 }
 
+static const struct of_device_id msm_dai_fe_dt_match[] = {
+	{.compatible = "qcom,msm-dai-fe"},
+	{}
+};
+
 static struct platform_driver msm_fe_dai_driver = {
 	.probe  = msm_fe_dai_dev_probe,
 	.remove = msm_fe_dai_dev_remove,
 	.driver = {
 		.name = "msm-dai-fe",
 		.owner = THIS_MODULE,
+		.of_match_table = msm_dai_fe_dt_match,
 	},
 };
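
Both msm-dai-fe.c above and msm-pcm-hostless.c below gain device-tree
probing: an of_device_id table wired into of_match_table, plus a
dev_set_name() in probe so devices created from DT keep the legacy platform
name that the machine drivers look up. A generic sketch of that hookup;
"qcom,my-dai" and the my_* names are placeholders:

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/of_device.h>

static int my_dai_probe(struct platform_device *pdev)
{
	/* DT-created devices get an address-based name; normalize it */
	if (pdev->dev.of_node)
		dev_set_name(&pdev->dev, "%s", "my-dai");

	dev_dbg(&pdev->dev, "%s: dev name %s\n", __func__,
		dev_name(&pdev->dev));
	return 0;
}

static const struct of_device_id my_dai_dt_match[] = {
	{ .compatible = "qcom,my-dai" },
	{ }
};

static struct platform_driver my_dai_driver = {
	.probe = my_dai_probe,
	.driver = {
		.name = "my-dai",
		.owner = THIS_MODULE,
		.of_match_table = my_dai_dt_match,
	},
};
module_platform_driver(my_dai_driver);

MODULE_LICENSE("GPL v2");
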
 
diff --git a/sound/soc/msm/msm-dai-q6.c b/sound/soc/msm/msm-dai-q6.c
index b3e5120..147316e 100644
--- a/sound/soc/msm/msm-dai-q6.c
+++ b/sound/soc/msm/msm-dai-q6.c
@@ -40,9 +40,14 @@
 	union afe_port_config port_config;
 };
 
+struct msm_dai_q6_mi2s_dai_config {
+	u16 pdata_mi2s_lines;
+	struct msm_dai_q6_dai_data mi2s_dai_data;
+};
+
 struct msm_dai_q6_mi2s_dai_data {
-	struct msm_dai_q6_dai_data tx_dai;
-	struct msm_dai_q6_dai_data rx_dai;
+	struct msm_dai_q6_mi2s_dai_config tx_dai;
+	struct msm_dai_q6_mi2s_dai_config rx_dai;
 	struct snd_pcm_hw_constraint_list rate_constraint;
 	struct snd_pcm_hw_constraint_list bitwidth_constraint;
 };
@@ -86,8 +91,8 @@
 static const char *mi2s_format[] = {
 	"LPCM",
 	"Compr",
-	"60958-LPCM",
-	"60958-Compr"};
+	"LPCM-60958",
+	"Compr-60958"};
 
 static const struct soc_enum mi2s_config_enum[] = {
 	SOC_ENUM_SINGLE_EXT(4, mi2s_format),
@@ -143,21 +148,63 @@
 {
 	struct msm_dai_q6_mi2s_dai_data *mi2s_dai_data =
 		dev_get_drvdata(dai->dev);
-	struct msm_dai_q6_dai_data *dai_data =
+	struct msm_dai_q6_mi2s_dai_config *mi2s_dai_config =
 		(substream->stream == SNDRV_PCM_STREAM_PLAYBACK ?
 		&mi2s_dai_data->rx_dai : &mi2s_dai_data->tx_dai);
+	struct msm_dai_q6_dai_data *dai_data = &mi2s_dai_config->mi2s_dai_data;
 
 	dai_data->channels = params_channels(params);
 	switch (dai_data->channels) {
-	case 2:
-		dai_data->port_config.mi2s.channel = MSM_AFE_STEREO;
+	case 8:
+	case 7:
+		if (mi2s_dai_config->pdata_mi2s_lines < AFE_I2S_8CHS)
+			goto error_invalid_data;
+		dai_data->port_config.mi2s.line = AFE_I2S_8CHS;
 		break;
+	case 6:
+	case 5:
+		if (mi2s_dai_config->pdata_mi2s_lines < AFE_I2S_6CHS)
+			goto error_invalid_data;
+		dai_data->port_config.mi2s.line = AFE_I2S_6CHS;
+		break;
+	case 4:
+	case 3:
+		if (mi2s_dai_config->pdata_mi2s_lines < AFE_I2S_QUAD01)
+			goto error_invalid_data;
+		if (mi2s_dai_config->pdata_mi2s_lines == AFE_I2S_QUAD23)
+			dai_data->port_config.mi2s.line =
+				mi2s_dai_config->pdata_mi2s_lines;
+		else
+			dai_data->port_config.mi2s.line = AFE_I2S_QUAD01;
+		break;
+	case 2:
 	case 1:
-		dai_data->port_config.mi2s.channel = MSM_AFE_MONO;
+		if (mi2s_dai_config->pdata_mi2s_lines < AFE_I2S_SD0)
+			goto error_invalid_data;
+		switch (mi2s_dai_config->pdata_mi2s_lines) {
+		case AFE_I2S_SD0:
+		case AFE_I2S_SD1:
+		case AFE_I2S_SD2:
+		case AFE_I2S_SD3:
+			dai_data->port_config.mi2s.line =
+				mi2s_dai_config->pdata_mi2s_lines;
+			break;
+		case AFE_I2S_QUAD01:
+		case AFE_I2S_6CHS:
+		case AFE_I2S_8CHS:
+			dai_data->port_config.mi2s.line = AFE_I2S_SD0;
+			break;
+		case AFE_I2S_QUAD23:
+			dai_data->port_config.mi2s.line = AFE_I2S_SD2;
+			break;
+		}
+		if (dai_data->channels == 2)
+			dai_data->port_config.mi2s.channel = MSM_AFE_STEREO;
+		else
+			dai_data->port_config.mi2s.channel = MSM_AFE_MONO;
 		break;
 	default:
-		pr_warn("greater than stereo has not been validated");
-		break;
+		goto error_invalid_data;
 	}
 	dai_data->rate = params_rate(params);
 	dai_data->port_config.mi2s.bitwidth = 16;
@@ -166,7 +213,14 @@
 		mi2s_dai_data->rate_constraint.list = &dai_data->rate;
 		mi2s_dai_data->bitwidth_constraint.list = &dai_data->bitwidth;
 	}
+
+	pr_debug("%s: dai_data->channels = %d, line = %d\n", __func__,
+			dai_data->channels, dai_data->port_config.mi2s.line);
 	return 0;
+error_invalid_data:
+	pr_err("%s: dai_data->channels = %d, line = %d\n", __func__,
+			 dai_data->channels, dai_data->port_config.mi2s.line);
+	return -EINVAL;
 }
 
 static int msm_dai_q6_mi2s_get_lineconfig(u16 sd_lines, u16 *config_ptr,
@@ -276,7 +330,9 @@
 	}
 
 	if (ch_cnt) {
-		dai_data->rx_dai.port_config.mi2s.line = sdline_config;
+		dai_data->rx_dai.mi2s_dai_data.port_config.mi2s.line =
+			sdline_config;
+		dai_data->rx_dai.pdata_mi2s_lines = sdline_config;
 		dai_driver->playback.channels_min = 1;
 		dai_driver->playback.channels_max = ch_cnt << 1;
 	} else {
@@ -292,7 +348,9 @@
 	}
 
 	if (ch_cnt) {
-		dai_data->tx_dai.port_config.mi2s.line = sdline_config;
+		dai_data->tx_dai.mi2s_dai_data.port_config.mi2s.line =
+			sdline_config;
+		dai_data->tx_dai.pdata_mi2s_lines = sdline_config;
 		dai_driver->capture.channels_min = 1;
 		dai_driver->capture.channels_max = ch_cnt << 1;
 	} else {
@@ -301,8 +359,8 @@
 	}
 
 	dev_info(&pdev->dev, "%s: playback sdline %x capture sdline %x\n",
-		 __func__, dai_data->rx_dai.port_config.mi2s.line,
-		 dai_data->tx_dai.port_config.mi2s.line);
+		 __func__, dai_data->rx_dai.pdata_mi2s_lines,
+		 dai_data->tx_dai.pdata_mi2s_lines);
 	dev_info(&pdev->dev, "%s: playback ch_max %d capture ch_mx %d\n",
 		 __func__, dai_driver->playback.channels_max,
 		 dai_driver->capture.channels_max);
@@ -315,8 +373,10 @@
 	struct msm_dai_q6_mi2s_dai_data *mi2s_dai_data =
 	dev_get_drvdata(dai->dev);
 
-	if (test_bit(STATUS_PORT_STARTED, mi2s_dai_data->rx_dai.status_mask) ||
-	    test_bit(STATUS_PORT_STARTED, mi2s_dai_data->rx_dai.status_mask)) {
+	if (test_bit(STATUS_PORT_STARTED,
+		mi2s_dai_data->rx_dai.mi2s_dai_data.status_mask) ||
+	    test_bit(STATUS_PORT_STARTED,
+		mi2s_dai_data->tx_dai.mi2s_dai_data.status_mask)) {
 		dev_err(dai->dev, "%s: err chg i2s mode while dai running",
 			__func__);
 		return -EPERM;
@@ -324,12 +384,12 @@
 
 	switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
 	case SND_SOC_DAIFMT_CBS_CFS:
-		mi2s_dai_data->rx_dai.port_config.mi2s.ws = 1;
-		mi2s_dai_data->tx_dai.port_config.mi2s.ws = 1;
+		mi2s_dai_data->rx_dai.mi2s_dai_data.port_config.mi2s.ws = 1;
+		mi2s_dai_data->tx_dai.mi2s_dai_data.port_config.mi2s.ws = 1;
 		break;
 	case SND_SOC_DAIFMT_CBM_CFM:
-		mi2s_dai_data->rx_dai.port_config.mi2s.ws = 0;
-		mi2s_dai_data->tx_dai.port_config.mi2s.ws = 0;
+		mi2s_dai_data->rx_dai.mi2s_dai_data.port_config.mi2s.ws = 0;
+		mi2s_dai_data->tx_dai.mi2s_dai_data.port_config.mi2s.ws = 0;
 		break;
 	default:
 		return -EINVAL;
@@ -345,7 +405,8 @@
 		dev_get_drvdata(dai->dev);
 	struct msm_dai_q6_dai_data *dai_data =
 		(substream->stream == SNDRV_PCM_STREAM_PLAYBACK ?
-		&mi2s_dai_data->rx_dai : &mi2s_dai_data->tx_dai);
+		 &mi2s_dai_data->rx_dai.mi2s_dai_data :
+		 &mi2s_dai_data->tx_dai.mi2s_dai_data);
 	int rc = 0;
 
 	if (!test_bit(STATUS_PORT_STARTED, dai_data->status_mask)) {
@@ -364,7 +425,8 @@
 		dev_get_drvdata(dai->dev);
 	struct msm_dai_q6_dai_data *dai_data =
 		(substream->stream == SNDRV_PCM_STREAM_PLAYBACK ?
-		&mi2s_dai_data->rx_dai : &mi2s_dai_data->tx_dai);
+		 &mi2s_dai_data->rx_dai.mi2s_dai_data :
+		 &mi2s_dai_data->tx_dai.mi2s_dai_data);
 	u16 port_id = (substream->stream == SNDRV_PCM_STREAM_PLAYBACK ?
 		MI2S_RX : MI2S_TX);
 	int rc = 0;
@@ -406,7 +468,8 @@
 		dev_get_drvdata(dai->dev);
 	struct msm_dai_q6_dai_data *dai_data =
 		(substream->stream == SNDRV_PCM_STREAM_PLAYBACK ?
-		&mi2s_dai_data->rx_dai : &mi2s_dai_data->tx_dai);
+		 &mi2s_dai_data->rx_dai.mi2s_dai_data :
+		 &mi2s_dai_data->tx_dai.mi2s_dai_data);
 	u16 port_id = (substream->stream == SNDRV_PCM_STREAM_PLAYBACK ?
 		MI2S_RX : MI2S_TX);
 	int rc = 0;
@@ -418,8 +481,10 @@
 		clear_bit(STATUS_PORT_STARTED, dai_data->status_mask);
 	}
 
-	if (!test_bit(STATUS_PORT_STARTED, mi2s_dai_data->rx_dai.status_mask) &&
-	    !test_bit(STATUS_PORT_STARTED, mi2s_dai_data->rx_dai.status_mask)) {
+	if (!test_bit(STATUS_PORT_STARTED,
+			mi2s_dai_data->rx_dai.mi2s_dai_data.status_mask) &&
+	    !test_bit(STATUS_PORT_STARTED,
+			mi2s_dai_data->tx_dai.mi2s_dai_data.status_mask)) {
 		mi2s_dai_data->rate_constraint.list = NULL;
 		mi2s_dai_data->bitwidth_constraint.list = NULL;
 	}
@@ -1268,9 +1333,9 @@
 	struct snd_kcontrol *kcontrol = NULL;
 	int rc = 0;
 
-	if (mi2s_dai_data->rx_dai.port_config.mi2s.line) {
+	if (mi2s_dai_data->rx_dai.mi2s_dai_data.port_config.mi2s.line) {
 		kcontrol = snd_ctl_new1(&mi2s_config_controls[0],
-					&mi2s_dai_data->rx_dai);
+					&mi2s_dai_data->rx_dai.mi2s_dai_data);
 		rc = snd_ctl_add(dai->card->snd_card, kcontrol);
 
 		if (IS_ERR_VALUE(rc)) {
@@ -1279,10 +1344,10 @@
 		}
 	}
 
-	if (mi2s_dai_data->tx_dai.port_config.mi2s.line) {
+	if (mi2s_dai_data->tx_dai.mi2s_dai_data.port_config.mi2s.line) {
 		rc = snd_ctl_add(dai->card->snd_card,
-				 snd_ctl_new1(&mi2s_config_controls[2],
-					      &mi2s_dai_data->tx_dai));
+				snd_ctl_new1(&mi2s_config_controls[2],
+				&mi2s_dai_data->tx_dai.mi2s_dai_data));
 
 		if (IS_ERR_VALUE(rc)) {
 			if (kcontrol)
@@ -1302,19 +1367,21 @@
 	int rc;
 
 	/* If AFE port is still up, close it */
-	if (test_bit(STATUS_PORT_STARTED, mi2s_dai_data->rx_dai.status_mask)) {
+	if (test_bit(STATUS_PORT_STARTED,
+			mi2s_dai_data->rx_dai.mi2s_dai_data.status_mask)) {
 		rc = afe_close(MI2S_RX); /* can block */
 		if (IS_ERR_VALUE(rc))
 			dev_err(dai->dev, "fail to close MI2S_RX port\n");
 		clear_bit(STATUS_PORT_STARTED,
-			  mi2s_dai_data->rx_dai.status_mask);
+			  mi2s_dai_data->rx_dai.mi2s_dai_data.status_mask);
 	}
-	if (test_bit(STATUS_PORT_STARTED, mi2s_dai_data->tx_dai.status_mask)) {
+	if (test_bit(STATUS_PORT_STARTED,
+			mi2s_dai_data->tx_dai.mi2s_dai_data.status_mask)) {
 		rc = afe_close(MI2S_TX); /* can block */
 		if (IS_ERR_VALUE(rc))
 			dev_err(dai->dev, "fail to close MI2S_TX port\n");
 		clear_bit(STATUS_PORT_STARTED,
-			  mi2s_dai_data->tx_dai.status_mask);
+			  mi2s_dai_data->tx_dai.mi2s_dai_data.status_mask);
 	}
 	kfree(mi2s_dai_data);
 	snd_soc_unregister_dai(dai->dev);
@@ -1966,6 +2033,8 @@
 	return 0;
 
 err_pdata:
+
+	dev_err(&pdev->dev, "msm_dai_q6_mi2s_dev_probe failed\n");
 	kfree(dai_data);
 rtn:
 	return rc;
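
The MI2S hw_params rework above validates the requested channel count
against the SD lines the board actually wired up (now carried in
pdata_mi2s_lines) before choosing a line configuration, instead of only
warning for more than stereo. A condensed sketch of that selection; the
MY_* enum (assumed to be ordered by capacity, as the driver's comparison
relies on) and the helper name are illustrative only:

#include <linux/errno.h>

enum my_i2s_lines { MY_SD0, MY_SD1, MY_SD2, MY_SD3, MY_QUAD, MY_6CH, MY_8CH };

static int my_pick_line(int channels, enum my_i2s_lines wired,
			enum my_i2s_lines *line)
{
	switch (channels) {
	case 7 ... 8:
		if (wired < MY_8CH)
			return -EINVAL;	/* board has too few data lines */
		*line = MY_8CH;
		break;
	case 5 ... 6:
		if (wired < MY_6CH)
			return -EINVAL;
		*line = MY_6CH;
		break;
	case 3 ... 4:
		if (wired < MY_QUAD)
			return -EINVAL;
		*line = MY_QUAD;
		break;
	case 1 ... 2:
		*line = MY_SD0;	/* any single data line carries mono/stereo */
		break;
	default:
		return -EINVAL;	/* reject instead of the old pr_warn() */
	}
	return 0;
}
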
diff --git a/sound/soc/msm/msm-multi-ch-pcm-q6.c b/sound/soc/msm/msm-multi-ch-pcm-q6.c
index 734d34f..ef58dd1 100644
--- a/sound/soc/msm/msm-multi-ch-pcm-q6.c
+++ b/sound/soc/msm/msm-multi-ch-pcm-q6.c
@@ -49,7 +49,8 @@
 #define PLAYBACK_MAX_PERIOD_SIZE	4032
 #define PLAYBACK_MIN_PERIOD_SIZE        256
 #define CAPTURE_NUM_PERIODS		16
-#define CAPTURE_PERIOD_SIZE		320
+#define CAPTURE_MIN_PERIOD_SIZE		320
+#define CAPTURE_MAX_PERIOD_SIZE		5376
 
 static struct snd_pcm_hardware msm_pcm_hardware_capture = {
 	.info =                 (SNDRV_PCM_INFO_MMAP |
@@ -62,10 +63,10 @@
 	.rate_min =             8000,
 	.rate_max =             48000,
 	.channels_min =         1,
-	.channels_max =         2,
-	.buffer_bytes_max =     CAPTURE_NUM_PERIODS * CAPTURE_PERIOD_SIZE,
-	.period_bytes_min =	CAPTURE_PERIOD_SIZE,
-	.period_bytes_max =     CAPTURE_PERIOD_SIZE,
+	.channels_max =         8,
+	.buffer_bytes_max =     CAPTURE_NUM_PERIODS * CAPTURE_MAX_PERIOD_SIZE,
+	.period_bytes_min =	CAPTURE_MIN_PERIOD_SIZE,
+	.period_bytes_max =     CAPTURE_MAX_PERIOD_SIZE,
 	.periods_min =          CAPTURE_NUM_PERIODS,
 	.periods_max =          CAPTURE_NUM_PERIODS,
 	.fifo_size =            0,
@@ -390,6 +391,17 @@
 		}
 	}
 
+	if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) {
+		ret = snd_pcm_hw_constraint_minmax(runtime,
+			SNDRV_PCM_HW_PARAM_BUFFER_BYTES,
+			CAPTURE_NUM_PERIODS * CAPTURE_MIN_PERIOD_SIZE,
+			CAPTURE_NUM_PERIODS * CAPTURE_MAX_PERIOD_SIZE);
+		if (ret < 0) {
+			pr_err("constraint for buffer bytes min max ret = %d\n",
+									ret);
+		}
+	}
+
 	prtd->dsp_cnt = 0;
 	runtime->private_data = prtd;
 	pr_debug("substream->pcm->device = %d\n", substream->pcm->device);
@@ -695,21 +707,14 @@
 	else
 		dir = OUT;
 
-	if (dir == OUT) {
-		ret = q6asm_audio_client_buf_alloc_contiguous(dir,
-			prtd->audio_client,
-			runtime->hw.period_bytes_min,
-			runtime->hw.periods_max);
-	} else {
-		/*
-		 *TODO : Need to Add Async IO changes. All period
-		 * size might not be supported.
-		 */
-		ret = q6asm_audio_client_buf_alloc_contiguous(dir,
-			prtd->audio_client,
-			(params_buffer_bytes(params) / params_periods(params)),
-			params_periods(params));
-	}
+	/*
+	 *TODO : Need to Add Async IO changes. All period
+	 * size might not be supported.
+	 */
+	ret = q6asm_audio_client_buf_alloc_contiguous(dir,
+		prtd->audio_client,
+		(params_buffer_bytes(params) / params_periods(params)),
+		params_periods(params));
 
 	if (ret < 0) {
 		pr_err("Audio Start: Buffer Allocation failed rc = %d\n", ret);
@@ -723,10 +728,7 @@
 	dma_buf->private_data = NULL;
 	dma_buf->area = buf[0].data;
 	dma_buf->addr =  buf[0].phys;
-	if (dir == OUT)
-		dma_buf->bytes = runtime->hw.buffer_bytes_max;
-	else
-		dma_buf->bytes = params_buffer_bytes(params);
+	dma_buf->bytes = params_buffer_bytes(params);
 	if (!dma_buf->area)
 		return -ENOMEM;
 
diff --git a/sound/soc/msm/msm-pcm-hostless.c b/sound/soc/msm/msm-pcm-hostless.c
index c61511d..c9b23d0 100644
--- a/sound/soc/msm/msm-pcm-hostless.c
+++ b/sound/soc/msm/msm-pcm-hostless.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+/* Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -13,6 +13,7 @@
 #include <linux/init.h>
 #include <linux/module.h>
 #include <linux/platform_device.h>
+#include <linux/of_device.h>
 #include <sound/core.h>
 #include <sound/soc.h>
 #include <sound/pcm.h>
@@ -25,6 +26,9 @@
 
 static __devinit int msm_pcm_hostless_probe(struct platform_device *pdev)
 {
+	if (pdev->dev.of_node)
+		dev_set_name(&pdev->dev, "%s", "msm-pcm-hostless");
+
 	pr_debug("%s: dev name %s\n", __func__, dev_name(&pdev->dev));
 	return snd_soc_register_platform(&pdev->dev,
 				   &msm_soc_hostless_platform);
@@ -36,10 +40,16 @@
 	return 0;
 }
 
+static const struct of_device_id msm_pcm_hostless_dt_match[] = {
+	{.compatible = "qcom,msm-pcm-hostless"},
+	{}
+};
+
 static struct platform_driver msm_pcm_hostless_driver = {
 	.driver = {
 		.name = "msm-pcm-hostless",
 		.owner = THIS_MODULE,
+		.of_match_table = msm_pcm_hostless_dt_match,
 	},
 	.probe = msm_pcm_hostless_probe,
 	.remove = __devexit_p(msm_pcm_hostless_remove),
diff --git a/sound/soc/msm/msm-pcm-q6.c b/sound/soc/msm/msm-pcm-q6.c
index 39ce436..168cf97 100644
--- a/sound/soc/msm/msm-pcm-q6.c
+++ b/sound/soc/msm/msm-pcm-q6.c
@@ -532,12 +532,15 @@
 	int dir = OUT;
 
 	pr_debug("%s\n", __func__);
-	q6asm_cmd(prtd->audio_client, CMD_CLOSE);
-	q6asm_audio_client_buf_free_contiguous(dir,
+	if (prtd->audio_client) {
+		q6asm_cmd(prtd->audio_client, CMD_CLOSE);
+		q6asm_audio_client_buf_free_contiguous(dir,
 				prtd->audio_client);
+		q6asm_audio_client_free(prtd->audio_client);
+	}
+
 	msm_pcm_routing_dereg_phy_stream(soc_prtd->dai_link->be_id,
-	SNDRV_PCM_STREAM_CAPTURE);
-	q6asm_audio_client_free(prtd->audio_client);
+		SNDRV_PCM_STREAM_CAPTURE);
 	kfree(prtd);
 
 	return 0;
@@ -638,7 +641,7 @@
 		if (ret < 0) {
 			pr_err("%s: q6asm_open_read failed\n", __func__);
 			q6asm_audio_client_free(prtd->audio_client);
-			kfree(prtd);
+			prtd->audio_client = NULL;
 			return -ENOMEM;
 		}
 	}
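
The msm-pcm-q6.c capture fix above is a lifetime cleanup: when open fails it
clears the client handle instead of freeing the runtime struct (which
close() frees later anyway), and close() only issues DSP commands while that
handle is still valid. A small sketch of the pattern with stand-in my_*
helpers in place of the real q6asm session calls:

#include <linux/slab.h>
#include <linux/errno.h>

struct my_runtime {
	void *client;		/* DSP session handle, NULL when not open */
};

/* stand-ins for the real session alloc/free/start helpers */
static void *my_client_alloc(void) { return kzalloc(16, GFP_KERNEL); }
static void my_client_free(void *c) { kfree(c); }
static int my_client_start(void *c) { return 0; }

static int my_open(struct my_runtime *rt)
{
	rt->client = my_client_alloc();
	if (!rt->client)
		return -ENOMEM;

	if (my_client_start(rt->client) < 0) {
		my_client_free(rt->client);
		rt->client = NULL;	/* keep rt; close() still frees it */
		return -ENOMEM;
	}
	return 0;
}

static void my_close(struct my_runtime *rt)
{
	if (rt->client) {	/* skip DSP commands if open never finished */
		my_client_free(rt->client);
		rt->client = NULL;
	}
	kfree(rt);
}
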
diff --git a/sound/soc/msm/msm-pcm-routing.c b/sound/soc/msm/msm-pcm-routing.c
index cc51a0f6..afc14f5 100644
--- a/sound/soc/msm/msm-pcm-routing.c
+++ b/sound/soc/msm/msm-pcm-routing.c
@@ -69,7 +69,7 @@
 static const DECLARE_TLV_DB_LINEAR(compressed_rx_vol_gain, 0,
 			INT_RX_VOL_MAX_STEPS);
 
-
+static int msm_route_ec_ref_rx;
 
 /* Equal to Frontend after last of the MULTIMEDIA SESSIONS */
 #define MAX_EQ_SESSIONS		MSM_FRONTEND_DAI_CS_VOICE
@@ -1046,6 +1046,45 @@
 	return 0;
 }
 
+static int msm_routing_ec_ref_rx_get(struct snd_kcontrol *kcontrol,
+					struct snd_ctl_elem_value *ucontrol)
+{
+	pr_debug("%s: ec_ref_rx = %d\n", __func__, msm_route_ec_ref_rx);
+	ucontrol->value.integer.value[0] = msm_route_ec_ref_rx;
+	return 0;
+}
+
+static int msm_routing_ec_ref_rx_put(struct snd_kcontrol *kcontrol,
+					struct snd_ctl_elem_value *ucontrol)
+{
+	switch (ucontrol->value.integer.value[0]) {
+	case 0:
+		msm_route_ec_ref_rx = SLIMBUS_0_RX;
+		break;
+	case 1:
+		msm_route_ec_ref_rx = PRIMARY_I2S_RX;
+		break;
+	default:
+		msm_route_ec_ref_rx = 0;
+		break;
+	}
+	adm_ec_ref_rx_id(msm_route_ec_ref_rx);
+	pr_debug("%s: msm_route_ec_ref_rx = %d\n",
+			__func__, msm_route_ec_ref_rx);
+	return 0;
+}
+
+static const char * const ec_ref_rx[] = {"SLIM_RX", "I2S_RX", "PROXY_RX",
+								"NONE"};
+static const struct soc_enum msm_route_ec_ref_rx_enum[] = {
+				SOC_ENUM_SINGLE_EXT(4, ec_ref_rx),
+};
+
+static const struct snd_kcontrol_new ec_ref_rx_mixer_controls[] = {
+	SOC_ENUM_EXT("EC_REF_RX", msm_route_ec_ref_rx_enum[0],
+	msm_routing_ec_ref_rx_get, msm_routing_ec_ref_rx_put),
+};
+
 static const struct snd_kcontrol_new pri_i2s_rx_mixer_controls[] = {
 	SOC_SINGLE_EXT("MultiMedia1", MSM_BACKEND_DAI_PRI_I2S_RX ,
 	MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer,
@@ -1313,6 +1352,13 @@
 	msm_routing_put_audio_mixer),
 };
 
+
+static const struct snd_kcontrol_new mmul4_mixer_controls[] = {
+	SOC_SINGLE_EXT("MI2S_TX", MSM_BACKEND_DAI_MI2S_TX,
+	MSM_FRONTEND_DAI_MULTIMEDIA4, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+};
+
 static const struct snd_kcontrol_new pri_rx_voice_mixer_controls[] = {
 	SOC_SINGLE_EXT("CSVoice", MSM_BACKEND_DAI_PRI_I2S_RX,
 	MSM_FRONTEND_DAI_CS_VOICE, 1, 0, msm_routing_get_voice_mixer,
@@ -1944,6 +1990,7 @@
 	SND_SOC_DAPM_AIF_IN("VOIP_DL", "VoIP Playback", 0, 0, 0, 0),
 	SND_SOC_DAPM_AIF_OUT("MM_UL1", "MultiMedia1 Capture", 0, 0, 0, 0),
 	SND_SOC_DAPM_AIF_OUT("MM_UL2", "MultiMedia2 Capture", 0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("MM_UL4", "MultiMedia4 Capture", 0, 0, 0, 0),
 	SND_SOC_DAPM_AIF_IN("CS-VOICE_DL1", "CS-VOICE Playback", 0, 0, 0, 0),
 	SND_SOC_DAPM_AIF_OUT("CS-VOICE_UL1", "CS-VOICE Capture", 0, 0, 0, 0),
 	SND_SOC_DAPM_AIF_IN("VoLTE_DL", "VoLTE Playback", 0, 0, 0, 0),
@@ -2041,6 +2088,8 @@
 	mmul1_mixer_controls, ARRAY_SIZE(mmul1_mixer_controls)),
 	SND_SOC_DAPM_MIXER("MultiMedia2 Mixer", SND_SOC_NOPM, 0, 0,
 	mmul2_mixer_controls, ARRAY_SIZE(mmul2_mixer_controls)),
+	SND_SOC_DAPM_MIXER("MultiMedia4 Mixer", SND_SOC_NOPM, 0, 0,
+	mmul4_mixer_controls, ARRAY_SIZE(mmul4_mixer_controls)),
 	SND_SOC_DAPM_MIXER("AUX_PCM_RX Audio Mixer", SND_SOC_NOPM, 0, 0,
 	auxpcm_rx_mixer_controls, ARRAY_SIZE(auxpcm_rx_mixer_controls)),
 	SND_SOC_DAPM_MIXER("SEC_AUX_PCM_RX Audio Mixer", SND_SOC_NOPM, 0, 0,
@@ -2208,6 +2257,7 @@
 	{"MultiMedia1 Mixer", "PRI_TX", "PRI_I2S_TX"},
 	{"MultiMedia1 Mixer", "MI2S_TX", "MI2S_TX"},
 	{"MultiMedia2 Mixer", "MI2S_TX", "MI2S_TX"},
+	{"MultiMedia4 Mixer", "MI2S_TX", "MI2S_TX"},
 	{"MultiMedia1 Mixer", "SLIM_0_TX", "SLIMBUS_0_TX"},
 	{"MultiMedia1 Mixer", "AUX_PCM_UL_TX", "AUX_PCM_TX"},
 	{"MultiMedia1 Mixer", "SEC_AUX_PCM_UL_TX", "SEC_AUX_PCM_TX"},
@@ -2237,6 +2287,7 @@
 	{"MM_UL1", NULL, "MultiMedia1 Mixer"},
 	{"MultiMedia2 Mixer", "INTERNAL_FM_TX", "INT_FM_TX"},
 	{"MM_UL2", NULL, "MultiMedia2 Mixer"},
+	{"MM_UL4", NULL, "MultiMedia4 Mixer"},
 
 	{"AUX_PCM_RX Audio Mixer", "MultiMedia1", "MM_DL1"},
 	{"AUX_PCM_RX Audio Mixer", "MultiMedia2", "MM_DL2"},
@@ -2409,6 +2460,10 @@
 	{"BE_OUT", NULL, "SLIMBUS_3_RX"},
 	{"BE_OUT", NULL, "STUB_RX"},
 	{"STUB_TX", NULL, "BE_IN"},
+	{"BE_OUT", NULL, "SEC_AUX_PCM_RX"},
+	{"SEC_AUX_PCM_TX", NULL, "BE_IN"},
+	{"BE_OUT", NULL, "AUX_PCM_RX"},
+	{"AUX_PCM_TX", NULL, "BE_IN"},
 };
 
 static int msm_pcm_routing_hw_params(struct snd_pcm_substream *substream,
@@ -2599,6 +2654,9 @@
 				lpa_SRS_trumedia_controls_I2S,
 			ARRAY_SIZE(lpa_SRS_trumedia_controls_I2S));
 
+	snd_soc_add_platform_controls(platform,
+				ec_ref_rx_mixer_controls,
+			ARRAY_SIZE(ec_ref_rx_mixer_controls));
 	return 0;
 }
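
msm-pcm-routing.c exposes the echo-cancellation reference selection as an
enum mixer control whose put handler turns the selected item into an RX port
id for the ADM layer. A stripped-down sketch of that SOC_ENUM_EXT pattern;
the control name, item texts, and plain index storage here are illustrative
and do not reproduce the patch's exact port mapping:

#include <linux/kernel.h>
#include <sound/soc.h>

static unsigned int my_ec_ref;	/* index of the currently selected item */

static int my_ec_ref_get(struct snd_kcontrol *kcontrol,
			 struct snd_ctl_elem_value *ucontrol)
{
	ucontrol->value.enumerated.item[0] = my_ec_ref;
	return 0;
}

static int my_ec_ref_put(struct snd_kcontrol *kcontrol,
			 struct snd_ctl_elem_value *ucontrol)
{
	my_ec_ref = ucontrol->value.enumerated.item[0];
	/* a real driver would translate the index to an AFE port id here
	 * and hand it to the ADM layer before the next COPP open */
	return 0;
}

static const char * const my_ec_ref_texts[] = {"SLIM_RX", "I2S_RX", "NONE"};

static const struct soc_enum my_ec_ref_enum =
	SOC_ENUM_SINGLE_EXT(ARRAY_SIZE(my_ec_ref_texts), my_ec_ref_texts);

/* registered from probe via snd_soc_add_platform_controls() */
static const struct snd_kcontrol_new my_ec_ref_controls[] = {
	SOC_ENUM_EXT("My EC Ref Select", my_ec_ref_enum,
		     my_ec_ref_get, my_ec_ref_put),
};
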
 
diff --git a/sound/soc/msm/msm8930.c b/sound/soc/msm/msm8930.c
index 20ac6e1..d07cdff 100644
--- a/sound/soc/msm/msm8930.c
+++ b/sound/soc/msm/msm8930.c
@@ -307,8 +307,8 @@
 	{"Ext Spk Left Neg", NULL, "LINEOUT2"},
 
 	/* Headset Mic */
-	{"AMIC2", NULL, "MIC BIAS2 Internal1"},
-	{"MIC BIAS2 Internal1", NULL, "Headset Mic"},
+	{"AMIC2", NULL, "MIC BIAS2 External"},
+	{"MIC BIAS2 External", NULL, "Headset Mic"},
 
 	/* Microphone path */
 	{"AMIC1", NULL, "MIC BIAS2 External"},
@@ -1222,7 +1222,7 @@
 {
 	int ret;
 
-	if (!cpu_is_msm8930()) {
+	if (!cpu_is_msm8930() && !cpu_is_msm8627()) {
 		pr_err("%s: Not the right machine type\n", __func__);
 		return -ENODEV ;
 	}
@@ -1260,7 +1260,7 @@
 
 static void __exit msm8930_audio_exit(void)
 {
-	if (!cpu_is_msm8930()) {
+	if (!cpu_is_msm8930() && !cpu_is_msm8627()) {
 		pr_err("%s: Not the right machine type\n", __func__);
 		return ;
 	}
diff --git a/sound/soc/msm/msm8960.c b/sound/soc/msm/msm8960.c
index 2c44b46..5de3855 100644
--- a/sound/soc/msm/msm8960.c
+++ b/sound/soc/msm/msm8960.c
@@ -479,10 +479,11 @@
 	 * on FLUID.
 	 */
 	{"AMIC3", NULL, "MIC BIAS3 Internal1"},
-	{"MIC BIAS3 Internal1", NULL, "ANCRight Headset Mic"},
-
+	{"MIC BIAS3 Internal1", NULL, "MIC BIAS2 External"},
+	{"MIC BIAS2 External", NULL, "ANCRight Headset Mic"},
 	{"AMIC4", NULL, "MIC BIAS1 Internal2"},
-	{"MIC BIAS1 Internal2", NULL, "ANCLeft Headset Mic"},
+	{"MIC BIAS1 Internal2", NULL, "MIC BIAS2 External"},
+	{"MIC BIAS2 External", NULL, "ANCLeft Headset Mic"},
 
 	{"HEADPHONE", NULL, "LDO_H"},
 
diff --git a/sound/soc/msm/msm8974.c b/sound/soc/msm/msm8974.c
index 4678ea4..b110250 100644
--- a/sound/soc/msm/msm8974.c
+++ b/sound/soc/msm/msm8974.c
@@ -648,6 +648,21 @@
 		.be_id = MSM_FRONTEND_DAI_MULTIMEDIA1
 	},
 	{
+		.name = "MSM VoIP",
+		.stream_name = "VoIP",
+		.cpu_dai_name	= "VoIP",
+		.platform_name  = "msm-voip-dsp",
+		.dynamic = 1,
+		.trigger = {SND_SOC_DPCM_TRIGGER_POST,
+			SND_SOC_DPCM_TRIGGER_POST},
+		.codec_dai_name = "snd-soc-dummy-dai",
+		.codec_name = "snd-soc-dummy",
+		.ignore_suspend = 1,
+		/* this dainlink has playback support */
+		.ignore_pmdown_time = 1,
+		.be_id = MSM_FRONTEND_DAI_VOIP,
+	},
+	{
 		.name = "MSM8974 LPA",
 		.stream_name = "LPA",
 		.cpu_dai_name	= "MultiMedia3",
@@ -662,6 +677,21 @@
 		.ignore_pmdown_time = 1,
 		.be_id = MSM_FRONTEND_DAI_MULTIMEDIA3,
 	},
+	{
+		.name = "AUXPCM Hostless",
+		.stream_name = "AUXPCM Hostless",
+		.cpu_dai_name   = "AUXPCM_HOSTLESS",
+		.platform_name  = "msm-pcm-hostless",
+		.dynamic = 1,
+		.trigger = {SND_SOC_DPCM_TRIGGER_POST,
+			SND_SOC_DPCM_TRIGGER_POST},
+		.no_host_mode = SND_SOC_DAI_LINK_NO_HOST,
+		.ignore_suspend = 1,
+		/* this dainlink has playback support */
+		.ignore_pmdown_time = 1,
+		.codec_dai_name = "snd-soc-dummy-dai",
+		.codec_name = "snd-soc-dummy",
+	},
 
 	/* AUX PCM Backend DAI Links */
 	{
@@ -711,7 +741,7 @@
 static int __init msm_audio_init(void)
 {
 	int ret = 0;
-	if (!machine_is_copper_sim()) {
+	if (!machine_is_msm8974_sim()) {
 		pr_err("%s: Not the right machine type\n", __func__);
 		return -ENODEV;
 	}
@@ -736,7 +766,7 @@
 
 static void __exit msm_audio_exit(void)
 {
-	if (!machine_is_copper_sim()) {
+	if (!machine_is_msm8974_sim()) {
 		pr_err("%s: Not the right machine type\n", __func__);
 		return ;
 	}
diff --git a/sound/soc/msm/qdsp6/q6adm.c b/sound/soc/msm/qdsp6/q6adm.c
index bc57ef3..0327e4a 100644
--- a/sound/soc/msm/qdsp6/q6adm.c
+++ b/sound/soc/msm/qdsp6/q6adm.c
@@ -38,6 +38,7 @@
 	atomic_t copp_cnt[AFE_MAX_PORTS];
 	atomic_t copp_stat[AFE_MAX_PORTS];
 	wait_queue_head_t wait;
+	int  ec_ref_rx;
 };
 
 static struct acdb_cal_block mem_addr_audproc[MAX_AUDPROC_TYPES];
@@ -45,6 +46,7 @@
 
 static struct adm_ctl			this_adm;
 
+
 int srs_trumedia_open(int port_id, int srs_tech_id, void *srs_params)
 {
 	struct asm_pp_params_command *open = NULL;
@@ -642,8 +644,16 @@
 
 		open.mode = path;
 		open.endpoint_id1 = port_id;
-		open.endpoint_id2 = 0xFFFF;
 
+		if (this_adm.ec_ref_rx == 0) {
+			open.endpoint_id2 = 0xFFFF;
+		} else if (this_adm.ec_ref_rx && (path != 1)) {
+			open.endpoint_id2 = this_adm.ec_ref_rx;
+			this_adm.ec_ref_rx = 0;
+		}
+
+		pr_debug("%s open.endpoint_id1:%d open.endpoint_id2:%d",
+			__func__, open.endpoint_id1, open.endpoint_id2);
 		/* convert path to acdb path */
 		if (path == ADM_PATH_PLAYBACK)
 			open.topology_id = get_adm_rx_topology();
@@ -755,6 +765,15 @@
 			open.dev_channel_mapping[3] = PCM_CHANNEL_FC;
 			open.dev_channel_mapping[4] = PCM_CHANNEL_LB;
 			open.dev_channel_mapping[5] = PCM_CHANNEL_RB;
+		} else if (channel_mode == 8) {
+			open.dev_channel_mapping[0] = PCM_CHANNEL_FL;
+			open.dev_channel_mapping[1] = PCM_CHANNEL_FR;
+			open.dev_channel_mapping[2] = PCM_CHANNEL_LFE;
+			open.dev_channel_mapping[3] = PCM_CHANNEL_FC;
+			open.dev_channel_mapping[4] = PCM_CHANNEL_LB;
+			open.dev_channel_mapping[5] = PCM_CHANNEL_RB;
+			open.dev_channel_mapping[6] = PCM_CHANNEL_FLC;
+			open.dev_channel_mapping[7] = PCM_CHANNEL_FRC;
 		} else {
 			pr_err("%s invalid num_chan %d\n", __func__,
 					channel_mode);
@@ -772,8 +791,16 @@
 
 		open.mode = path;
 		open.endpoint_id1 = port_id;
-		open.endpoint_id2 = 0xFFFF;
 
+		if (this_adm.ec_ref_rx == 0) {
+			open.endpoint_id2 = 0xFFFF;
+		} else if (this_adm.ec_ref_rx && (path != 1)) {
+			open.endpoint_id2 = this_adm.ec_ref_rx;
+			this_adm.ec_ref_rx = 0;
+		}
+
+		pr_debug("%s open.endpoint_id1:%d open.endpoint_id2:%d",
+			__func__, open.endpoint_id1, open.endpoint_id2);
 		/* convert path to acdb path */
 		if (path == ADM_PATH_PLAYBACK)
 			open.topology_id = get_adm_rx_topology();
@@ -1073,6 +1100,12 @@
 	return atomic_read(&this_adm.copp_id[port_index]);
 }
 
+void adm_ec_ref_rx_id(int  port_id)
+{
+	this_adm.ec_ref_rx = port_id;
+	pr_debug("%s ec_ref_rx:%d", __func__, this_adm.ec_ref_rx);
+}
+
 int adm_close(int port_id)
 {
 	struct apr_hdr close;
diff --git a/sound/soc/msm/qdsp6/q6asm.c b/sound/soc/msm/qdsp6/q6asm.c
index 09bfd94..9136f93 100644
--- a/sound/soc/msm/qdsp6/q6asm.c
+++ b/sound/soc/msm/qdsp6/q6asm.c
@@ -246,21 +246,20 @@
 						port->buf[cnt].handle);
 				ion_client_destroy(port->buf[cnt].client);
 #else
-				pr_debug("%s:data[%p]phys[%p][%p] cnt[%d]"
-					 "mem_buffer[%p]\n",
+				pr_debug("%s:data[%p]phys[%p][%p] cnt[%d] mem_buffer[%p]\n",
 					__func__, (void *)port->buf[cnt].data,
-					   (void *)port->buf[cnt].phys,
-					   (void *)&port->buf[cnt].phys, cnt,
-					   (void *)port->buf[cnt].mem_buffer);
+					(void *)port->buf[cnt].phys,
+					(void *)&port->buf[cnt].phys, cnt,
+					(void *)port->buf[cnt].mem_buffer);
 				if (IS_ERR((void *)port->buf[cnt].mem_buffer))
-					pr_err("%s:mem buffer invalid, error ="
-						 "%ld\n", __func__,
+					pr_err("%s:mem buffer invalid, error = %ld\n",
+					 __func__,
 				PTR_ERR((void *)port->buf[cnt].mem_buffer));
 				else {
 					if (iounmap(
 						port->buf[cnt].mem_buffer) < 0)
-						pr_err("%s: unmap buffer"
-							" failed\n", __func__);
+						pr_err("%s: unmap buffer failed\n",
+								 __func__);
 				}
 				free_contiguous_memory_by_paddr(
 					port->buf[cnt].phys);
@@ -306,8 +305,7 @@
 		ion_unmap_kernel(port->buf[0].client, port->buf[0].handle);
 		ion_free(port->buf[0].client, port->buf[0].handle);
 		ion_client_destroy(port->buf[0].client);
-		pr_debug("%s:data[%p]phys[%p][%p]"
-			", client[%p] handle[%p]\n",
+		pr_debug("%s:data[%p]phys[%p][%p], client[%p] handle[%p]\n",
 			__func__,
 			(void *)port->buf[0].data,
 			(void *)port->buf[0].phys,
@@ -315,22 +313,20 @@
 			(void *)port->buf[0].client,
 			(void *)port->buf[0].handle);
 #else
-		pr_debug("%s:data[%p]phys[%p][%p]"
-			"mem_buffer[%p]\n",
+		pr_debug("%s:data[%p]phys[%p][%p] mem_buffer[%p]\n",
 			__func__,
 			(void *)port->buf[0].data,
 			(void *)port->buf[0].phys,
 			(void *)&port->buf[0].phys,
 			(void *)port->buf[0].mem_buffer);
 		if (IS_ERR((void *)port->buf[0].mem_buffer))
-			pr_err("%s:mem buffer invalid, error ="
-				"%ld\n", __func__,
+			pr_err("%s:mem buffer invalid, error = %ld\n",
+				 __func__,
 				PTR_ERR((void *)port->buf[0].mem_buffer));
 		else {
 			if (iounmap(
 				port->buf[0].mem_buffer) < 0)
-				pr_err("%s: unmap buffer"
-					" failed\n", __func__);
+				pr_err("%s: unmap buffer failed\n", __func__);
 		}
 		free_contiguous_memory_by_paddr(port->buf[0].phys);
 #endif
@@ -433,8 +429,8 @@
 					(apr_fn)q6asm_mmapcallback,\
 					0x0FFFFFFFF, &this_mmap);
 		if (this_mmap.apr == NULL) {
-			pr_debug("%s Unable to register \
-				APR ASM common port \n", __func__);
+			pr_debug("%s Unable to register APR ASM common port\n",
+							 __func__);
 			goto fail;
 		}
 	}
@@ -523,8 +519,7 @@
 						(UINT_MAX, "audio_client");
 					if (IS_ERR_OR_NULL((void *)
 						buf[cnt].client)) {
-						pr_err("%s: ION create client"
-						" for AUDIO failed\n",
+						pr_err("%s: ION create client for AUDIO failed\n",
 						__func__);
 						mutex_unlock(&ac->cmd_lock);
 						goto fail;
@@ -534,8 +529,7 @@
 						(0x1 << ION_AUDIO_HEAP_ID));
 					if (IS_ERR_OR_NULL((void *)
 						buf[cnt].handle)) {
-						pr_err("%s: ION memory"
-					" allocation for AUDIO failed\n",
+						pr_err("%s: ION memory allocation for AUDIO failed\n",
 							__func__);
 						mutex_unlock(&ac->cmd_lock);
 						goto fail;
@@ -547,8 +541,7 @@
 						&buf[cnt].phys,
 						(size_t *)&len);
 					if (rc) {
-						pr_err("%s: ION Get Physical"
-						" for AUDIO failed, rc = %d\n",
+						pr_err("%s: ION Get Physical for AUDIO failed, rc = %d\n",
 							__func__, rc);
 						mutex_unlock(&ac->cmd_lock);
 						goto fail;
@@ -559,8 +552,8 @@
 							 0);
 					if (IS_ERR_OR_NULL((void *)
 						buf[cnt].data)) {
-						pr_err("%s: ION memory"
-				" mapping for AUDIO failed\n", __func__);
+						pr_err("%s: ION memory mapping for AUDIO failed\n",
+								 __func__);
 						mutex_unlock(&ac->cmd_lock);
 						goto fail;
 					}
@@ -571,8 +564,8 @@
 					allocate_contiguous_ebi_nomap(bufsz,
 						SZ_4K);
 					if (!buf[cnt].phys) {
-						pr_err("%s:Buf alloc failed "
-						" size=%d\n", __func__,
+						pr_err("%s:Buf alloc failed size=%d\n",
+						 __func__,
 						bufsz);
 						mutex_unlock(&ac->cmd_lock);
 						goto fail;
@@ -581,17 +574,17 @@
 					ioremap(buf[cnt].phys, bufsz);
 					if (IS_ERR(
 						(void *)buf[cnt].mem_buffer)) {
-						pr_err("%s:map_buffer failed,"
-							"error = %ld\n",
-				__func__, PTR_ERR((void *)buf[cnt].mem_buffer));
+						pr_err("%s:map_buffer failed, error = %ld\n",
+					__func__,
+					 PTR_ERR((void *)buf[cnt].mem_buffer));
 						mutex_unlock(&ac->cmd_lock);
 						goto fail;
 					}
 					buf[cnt].data =
 						buf[cnt].mem_buffer;
 					if (!buf[cnt].data) {
-						pr_err("%s:invalid vaddr,"
-						" iomap failed\n", __func__);
+						pr_err("%s:invalid vaddr, iomap failed\n",
+						__func__);
 						mutex_unlock(&ac->cmd_lock);
 						goto fail;
 					}
@@ -697,17 +690,15 @@
 	buf[0].phys = allocate_contiguous_ebi_nomap(bufsz * bufcnt,
 						SZ_4K);
 	if (!buf[0].phys) {
-		pr_err("%s:Buf alloc failed "
-			" size=%d, bufcnt=%d\n", __func__,
-			bufsz, bufcnt);
+		pr_err("%s:Buf alloc failed size=%d, bufcnt=%d\n",
+		 __func__, bufsz, bufcnt);
 		mutex_unlock(&ac->cmd_lock);
 		goto fail;
 	}
 
 	buf[0].mem_buffer = ioremap(buf[0].phys, bufsz * bufcnt);
 	if (IS_ERR((void *)buf[cnt].mem_buffer)) {
-		pr_err("%s:map_buffer failed,"
-			"error = %ld\n",
+		pr_err("%s:map_buffer failed, error = %ld\n",
 			__func__, PTR_ERR((void *)buf[0].mem_buffer));
 
 		mutex_unlock(&ac->cmd_lock);
@@ -716,8 +707,7 @@
 	buf[0].data = buf[0].mem_buffer;
 #endif
 	if (!buf[0].data) {
-		pr_err("%s:invalid vaddr,"
-			" iomap failed\n", __func__);
+		pr_err("%s:invalid vaddr, iomap failed\n", __func__);
 		mutex_unlock(&ac->cmd_lock);
 		goto fail;
 	}
@@ -747,6 +737,9 @@
 		cnt++;
 	}
 	ac->port[dir].max_buf_cnt = cnt;
+
+	pr_debug("%s ac->port[%d].max_buf_cnt[%d]\n", __func__, dir,
+			 ac->port[dir].max_buf_cnt);
 	mutex_unlock(&ac->cmd_lock);
 	rc = q6asm_memory_map(ac, buf[0].phys, dir, bufsz, cnt);
 	if (rc < 0) {
@@ -776,9 +769,8 @@
 		return 0;
 	}
 
-	pr_debug("%s:ptr0[0x%x]ptr1[0x%x]opcode[0x%x]"
-		"token[0x%x]payload_s[%d] src[%d] dest[%d]\n", __func__,
-		payload[0], payload[1], data->opcode, data->token,
+	pr_debug("%s:ptr0[0x%x]ptr1[0x%x]opcode[0x%x] token[0x%x]payload_s[%d] src[%d] dest[%d]\n",
+		 __func__, payload[0], payload[1], data->opcode, data->token,
 		data->payload_size, data->src_port, data->dest_port);
 
 	if (data->opcode == APR_BASIC_RSP_RESULT) {
@@ -836,8 +828,8 @@
 		return 0;
 	}
 
-	pr_debug("%s: session[%d]opcode[0x%x] \
-		token[0x%x]payload_s[%d] src[%d] dest[%d]\n", __func__,
+	pr_debug("%s: session[%d]opcode[0x%x] token[0x%x]payload_s[%d] src[%d] dest[%d]\n",
+		 __func__,
 		ac->session, data->opcode,
 		data->token, data->payload_size, data->src_port,
 		data->dest_port);
@@ -868,6 +860,7 @@
 		case ASM_DATA_CMD_MEDIA_FORMAT_UPDATE:
 		case ASM_STREAM_CMD_SET_ENCDEC_PARAM:
 		case ASM_STREAM_CMD_OPEN_WRITE_COMPRESSED:
+		case ASM_STREAM_CMD_OPEN_READ_COMPRESSED:
 			if (atomic_read(&ac->cmd_state)) {
 				atomic_set(&ac->cmd_state, 0);
 				wake_up(&ac->cmd_wait);
@@ -915,9 +908,8 @@
 				   out_cold_index*/
 				if (out_cold_index != 1) {
 					do_gettimeofday(&out_cold_tv);
-					pr_debug("COLD: apr_send_pkt at %ld \
-					sec %ld microsec\n",\
-					out_cold_tv.tv_sec,\
+					pr_debug("COLD: apr_send_pkt at %ld sec %ld microsec\n",
+					out_cold_tv.tv_sec,
 					out_cold_tv.tv_usec);
 					out_cold_index = 1;
 				}
@@ -953,8 +945,7 @@
 			 */
 			if (in_cont_index == 7) {
 				do_gettimeofday(&in_cont_tv);
-				pr_err("In_CONT:previous read buffer done \
-				at %ld sec %ld microsec\n",\
+				pr_err("In_CONT:previous read buffer done at %ld sec %ld microsec\n",
 				in_cont_tv.tv_sec, in_cont_tv.tv_usec);
 			}
 		}
@@ -971,9 +962,8 @@
 				payload[READDONE_IDX_ID],
 				payload[READDONE_IDX_NUMFRAMES]);
 #ifdef CONFIG_DEBUG_FS
-		if (in_enable_flag) {
+		if (in_enable_flag)
 			in_cont_index++;
-		}
 #endif
 		if (ac->io_mode == SYNC_IO_MODE) {
 			if (port->buf == NULL) {
@@ -1009,9 +999,8 @@
 		pr_err("ASM_SESSION_EVENT_TX_OVERFLOW\n");
 		break;
 	case ASM_SESSION_CMDRSP_GET_SESSION_TIME:
-		pr_debug("%s: ASM_SESSION_CMDRSP_GET_SESSION_TIME, "
-				"payload[0] = %d, payload[1] = %d, "
-				"payload[2] = %d\n", __func__,
+		pr_debug("%s: ASM_SESSION_CMDRSP_GET_SESSION_TIME, payload[0] = %d, payload[1] = %d, payload[2] = %d\n",
+				 __func__,
 				 payload[0], payload[1], payload[2]);
 		ac->time_stamp = (uint64_t)(((uint64_t)payload[1] << 32) |
 				payload[2]);
@@ -1022,9 +1011,8 @@
 		break;
 	case ASM_DATA_EVENT_SR_CM_CHANGE_NOTIFY:
 	case ASM_DATA_EVENT_ENC_SR_CM_NOTIFY:
-		pr_debug("%s: ASM_DATA_EVENT_SR_CM_CHANGE_NOTIFY, "
-				"payload[0] = %d, payload[1] = %d, "
-				"payload[2] = %d, payload[3] = %d\n", __func__,
+		pr_debug("%s: ASM_DATA_EVENT_SR_CM_CHANGE_NOTIFY, payload[0] = %d, payload[1] = %d, payload[2] = %d, payload[3] = %d\n",
+				 __func__,
 				payload[0], payload[1], payload[2],
 				payload[3]);
 		break;
@@ -1061,8 +1049,8 @@
 		if (port->buf[idx].used == dir) {
 			/* To make it more robust, we could loop and get the
 			next avail buf, its risky though */
-			pr_debug("%s:Next buf idx[0x%x] not available,\
-				dir[%d]\n", __func__, idx, dir);
+						pr_debug("%s:Next buf idx[0x%x] not available, dir[%d]\n",
+			 __func__, idx, dir);
 			mutex_unlock(&port->lock);
 			return NULL;
 		}
@@ -1111,8 +1099,8 @@
 		 * To make it more robust, we could loop and get the
 		 * next avail buf, its risky though
 		 */
-		pr_debug("%s:Next buf idx[0x%x] not available,\
-			dir[%d]\n", __func__, idx, dir);
+		pr_debug("%s:Next buf idx[0x%x] not available, dir[%d]\n",
+		 __func__, idx, dir);
 		return NULL;
 	}
 	*size = port->buf[idx].actual_size;
@@ -1276,6 +1264,42 @@
 	return -EINVAL;
 }
 
+int q6asm_open_read_compressed(struct audio_client *ac, uint32_t format)
+{
+	int rc = 0x00;
+	struct asm_stream_cmd_open_read_compressed open;
+#ifdef CONFIG_DEBUG_FS
+	in_cont_index = 0;
+#endif
+	if ((ac == NULL) || (ac->apr == NULL)) {
+		pr_err("%s: APR handle NULL\n", __func__);
+		return -EINVAL;
+	}
+	pr_debug("%s:session[%d]", __func__, ac->session);
+
+	q6asm_add_hdr(ac, &open.hdr, sizeof(open), TRUE);
+	open.hdr.opcode = ASM_STREAM_CMD_OPEN_READ_COMPRESSED;
+	/* hardcoded as following */
+	open.frame_per_buf = 1;
+	open.uMode = 0;
+
+	rc = apr_send_pkt(ac->apr, (uint32_t *) &open);
+	if (rc < 0) {
+		pr_err("open failed op[0x%x]rc[%d]\n", open.hdr.opcode, rc);
+		goto fail_cmd;
+	}
+	rc = wait_event_timeout(ac->cmd_wait,
+		(atomic_read(&ac->cmd_state) == 0), 5*HZ);
+	if (!rc) {
+		pr_err("%s: timeout. waited for OPEN_READ_COMPRESSED rc[%d]\n",
+				 __func__, rc);
+		goto fail_cmd;
+	}
+	return 0;
+fail_cmd:
+	return -EINVAL;
+}
+
 int q6asm_open_write_compressed(struct audio_client *ac, uint32_t format)
 {
 	int rc = 0x00;
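
A minimal caller sketch for the new q6asm_open_read_compressed() above, not part of
the patch: q6asm_audio_client_alloc()/q6asm_audio_client_free() are assumed to be the
existing q6asm client helpers, and example_asm_cb/priv are placeholders. Note the
format argument is accepted but not yet placed in the command payload (frame_per_buf
is hard-coded to 1 and uMode to 0 inside the call).

	static int example_open_compressed_capture(void *priv)
	{
		struct audio_client *ac;

		/* example_asm_cb is a placeholder event callback */
		ac = q6asm_audio_client_alloc(example_asm_cb, priv);
		if (!ac)
			return -ENOMEM;

		if (q6asm_open_read_compressed(ac, FORMAT_DTS) < 0) {
			q6asm_audio_client_free(ac);
			return -ENODEV;
		}
		/* buffers can now be mapped and reads queued as usual */
		return 0;
	}
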
@@ -1305,6 +1329,9 @@
 	case FORMAT_DTS:
 		open.format = DTS;
 		break;
+	case FORMAT_DTS_LBR:
+		open.format = DTS_LBR;
+		break;
 	case FORMAT_AAC:
 		open.format = MPEG4_AAC;
 		break;
@@ -1388,6 +1415,12 @@
 	case FORMAT_MP3:
 		open.format = MP3;
 		break;
+	case FORMAT_DTS:
+		open.format = DTS;
+		break;
+	case FORMAT_DTS_LBR:
+		open.format = DTS_LBR;
+		break;
 	default:
 		pr_err("%s: Invalid format[%d]\n", __func__, format);
 		goto fail_cmd;
@@ -1594,8 +1627,8 @@
 	struct asm_stream_cmd_encdec_cfg_blk enc_cfg;
 	int rc = 0;
 
-	pr_debug("%s:session[%d]frames[%d]SR[%d]ch[%d]bitrate[%d]mode[%d]"
-		"format[%d]", __func__, ac->session, frames_per_buf,
+	pr_debug("%s:session[%d]frames[%d]SR[%d]ch[%d]bitrate[%d]mode[%d] format[%d]",
+		 __func__, ac->session, frames_per_buf,
 		sample_rate, channels, bit_rate, mode, format);
 
 	q6asm_add_hdr(ac, &enc_cfg.hdr, sizeof(enc_cfg), TRUE);
@@ -1670,6 +1703,47 @@
 	return -EINVAL;
 }
 
+int q6asm_enc_cfg_blk_pcm_native(struct audio_client *ac,
+			uint32_t rate, uint32_t channels)
+{
+	struct asm_stream_cmd_encdec_cfg_blk  enc_cfg;
+
+	int rc = 0;
+
+	pr_debug("%s: Session %d, rate = %d, channels = %d, setting the rate and channels to 0 for native\n",
+			 __func__, ac->session, rate, channels);
+
+	q6asm_add_hdr(ac, &enc_cfg.hdr, sizeof(enc_cfg), TRUE);
+
+	enc_cfg.hdr.opcode = ASM_STREAM_CMD_SET_ENCDEC_PARAM;
+	enc_cfg.param_id = ASM_ENCDEC_CFG_BLK_ID;
+	enc_cfg.param_size = sizeof(struct asm_encode_cfg_blk);
+	enc_cfg.enc_blk.frames_per_buf = 1;
+	enc_cfg.enc_blk.format_id = LINEAR_PCM;
+	enc_cfg.enc_blk.cfg_size = sizeof(struct asm_pcm_cfg);
+	enc_cfg.enc_blk.cfg.pcm.ch_cfg = 0;/*channels;*/
+	enc_cfg.enc_blk.cfg.pcm.bits_per_sample = 16;
+	enc_cfg.enc_blk.cfg.pcm.sample_rate = 0;/*rate;*/
+	enc_cfg.enc_blk.cfg.pcm.is_signed = 1;
+	enc_cfg.enc_blk.cfg.pcm.interleaved = 1;
+
+	rc = apr_send_pkt(ac->apr, (uint32_t *) &enc_cfg);
+	if (rc < 0) {
+		pr_err("Command open failed\n");
+		rc = -EINVAL;
+		goto fail_cmd;
+	}
+	rc = wait_event_timeout(ac->cmd_wait,
+			(atomic_read(&ac->cmd_state) == 0), 5*HZ);
+	if (!rc) {
+		pr_err("timeout opcode[0x%x] ", enc_cfg.hdr.opcode);
+		goto fail_cmd;
+	}
+	return 0;
+fail_cmd:
+	return -EINVAL;
+}
+
 int q6asm_enc_cfg_blk_multi_ch_pcm(struct audio_client *ac,
 			uint32_t rate, uint32_t channels)
 {
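
For q6asm_enc_cfg_blk_pcm_native() just above, a minimal caller sketch, not part of
the patch; prtd->audio_client stands for whatever audio_client the caller already
holds on an opened record session. The rate/channels arguments are only logged: the
command itself carries 0 for both, so the encoder follows the native rate and channel
count of the input stream.

	rc = q6asm_enc_cfg_blk_pcm_native(prtd->audio_client, 48000, 2);
	if (rc < 0)
		pr_err("%s: native PCM enc cfg failed %d\n", __func__, rc);
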
@@ -1694,14 +1768,43 @@
 	enc_cfg.enc_blk.cfg.mpcm.sample_rate = rate;
 	enc_cfg.enc_blk.cfg.mpcm.is_signed = 1;
 	enc_cfg.enc_blk.cfg.mpcm.is_interleaved = 1;
-	enc_cfg.enc_blk.cfg.mpcm.channel_mapping[0] = PCM_CHANNEL_FL;
-	enc_cfg.enc_blk.cfg.mpcm.channel_mapping[1] = PCM_CHANNEL_FR;
-	enc_cfg.enc_blk.cfg.mpcm.channel_mapping[2] = PCM_CHANNEL_RB;
-	enc_cfg.enc_blk.cfg.mpcm.channel_mapping[3] = PCM_CHANNEL_LB;
-	enc_cfg.enc_blk.cfg.mpcm.channel_mapping[4] = 0;
-	enc_cfg.enc_blk.cfg.mpcm.channel_mapping[5] = 0;
-	enc_cfg.enc_blk.cfg.mpcm.channel_mapping[6] = 0;
-	enc_cfg.enc_blk.cfg.mpcm.channel_mapping[7] = 0;
+	if (channels == 2) {
+		enc_cfg.enc_blk.cfg.mpcm.channel_mapping[0] = PCM_CHANNEL_FL;
+		enc_cfg.enc_blk.cfg.mpcm.channel_mapping[1] = PCM_CHANNEL_FR;
+		enc_cfg.enc_blk.cfg.mpcm.channel_mapping[2] = 0;
+		enc_cfg.enc_blk.cfg.mpcm.channel_mapping[3] = 0;
+		enc_cfg.enc_blk.cfg.mpcm.channel_mapping[4] = 0;
+		enc_cfg.enc_blk.cfg.mpcm.channel_mapping[5] = 0;
+		enc_cfg.enc_blk.cfg.mpcm.channel_mapping[6] = 0;
+		enc_cfg.enc_blk.cfg.mpcm.channel_mapping[7] = 0;
+	} else if (channels == 4) {
+		enc_cfg.enc_blk.cfg.mpcm.channel_mapping[0] = PCM_CHANNEL_FL;
+		enc_cfg.enc_blk.cfg.mpcm.channel_mapping[1] = PCM_CHANNEL_FR;
+		enc_cfg.enc_blk.cfg.mpcm.channel_mapping[2] = PCM_CHANNEL_RB;
+		enc_cfg.enc_blk.cfg.mpcm.channel_mapping[3] = PCM_CHANNEL_LB;
+		enc_cfg.enc_blk.cfg.mpcm.channel_mapping[4] = 0;
+		enc_cfg.enc_blk.cfg.mpcm.channel_mapping[5] = 0;
+		enc_cfg.enc_blk.cfg.mpcm.channel_mapping[6] = 0;
+		enc_cfg.enc_blk.cfg.mpcm.channel_mapping[7] = 0;
+	} else if (channels == 6) {
+		enc_cfg.enc_blk.cfg.mpcm.channel_mapping[0] = PCM_CHANNEL_FL;
+		enc_cfg.enc_blk.cfg.mpcm.channel_mapping[1] = PCM_CHANNEL_FR;
+		enc_cfg.enc_blk.cfg.mpcm.channel_mapping[2] = PCM_CHANNEL_LFE;
+		enc_cfg.enc_blk.cfg.mpcm.channel_mapping[3] = PCM_CHANNEL_FC;
+		enc_cfg.enc_blk.cfg.mpcm.channel_mapping[4] = PCM_CHANNEL_LB;
+		enc_cfg.enc_blk.cfg.mpcm.channel_mapping[5] = PCM_CHANNEL_RB;
+		enc_cfg.enc_blk.cfg.mpcm.channel_mapping[6] = 0;
+		enc_cfg.enc_blk.cfg.mpcm.channel_mapping[7] = 0;
+	} else if (channels == 8) {
+		enc_cfg.enc_blk.cfg.mpcm.channel_mapping[0] = PCM_CHANNEL_FL;
+		enc_cfg.enc_blk.cfg.mpcm.channel_mapping[1] = PCM_CHANNEL_FR;
+		enc_cfg.enc_blk.cfg.mpcm.channel_mapping[2] = PCM_CHANNEL_LFE;
+		enc_cfg.enc_blk.cfg.mpcm.channel_mapping[3] = PCM_CHANNEL_FC;
+		enc_cfg.enc_blk.cfg.mpcm.channel_mapping[4] = PCM_CHANNEL_LB;
+		enc_cfg.enc_blk.cfg.mpcm.channel_mapping[5] = PCM_CHANNEL_RB;
+		enc_cfg.enc_blk.cfg.mpcm.channel_mapping[6] = PCM_CHANNEL_FLC;
+		enc_cfg.enc_blk.cfg.mpcm.channel_mapping[7] = PCM_CHANNEL_FRC;
+	}
 
 	rc = apr_send_pkt(ac->apr, (uint32_t *) &enc_cfg);
 	if (rc < 0) {
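
The 2/4/6/8-channel mapping ladder above could also be written table-driven; a sketch
of that alternative, not part of the patch and offered only to make the supported
layouts easy to scan. Counts other than 2/4/6/8 get an all-zero map in this sketch.

	static const u8 example_ch_maps[][8] = {
		[2] = { PCM_CHANNEL_FL, PCM_CHANNEL_FR },
		[4] = { PCM_CHANNEL_FL, PCM_CHANNEL_FR,
			PCM_CHANNEL_RB, PCM_CHANNEL_LB },
		[6] = { PCM_CHANNEL_FL, PCM_CHANNEL_FR, PCM_CHANNEL_LFE,
			PCM_CHANNEL_FC, PCM_CHANNEL_LB, PCM_CHANNEL_RB },
		[8] = { PCM_CHANNEL_FL, PCM_CHANNEL_FR, PCM_CHANNEL_LFE,
			PCM_CHANNEL_FC, PCM_CHANNEL_LB, PCM_CHANNEL_RB,
			PCM_CHANNEL_FLC, PCM_CHANNEL_FRC },
	};
	int i;

	for (i = 0; i < 8; i++)
		enc_cfg.enc_blk.cfg.mpcm.channel_mapping[i] =
			(channels == 2 || channels == 4 ||
			 channels == 6 || channels == 8) ?
				example_ch_maps[channels][i] : 0;
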
@@ -1861,8 +1964,8 @@
 	struct asm_stream_cmd_encdec_cfg_blk enc_cfg;
 	int rc = 0;
 
-	pr_debug("%s:session[%d]frames[%d]min_rate[0x%4x]max_rate[0x%4x] \
-		reduced_rate_level[0x%4x]rate_modulation_cmd[0x%4x]", __func__,
+	pr_debug("%s:session[%d]frames[%d]min_rate[0x%4x]max_rate[0x%4x] reduced_rate_level[0x%4x]rate_modulation_cmd[0x%4x]",
+		 __func__,
 		ac->session, frames_per_buf, min_rate, max_rate,
 		reduced_rate_level, rate_modulation_cmd);
 
@@ -1904,8 +2007,8 @@
 	struct asm_stream_cmd_encdec_cfg_blk enc_cfg;
 	int rc = 0;
 
-	pr_debug("%s:session[%d]frames[%d]min_rate[0x%4x]max_rate[0x%4x] \
-		rate_modulation_cmd[0x%4x]", __func__, ac->session,
+	pr_debug("%s:session[%d]frames[%d]min_rate[0x%4x]max_rate[0x%4x] rate_modulation_cmd[0x%4x]",
+		 __func__, ac->session,
 		frames_per_buf,	min_rate, max_rate, rate_modulation_cmd);
 
 	q6asm_add_hdr(ac, &enc_cfg.hdr, sizeof(enc_cfg), TRUE);
@@ -2238,6 +2341,12 @@
 	case FORMAT_MP3:
 		fmt.format = MP3;
 		break;
+	case FORMAT_DTS:
+		fmt.format = DTS;
+		break;
+	case FORMAT_DTS_LBR:
+		fmt.format = DTS_LBR;
+		break;
 	default:
 		pr_err("Invalid format[%d]\n", format);
 		goto fail_cmd;
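
The FORMAT_DTS/FORMAT_DTS_LBR cases added here mirror the ones added to the
open-write and open-write-compressed switches earlier in the file; a sketch of the
shared mapping they implement, shown only as a possible consolidation and not part of
the patch:

	static int example_asm_media_fmt(uint32_t format)
	{
		switch (format) {
		case FORMAT_MP3:
			return MP3;
		case FORMAT_DTS:
			return DTS;
		case FORMAT_DTS_LBR:
			return DTS_LBR;
		default:
			return -EINVAL;	/* unsupported compressed format */
		}
	}
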
@@ -2267,8 +2376,7 @@
 	struct asm_wma_cfg *wma_cfg = (struct asm_wma_cfg *)cfg;
 	int rc = 0;
 
-	pr_debug("session[%d]format_tag[0x%4x] rate[%d] ch[0x%4x] bps[%d],\
-		balign[0x%4x], bit_sample[0x%4x], ch_msk[%d], enc_opt[0x%4x]\n",
+	pr_debug("session[%d]format_tag[0x%4x] rate[%d] ch[0x%4x] bps[%d], balign[0x%4x], bit_sample[0x%4x], ch_msk[%d], enc_opt[0x%4x]\n",
 		ac->session, wma_cfg->format_tag, wma_cfg->sample_rate,
 		wma_cfg->ch_cfg, wma_cfg->avg_bytes_per_sec,
 		wma_cfg->block_align, wma_cfg->valid_bits_per_sample,
@@ -2319,9 +2427,7 @@
 	struct asm_wmapro_cfg *wmapro_cfg = (struct asm_wmapro_cfg *)cfg;
 	int rc = 0;
 
-	pr_debug("session[%d]format_tag[0x%4x] rate[%d] ch[0x%4x] bps[%d],"
-		"balign[0x%4x], bit_sample[0x%4x], ch_msk[%d], enc_opt[0x%4x],\
-		adv_enc_opt[0x%4x], adv_enc_opt2[0x%8x]\n",
+	pr_debug("session[%d]format_tag[0x%4x] rate[%d] ch[0x%4x] bps[%d], balign[0x%4x], bit_sample[0x%4x], ch_msk[%d], enc_opt[0x%4x], adv_enc_opt[0x%4x], adv_enc_opt2[0x%8x]\n",
 		ac->session, wmapro_cfg->format_tag, wmapro_cfg->sample_rate,
 		wmapro_cfg->ch_cfg,  wmapro_cfg->avg_bytes_per_sec,
 		wmapro_cfg->block_align, wmapro_cfg->valid_bits_per_sample,
@@ -2778,8 +2884,8 @@
 	params->period = pause_param->period;
 	params->step = pause_param->step;
 	params->rampingcurve = pause_param->rampingcurve;
-	pr_debug("%s: soft Pause Command: enable = %d, period = %d,"
-			 "step = %d, curve = %d\n", __func__, params->enable,
+	pr_debug("%s: soft Pause Command: enable = %d, period = %d, step = %d, curve = %d\n",
+			 __func__, params->enable,
 			 params->period, params->step, params->rampingcurve);
 	rc = apr_send_pkt(ac->apr, (uint32_t *) vol_cmd);
 	if (rc < 0) {
@@ -2791,8 +2897,8 @@
 	rc = wait_event_timeout(ac->cmd_wait,
 			(atomic_read(&ac->cmd_state) == 0), 5*HZ);
 	if (!rc) {
-		pr_err("%s: timeout in sending volume command(soft_pause)"
-		       "to apr\n", __func__);
+		pr_err("%s: timeout in sending volume command(soft_pause) to apr\n",
+						 __func__);
 		rc = -EINVAL;
 		goto fail_cmd;
 	}
@@ -2837,13 +2943,13 @@
 	params->period = softvol_param->period;
 	params->step = softvol_param->step;
 	params->rampingcurve = softvol_param->rampingcurve;
-	pr_debug("%s: soft Volume:opcode = %d,payload_sz =%d,module_id =%d,"
-			 "param_id = %d, param_sz = %d\n", __func__,
+	pr_debug("%s: soft Volume:opcode = %d,payload_sz =%d,module_id =%d, param_id = %d, param_sz = %d\n",
+			 __func__,
 			cmd->hdr.opcode, cmd->payload_size,
 			cmd->params.module_id, cmd->params.param_id,
 			cmd->params.param_size);
-	pr_debug("%s: soft Volume Command: period = %d,"
-			 "step = %d, curve = %d\n", __func__, params->period,
+	pr_debug("%s: soft Volume Command: period = %d, step = %d, curve = %d\n",
+			 __func__, params->period,
 			 params->step, params->rampingcurve);
 	rc = apr_send_pkt(ac->apr, (uint32_t *) vol_cmd);
 	if (rc < 0) {
@@ -2855,8 +2961,8 @@
 	rc = wait_event_timeout(ac->cmd_wait,
 			(atomic_read(&ac->cmd_state) == 0), 5*HZ);
 	if (!rc) {
-		pr_err("%s: timeout in sending volume command(soft_volume)"
-		       "to apr\n", __func__);
+		pr_err("%s: timeout in sending volume command(soft_volume) to apr\n",
+							 __func__);
 		rc = -EINVAL;
 		goto fail_cmd;
 	}
@@ -3197,8 +3303,8 @@
 			if ((strncmp(((char *)ab->data), zero_pattern, 2)) &&
 			(!strncmp(((char *)ab->data + 2), zero_pattern, 2))) {
 				do_gettimeofday(&out_warm_tv);
-				pr_debug("WARM:apr_send_pkt at \
-				%ld sec %ld microsec\n", out_warm_tv.tv_sec,\
+				pr_debug("WARM:apr_send_pkt at %ld sec %ld microsec\n",
+				 out_warm_tv.tv_sec,
 				out_warm_tv.tv_usec);
 				pr_debug("Warm Pattern Matched");
 			}
@@ -3207,8 +3313,8 @@
 			else if ((!strncmp(((char *)ab->data), zero_pattern, 2))
 			&& (strncmp(((char *)ab->data + 2), zero_pattern, 2))) {
 				do_gettimeofday(&out_cont_tv);
-				pr_debug("CONT:apr_send_pkt at \
-				%ld sec %ld microsec\n", out_cont_tv.tv_sec,\
+				pr_debug("CONT:apr_send_pkt at %ld sec %ld microsec\n",
+				out_cont_tv.tv_sec,
 				out_cont_tv.tv_usec);
 				pr_debug("Cont Pattern Matched");
 			}
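
The latency-debug branches above classify the first output buffer by its first two
16-bit samples; a standalone sketch of the two checks, not part of the patch, with
zero_pattern being the two-byte zero string already used by this code:

	static bool example_is_warm_buffer(const char *data)
	{
		/* first sample non-zero, second zero: warm-up output */
		return strncmp(data, zero_pattern, 2) &&
		       !strncmp(data + 2, zero_pattern, 2);
	}

	static bool example_is_cont_buffer(const char *data)
	{
		/* first sample zero, second non-zero: continuous output */
		return !strncmp(data, zero_pattern, 2) &&
		       strncmp(data + 2, zero_pattern, 2);
	}
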
diff --git a/sound/soc/msm/qdsp6v2/Makefile b/sound/soc/msm/qdsp6v2/Makefile
index 6f765d1..ff2cc8d 100644
--- a/sound/soc/msm/qdsp6v2/Makefile
+++ b/sound/soc/msm/qdsp6v2/Makefile
@@ -1,4 +1,4 @@
 snd-soc-qdsp6v2-objs += msm-dai-q6-v2.o msm-pcm-q6-v2.o msm-pcm-routing-v2.o msm-compr-q6-v2.o  msm-multi-ch-pcm-q6-v2.o
-snd-soc-qdsp6v2-objs += msm-pcm-lpa-v2.o msm-pcm-afe-v2.o
+snd-soc-qdsp6v2-objs += msm-pcm-lpa-v2.o msm-pcm-afe-v2.o msm-pcm-voip-v2.o msm-pcm-voice-v2.o
 obj-$(CONFIG_SND_SOC_QDSP6V2) += snd-soc-qdsp6v2.o
-obj-y += q6adm.o q6afe.o q6asm.o q6audio-v2.o
+obj-y += q6adm.o q6afe.o q6asm.o q6audio-v2.o q6voice.o
diff --git a/sound/soc/msm/qdsp6v2/msm-dai-q6-v2.c b/sound/soc/msm/qdsp6v2/msm-dai-q6-v2.c
index 1605062..783a03d 100644
--- a/sound/soc/msm/qdsp6v2/msm-dai-q6-v2.c
+++ b/sound/soc/msm/qdsp6v2/msm-dai-q6-v2.c
@@ -18,6 +18,7 @@
 #include <linux/bitops.h>
 #include <linux/slab.h>
 #include <linux/clk.h>
+#include <linux/of_device.h>
 #include <sound/core.h>
 #include <sound/pcm.h>
 #include <sound/soc.h>
@@ -42,239 +43,7 @@
 static struct clk *pcm_clk;
 static DEFINE_MUTEX(aux_pcm_mutex);
 static int aux_pcm_count;
-static struct msm_dai_auxpcm_pdata *auxpcm_plat_data;
 
-static u8 num_of_bits_set(u8 sd_line_mask)
-{
-	u8 num_bits_set = 0;
-
-	while (sd_line_mask) {
-		num_bits_set++;
-		sd_line_mask = sd_line_mask & (sd_line_mask - 1);
-	}
-	return num_bits_set;
-}
-
-static int msm_dai_q6_cdc_hw_params(struct snd_pcm_hw_params *params,
-				    struct snd_soc_dai *dai, int stream)
-{
-	struct msm_dai_q6_dai_data *dai_data = dev_get_drvdata(dai->dev);
-
-	dai_data->channels = params_channels(params);
-	switch (dai_data->channels) {
-	case 2:
-		dai_data->port_config.i2s.mono_stereo = MSM_AFE_STEREO;
-		break;
-	case 1:
-		dai_data->port_config.i2s.mono_stereo = MSM_AFE_MONO;
-		break;
-	default:
-		return -EINVAL;
-		break;
-	}
-	dai_data->rate = params_rate(params);
-	dai_data->port_config.i2s.sample_rate = dai_data->rate;
-	dai_data->port_config.i2s.i2s_cfg_minor_version =
-						AFE_API_VERSION_I2S_CONFIG;
-	dai_data->port_config.i2s.data_format =  AFE_LINEAR_PCM_DATA;
-	dev_dbg(dai->dev, " channel %d sample rate %d entered\n",
-	dai_data->channels, dai_data->rate);
-
-	/* Q6 only supports 16 as now */
-	dai_data->port_config.i2s.bit_width = 16;
-	dai_data->port_config.i2s.channel_mode = 1;
-	return 0;
-}
-
-static int msm_dai_q6_i2s_hw_params(struct snd_pcm_hw_params *params,
-				    struct snd_soc_dai *dai, int stream)
-{
-	struct msm_dai_q6_dai_data *dai_data = dev_get_drvdata(dai->dev);
-	struct msm_i2s_data *i2s_pdata =
-			(struct msm_i2s_data *) dai->dev->platform_data;
-
-	dai_data->channels = params_channels(params);
-	if (num_of_bits_set(i2s_pdata->sd_lines) == 1) {
-		switch (dai_data->channels) {
-		case 2:
-			dai_data->port_config.i2s.mono_stereo = MSM_AFE_STEREO;
-			break;
-		case 1:
-			dai_data->port_config.i2s.mono_stereo = MSM_AFE_MONO;
-			break;
-		default:
-			pr_warn("greater than stereo has not been validated");
-			break;
-		}
-	}
-	dai_data->rate = params_rate(params);
-	dai_data->port_config.i2s.sample_rate = dai_data->rate;
-	dai_data->port_config.i2s.i2s_cfg_minor_version =
-						AFE_API_VERSION_I2S_CONFIG;
-	dai_data->port_config.i2s.data_format =  AFE_LINEAR_PCM_DATA;
-	/* Q6 only supports 16 as now */
-	dai_data->port_config.i2s.bit_width = 16;
-	dai_data->port_config.i2s.channel_mode = 1;
-
-	return 0;
-}
-
-static int msm_dai_q6_i2s_platform_data_validation(
-					struct snd_soc_dai *dai)
-{
-	u8 num_of_sd_lines;
-	struct msm_dai_q6_dai_data *dai_data = dev_get_drvdata(dai->dev);
-	struct msm_i2s_data *i2s_pdata =
-			(struct msm_i2s_data *)dai->dev->platform_data;
-	struct snd_soc_dai_driver *dai_driver =
-			(struct snd_soc_dai_driver *)dai->driver;
-
-	num_of_sd_lines = num_of_bits_set(i2s_pdata->sd_lines);
-
-	switch (num_of_sd_lines) {
-	case 1:
-		switch (i2s_pdata->sd_lines) {
-		case MSM_MI2S_SD0:
-			dai_data->port_config.i2s.channel_mode =
-					AFE_PORT_I2S_SD0;
-			break;
-		case MSM_MI2S_SD1:
-			dai_data->port_config.i2s.channel_mode =
-					AFE_PORT_I2S_SD1;
-			break;
-		case MSM_MI2S_SD2:
-			dai_data->port_config.i2s.channel_mode =
-					AFE_PORT_I2S_SD2;
-			break;
-		case MSM_MI2S_SD3:
-			dai_data->port_config.i2s.channel_mode =
-					AFE_PORT_I2S_SD3;
-			break;
-		default:
-			pr_err("%s: invalid SD line\n",
-				   __func__);
-			goto error_invalid_data;
-		}
-		break;
-	case 2:
-		switch (i2s_pdata->sd_lines) {
-		case MSM_MI2S_SD0 | MSM_MI2S_SD1:
-			dai_data->port_config.i2s.channel_mode =
-					AFE_PORT_I2S_QUAD01;
-			break;
-		case MSM_MI2S_SD2 | MSM_MI2S_SD3:
-			dai_data->port_config.i2s.channel_mode =
-					AFE_PORT_I2S_QUAD23;
-			break;
-		default:
-			pr_err("%s: invalid SD line\n",
-				   __func__);
-			goto error_invalid_data;
-		}
-		break;
-	case 3:
-		switch (i2s_pdata->sd_lines) {
-		case MSM_MI2S_SD0 | MSM_MI2S_SD1 | MSM_MI2S_SD2:
-			dai_data->port_config.i2s.channel_mode =
-					AFE_PORT_I2S_6CHS;
-			break;
-		default:
-			pr_err("%s: invalid SD lines\n",
-				   __func__);
-			goto error_invalid_data;
-		}
-		break;
-	case 4:
-		switch (i2s_pdata->sd_lines) {
-		case MSM_MI2S_SD0 | MSM_MI2S_SD1 | MSM_MI2S_SD2 | MSM_MI2S_SD3:
-			dai_data->port_config.i2s.channel_mode =
-					AFE_PORT_I2S_8CHS;
-			break;
-		default:
-			pr_err("%s: invalid SD lines\n",
-				   __func__);
-			goto error_invalid_data;
-		}
-		break;
-	default:
-		pr_err("%s: invalid SD lines\n", __func__);
-		goto error_invalid_data;
-	}
-	if (i2s_pdata->capability == MSM_MI2S_CAP_RX)
-		dai_driver->playback.channels_max = num_of_sd_lines << 1;
-
-	return 0;
-
-error_invalid_data:
-	return -EINVAL;
-}
-
-static int msm_dai_q6_cdc_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
-{
-	struct msm_dai_q6_dai_data *dai_data = dev_get_drvdata(dai->dev);
-
-	switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
-	case SND_SOC_DAIFMT_CBS_CFS:
-		dai_data->port_config.i2s.ws_src = 1; /* CPU is master */
-		break;
-	case SND_SOC_DAIFMT_CBM_CFM:
-		dai_data->port_config.i2s.ws_src = 0; /* CPU is slave */
-		break;
-	default:
-		return -EINVAL;
-	}
-
-	return 0;
-}
-
-
-static int msm_dai_q6_slim_bus_hw_params(struct snd_pcm_hw_params *params,
-				    struct snd_soc_dai *dai, int stream)
-{
-	struct msm_dai_q6_dai_data *dai_data = dev_get_drvdata(dai->dev);
-
-	dai_data->channels = params_channels(params);
-	dai_data->rate = params_rate(params);
-
-	/* Q6 only supports 16 as now */
-	dai_data->port_config.slim_sch.sb_cfg_minor_version =
-				AFE_API_VERSION_SLIMBUS_CONFIG;
-	dai_data->port_config.slim_sch.bit_width = 16;
-	dai_data->port_config.slim_sch.data_format = 0;
-	dai_data->port_config.slim_sch.num_channels = dai_data->channels;
-	dai_data->port_config.slim_sch.sample_rate = dai_data->rate;
-
-	dev_dbg(dai->dev, "%s:slimbus_dev_id[%hu] bit_wd[%hu] format[%hu]\n"
-		"num_channel %hu  shared_ch_mapping[0]  %hu\n"
-		"slave_port_mapping[1]  %hu slave_port_mapping[2]  %hu\n"
-		"sample_rate %d\n", __func__,
-		dai_data->port_config.slim_sch.slimbus_dev_id,
-		dai_data->port_config.slim_sch.bit_width,
-		dai_data->port_config.slim_sch.data_format,
-		dai_data->port_config.slim_sch.num_channels,
-		dai_data->port_config.slim_sch.shared_ch_mapping[0],
-		dai_data->port_config.slim_sch.shared_ch_mapping[1],
-		dai_data->port_config.slim_sch.shared_ch_mapping[2],
-		dai_data->rate);
-
-	return 0;
-}
-
-static int msm_dai_q6_bt_fm_hw_params(struct snd_pcm_hw_params *params,
-				struct snd_soc_dai *dai, int stream)
-{
-	struct msm_dai_q6_dai_data *dai_data = dev_get_drvdata(dai->dev);
-
-	dai_data->channels = params_channels(params);
-	dai_data->rate = params_rate(params);
-
-	dev_dbg(dai->dev, "channels %d sample rate %d entered\n",
-		dai_data->channels, dai_data->rate);
-
-	memset(&dai_data->port_config, 0, sizeof(dai_data->port_config));
-
-	return 0;
-}
 static int msm_dai_q6_auxpcm_hw_params(
 				struct snd_pcm_substream *substream,
 				struct snd_pcm_hw_params *params,
@@ -311,83 +80,6 @@
 	return 0;
 }
 
-static int msm_dai_q6_afe_rtproxy_hw_params(struct snd_pcm_hw_params *params,
-				struct snd_soc_dai *dai)
-{
-	struct msm_dai_q6_dai_data *dai_data = dev_get_drvdata(dai->dev);
-
-	dai_data->rate = params_rate(params);
-	dai_data->port_config.rtproxy.num_channels = params_channels(params);
-	dai_data->port_config.rtproxy.sample_rate = params_rate(params);
-
-	pr_debug("channel %d entered,dai_id: %d,rate: %d\n",
-	dai_data->port_config.rtproxy.num_channels, dai->id, dai_data->rate);
-
-	dai_data->port_config.rtproxy.rt_proxy_cfg_minor_version =
-				AFE_API_VERSION_RT_PROXY_CONFIG;
-	dai_data->port_config.rtproxy.bit_width = 16; /* Q6 only supports 16 */
-	dai_data->port_config.rtproxy.interleaved = 1;
-	dai_data->port_config.rtproxy.frame_size = params_period_bytes(params);
-	dai_data->port_config.rtproxy.jitter_allowance =
-				dai_data->port_config.rtproxy.frame_size/2;
-	dai_data->port_config.rtproxy.low_water_mark = 0;
-	dai_data->port_config.rtproxy.high_water_mark = 0;
-
-	return 0;
-}
-
-/* Current implementation assumes hw_param is called once
- * This may not be the case but what to do when ADM and AFE
- * port are already opened and parameter changes
- */
-static int msm_dai_q6_hw_params(struct snd_pcm_substream *substream,
-				struct snd_pcm_hw_params *params,
-				struct snd_soc_dai *dai)
-{
-	int rc = 0;
-
-	switch (dai->id) {
-	case PRIMARY_I2S_TX:
-	case PRIMARY_I2S_RX:
-	case SECONDARY_I2S_RX:
-		rc = msm_dai_q6_cdc_hw_params(params, dai, substream->stream);
-		break;
-	case MI2S_RX:
-		rc = msm_dai_q6_i2s_hw_params(params, dai, substream->stream);
-		break;
-	case SLIMBUS_0_RX:
-	case SLIMBUS_1_RX:
-	case SLIMBUS_0_TX:
-	case SLIMBUS_1_TX:
-		rc = msm_dai_q6_slim_bus_hw_params(params, dai,
-				substream->stream);
-		break;
-	case INT_BT_SCO_RX:
-	case INT_BT_SCO_TX:
-	case INT_FM_RX:
-	case INT_FM_TX:
-		rc = msm_dai_q6_bt_fm_hw_params(params, dai, substream->stream);
-		break;
-	case RT_PROXY_DAI_001_TX:
-	case RT_PROXY_DAI_001_RX:
-	case RT_PROXY_DAI_002_TX:
-	case RT_PROXY_DAI_002_RX:
-		rc = msm_dai_q6_afe_rtproxy_hw_params(params, dai);
-		break;
-	case VOICE_PLAYBACK_TX:
-	case VOICE_RECORD_RX:
-	case VOICE_RECORD_TX:
-		rc = 0;
-		break;
-	default:
-		dev_err(dai->dev, "invalid AFE port ID\n");
-		rc = -EINVAL;
-		break;
-	}
-
-	return rc;
-}
-
 static void msm_dai_q6_auxpcm_shutdown(struct snd_pcm_substream *substream,
 				struct snd_soc_dai *dai)
 {
@@ -396,8 +88,8 @@
 	mutex_lock(&aux_pcm_mutex);
 
 	if (aux_pcm_count == 0) {
-		dev_dbg(dai->dev, "%s(): dai->id %d aux_pcm_count is 0. Just"
-				" return\n", __func__, dai->id);
+		dev_dbg(dai->dev, "%s(): dai->id %d aux_pcm_count is 0. Just return\n",
+				__func__, dai->id);
 		mutex_unlock(&aux_pcm_mutex);
 		return;
 	}
@@ -410,8 +102,7 @@
 		mutex_unlock(&aux_pcm_mutex);
 		return;
 	} else if (aux_pcm_count < 0) {
-		dev_err(dai->dev, "%s(): ERROR: dai->id %d"
-			" aux_pcm_count = %d < 0\n",
+		dev_err(dai->dev, "%s(): ERROR: dai->id %d aux_pcm_count = %d < 0\n",
 			__func__, dai->id, aux_pcm_count);
 		aux_pcm_count = 0;
 		mutex_unlock(&aux_pcm_mutex);
@@ -432,33 +123,6 @@
 	mutex_unlock(&aux_pcm_mutex);
 }
 
-static void msm_dai_q6_shutdown(struct snd_pcm_substream *substream,
-				struct snd_soc_dai *dai)
-{
-	struct msm_dai_q6_dai_data *dai_data = dev_get_drvdata(dai->dev);
-	int rc = 0;
-
-	if (test_bit(STATUS_PORT_STARTED, dai_data->status_mask)) {
-		switch (dai->id) {
-		case VOICE_PLAYBACK_TX:
-		case VOICE_RECORD_TX:
-		case VOICE_RECORD_RX:
-			pr_debug("%s, stop pseudo port:%d\n",
-						__func__,  dai->id);
-			rc = afe_stop_pseudo_port(dai->id);
-			break;
-		default:
-			rc = afe_close(dai->id); /* can block */
-			break;
-		}
-		if (IS_ERR_VALUE(rc))
-			dev_err(dai->dev, "fail to close AFE port\n");
-		pr_debug("%s: dai_data->status_mask = %ld\n", __func__,
-			*dai_data->status_mask);
-		clear_bit(STATUS_PORT_STARTED, dai_data->status_mask);
-	}
-}
-
 static int msm_dai_q6_auxpcm_prepare(struct snd_pcm_substream *substream,
 		struct snd_soc_dai *dai)
 {
@@ -468,13 +132,12 @@
 	mutex_lock(&aux_pcm_mutex);
 
 	if (aux_pcm_count == 2) {
-		dev_dbg(dai->dev, "%s(): dai->id %d aux_pcm_count is 2. Just"
-			" return.\n", __func__, dai->id);
+		dev_dbg(dai->dev, "%s(): dai->id %d aux_pcm_count is 2. Just return.\n",
+			__func__, dai->id);
 		mutex_unlock(&aux_pcm_mutex);
 		return 0;
 	} else if (aux_pcm_count > 2) {
-		dev_err(dai->dev, "%s(): ERROR: dai->id %d"
-			" aux_pcm_count = %d > 2\n",
+		dev_err(dai->dev, "%s(): ERROR: dai->id %d aux_pcm_count = %d > 2\n",
 			__func__, dai->id, aux_pcm_count);
 		mutex_unlock(&aux_pcm_mutex);
 		return 0;
@@ -482,8 +145,8 @@
 
 	aux_pcm_count++;
 	if (aux_pcm_count == 2)  {
-		dev_dbg(dai->dev, "%s(): dai->id %d aux_pcm_count = %d after "
-			" increment\n", __func__, dai->id, aux_pcm_count);
+		dev_dbg(dai->dev, "%s(): dai->id %d aux_pcm_count = %d after increment\n",
+				__func__, dai->id, aux_pcm_count);
 		mutex_unlock(&aux_pcm_mutex);
 		return 0;
 	}
@@ -516,21 +179,6 @@
 	return rc;
 }
 
-static int msm_dai_q6_prepare(struct snd_pcm_substream *substream,
-		struct snd_soc_dai *dai)
-{
-	struct msm_dai_q6_dai_data *dai_data = dev_get_drvdata(dai->dev);
-	int rc = 0;
-
-	if (!test_bit(STATUS_PORT_STARTED, dai_data->status_mask)) {
-		/* PORT START should be set if prepare called in active state */
-		rc = afe_q6_interface_prepare();
-		if (IS_ERR_VALUE(rc))
-			dev_err(dai->dev, "fail to open AFE APR\n");
-	}
-	return rc;
-}
-
 static int msm_dai_q6_auxpcm_trigger(struct snd_pcm_substream *substream,
 		int cmd, struct snd_soc_dai *dai)
 {
@@ -560,82 +208,18 @@
 
 }
 
-static int msm_dai_q6_trigger(struct snd_pcm_substream *substream, int cmd,
-		struct snd_soc_dai *dai)
-{
-	struct msm_dai_q6_dai_data *dai_data = dev_get_drvdata(dai->dev);
-	int rc = 0;
-
-	/* Start/stop port without waiting for Q6 AFE response. Need to have
-	 * native q6 AFE driver propagates AFE response in order to handle
-	 * port start/stop command error properly if error does arise.
-	 */
-	pr_debug("%s:port:%d  cmd:%d dai_data->status_mask = %ld",
-		__func__, dai->id, cmd, *dai_data->status_mask);
-	switch (cmd) {
-	case SNDRV_PCM_TRIGGER_START:
-	case SNDRV_PCM_TRIGGER_RESUME:
-	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
-		if (!test_bit(STATUS_PORT_STARTED, dai_data->status_mask)) {
-			switch (dai->id) {
-			case VOICE_PLAYBACK_TX:
-			case VOICE_RECORD_TX:
-			case VOICE_RECORD_RX:
-				afe_pseudo_port_start_nowait(dai->id);
-				break;
-			default:
-				afe_port_start_nowait(dai->id,
-					&dai_data->port_config, dai_data->rate);
-				break;
-			}
-			set_bit(STATUS_PORT_STARTED,
-				dai_data->status_mask);
-		}
-		break;
-	case SNDRV_PCM_TRIGGER_STOP:
-	case SNDRV_PCM_TRIGGER_SUSPEND:
-	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
-		if (test_bit(STATUS_PORT_STARTED, dai_data->status_mask)) {
-			switch (dai->id) {
-			case VOICE_PLAYBACK_TX:
-			case VOICE_RECORD_TX:
-			case VOICE_RECORD_RX:
-				afe_pseudo_port_stop_nowait(dai->id);
-				break;
-			default:
-				afe_port_stop_nowait(dai->id);
-				break;
-			}
-			clear_bit(STATUS_PORT_STARTED,
-				dai_data->status_mask);
-		}
-		break;
-
-	default:
-		rc = -EINVAL;
-	}
-
-	return rc;
-}
 static int msm_dai_q6_dai_auxpcm_probe(struct snd_soc_dai *dai)
 {
 	struct msm_dai_q6_dai_data *dai_data;
 	int rc = 0;
+	struct msm_dai_auxpcm_pdata *auxpcm_pdata = NULL;
 
-	struct msm_dai_auxpcm_pdata *auxpcm_pdata =
-			(struct msm_dai_auxpcm_pdata *) dai->dev->platform_data;
+	auxpcm_pdata = (struct msm_dai_auxpcm_pdata *)
+					dev_get_drvdata(dai->dev);
+	dai->dev->platform_data = auxpcm_pdata;
 
 	mutex_lock(&aux_pcm_mutex);
 
-	if (!auxpcm_plat_data)
-		auxpcm_plat_data = auxpcm_pdata;
-	else if (auxpcm_plat_data != auxpcm_pdata) {
-
-		dev_err(dai->dev, "AUX PCM RX and TX devices does not have"
-				" same platform data\n");
-		return -EINVAL;
-	}
-
 	/*
 	 * The clk name for AUX PCM operation is passed as platform
 	 * data to the cpu driver, since cpu drive is unaware of any
@@ -669,8 +253,8 @@
 	mutex_lock(&aux_pcm_mutex);
 
 	if (aux_pcm_count == 0) {
-		dev_dbg(dai->dev, "%s(): dai->id %d aux_pcm_count is 0. clean"
-				" up and return\n", __func__, dai->id);
+		dev_dbg(dai->dev, "%s(): dai->id %d aux_pcm_count is 0. clean up and return\n",
+					__func__, dai->id);
 		goto done;
 	}
 
@@ -681,14 +265,12 @@
 			__func__, dai->id, aux_pcm_count);
 		goto done;
 	} else if (aux_pcm_count < 0) {
-		dev_err(dai->dev, "%s(): ERROR: dai->id %d"
-			" aux_pcm_count = %d < 0\n",
+		dev_err(dai->dev, "%s(): ERROR: dai->id %d aux_pcm_count = %d < 0\n",
 			__func__, dai->id, aux_pcm_count);
 		goto done;
 	}
 
-	dev_dbg(dai->dev, "%s(): dai->id %d aux_pcm_count = %d."
-			"closing afe\n",
+	dev_dbg(dai->dev, "%s(): dai->id %d aux_pcm_count = %d. closing afe\n",
 		__func__, dai->id, aux_pcm_count);
 
 	rc = afe_close(PCM_RX); /* can block */
@@ -707,172 +289,6 @@
 
 	return 0;
 }
-static int msm_dai_q6_dai_i2s_probe(struct snd_soc_dai *dai)
-{
-	struct msm_dai_q6_dai_data *dai_data;
-	int rc = 0;
-
-	dai_data = kzalloc(sizeof(struct msm_dai_q6_dai_data),
-		GFP_KERNEL);
-
-	if (!dai_data) {
-		dev_err(dai->dev, "DAI-%d: fail to allocate dai data\n",
-		dai->id);
-		rc = -ENOMEM;
-		goto rtn;
-	} else
-		dev_set_drvdata(dai->dev, dai_data);
-
-	rc = msm_dai_q6_i2s_platform_data_validation(dai);
-	if (rc != 0) {
-		pr_err("%s: The msm_dai_q6_i2s_platform_data_validation failed\n",
-			    __func__);
-		kfree(dai_data);
-	}
-rtn:
-	return rc;
-}
-
-static int msm_dai_q6_dai_probe(struct snd_soc_dai *dai)
-{
-	struct msm_dai_q6_dai_data *dai_data;
-	int rc = 0;
-
-	dai_data = kzalloc(sizeof(struct msm_dai_q6_dai_data),
-		GFP_KERNEL);
-
-	if (!dai_data) {
-		dev_err(dai->dev, "DAI-%d: fail to allocate dai data\n",
-		dai->id);
-		rc = -ENOMEM;
-	} else
-		dev_set_drvdata(dai->dev, dai_data);
-
-	return rc;
-}
-
-static int msm_dai_q6_dai_remove(struct snd_soc_dai *dai)
-{
-	struct msm_dai_q6_dai_data *dai_data;
-	int rc;
-
-	dai_data = dev_get_drvdata(dai->dev);
-
-	/* If AFE port is still up, close it */
-	if (test_bit(STATUS_PORT_STARTED, dai_data->status_mask)) {
-		switch (dai->id) {
-		case VOICE_PLAYBACK_TX:
-		case VOICE_RECORD_TX:
-		case VOICE_RECORD_RX:
-			pr_debug("%s, stop pseudo port:%d\n",
-						__func__,  dai->id);
-			rc = afe_stop_pseudo_port(dai->id);
-			break;
-		default:
-			rc = afe_close(dai->id); /* can block */
-		}
-		if (IS_ERR_VALUE(rc))
-			dev_err(dai->dev, "fail to close AFE port\n");
-		clear_bit(STATUS_PORT_STARTED, dai_data->status_mask);
-	}
-	kfree(dai_data);
-	snd_soc_unregister_dai(dai->dev);
-
-	return 0;
-}
-
-static int msm_dai_q6_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
-{
-	int rc = 0;
-
-	dev_dbg(dai->dev, "enter %s, id = %d fmt[%d]\n", __func__,
-							dai->id, fmt);
-	switch (dai->id) {
-	case PRIMARY_I2S_TX:
-	case PRIMARY_I2S_RX:
-	case MI2S_RX:
-	case SECONDARY_I2S_RX:
-		rc = msm_dai_q6_cdc_set_fmt(dai, fmt);
-		break;
-	default:
-		dev_err(dai->dev, "invalid cpu_dai set_fmt\n");
-		rc = -EINVAL;
-		break;
-	}
-
-	return rc;
-}
-
-static int msm_dai_q6_set_channel_map(struct snd_soc_dai *dai,
-				unsigned int tx_num, unsigned int *tx_slot,
-				unsigned int rx_num, unsigned int *rx_slot)
-
-{
-	int rc = 0;
-	struct msm_dai_q6_dai_data *dai_data = dev_get_drvdata(dai->dev);
-	unsigned int i = 0;
-
-	dev_dbg(dai->dev, "enter %s, id = %d\n", __func__,
-							dai->id);
-	switch (dai->id) {
-	case SLIMBUS_0_RX:
-	case SLIMBUS_1_RX:
-		/* channel number to be between 128 and 255. For RX port
-		 * use channel numbers from 138 to 144, for TX port
-		 * use channel numbers from 128 to 137
-		 * For ports between MDM-APQ use channel numbers from 145
-		 */
-		if (!rx_slot)
-			return -EINVAL;
-		for (i = 0; i < rx_num; i++) {
-			dai_data->port_config.slim_sch.shared_ch_mapping[i] =
-							rx_slot[i];
-			pr_debug("%s: find number of channels[%d] ch[%d]\n",
-							__func__, i,
-							rx_slot[i]);
-		}
-		dai_data->port_config.slim_sch.num_channels = rx_num;
-		pr_debug("%s:SLIMBUS_0_RX cnt[%d] ch[%d %d]\n", __func__,
-		rx_num, dai_data->port_config.slim_sch.shared_ch_mapping[0],
-		dai_data->port_config.slim_sch.shared_ch_mapping[1]);
-
-		break;
-	case SLIMBUS_0_TX:
-	case SLIMBUS_1_TX:
-		/* channel number to be between 128 and 255. For RX port
-		 * use channel numbers from 138 to 144, for TX port
-		 * use channel numbers from 128 to 137
-		 * For ports between MDM-APQ use channel numbers from 145
-		 */
-		if (!tx_slot)
-			return -EINVAL;
-		for (i = 0; i < tx_num; i++) {
-			dai_data->port_config.slim_sch.shared_ch_mapping[i] =
-							tx_slot[i];
-			pr_debug("%s: find number of channels[%d] ch[%d]\n",
-						__func__, i, tx_slot[i]);
-		}
-		dai_data->port_config.slim_sch.num_channels = tx_num;
-		pr_debug("%s:SLIMBUS_0_TX cnt[%d] ch[%d %d]\n", __func__,
-		tx_num, dai_data->port_config.slim_sch.shared_ch_mapping[0],
-		dai_data->port_config.slim_sch.shared_ch_mapping[1]);
-		break;
-	default:
-		dev_err(dai->dev, "invalid cpu_dai set_fmt\n");
-		rc = -EINVAL;
-		break;
-	}
-	return rc;
-}
-
-static struct snd_soc_dai_ops msm_dai_q6_ops = {
-	.prepare	= msm_dai_q6_prepare,
-	.trigger	= msm_dai_q6_trigger,
-	.hw_params	= msm_dai_q6_hw_params,
-	.shutdown	= msm_dai_q6_shutdown,
-	.set_fmt	= msm_dai_q6_set_fmt,
-	.set_channel_map = msm_dai_q6_set_channel_map,
-};
 
 static struct snd_soc_dai_ops msm_dai_q6_auxpcm_ops = {
 	.prepare	= msm_dai_q6_auxpcm_prepare,
@@ -881,184 +297,6 @@
 	.shutdown	= msm_dai_q6_auxpcm_shutdown,
 };
 
-static struct snd_soc_dai_driver msm_dai_q6_i2s_rx_dai = {
-	.playback = {
-		.rates = SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_8000 |
-		SNDRV_PCM_RATE_16000,
-		.formats = SNDRV_PCM_FMTBIT_S16_LE,
-		.channels_min = 1,
-		.channels_max = 4,
-		.rate_min =     8000,
-		.rate_max =	48000,
-	},
-	.ops = &msm_dai_q6_ops,
-	.probe = msm_dai_q6_dai_i2s_probe,
-	.remove = msm_dai_q6_dai_remove,
-};
-
-static struct snd_soc_dai_driver msm_dai_q6_i2s_tx_dai = {
-	.capture = {
-		.rates = SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_8000 |
-		SNDRV_PCM_RATE_16000,
-		.formats = SNDRV_PCM_FMTBIT_S16_LE,
-		.channels_min = 1,
-		.channels_max = 2,
-		.rate_min =     8000,
-		.rate_max =	48000,
-	},
-	.ops = &msm_dai_q6_ops,
-	.probe = msm_dai_q6_dai_probe,
-	.remove = msm_dai_q6_dai_remove,
-};
-
-static struct snd_soc_dai_driver msm_dai_q6_afe_rx_dai = {
-	.playback = {
-		.rates = SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_8000 |
-		SNDRV_PCM_RATE_16000,
-		.formats = SNDRV_PCM_FMTBIT_S16_LE,
-		.channels_min = 1,
-		.channels_max = 2,
-		.rate_min =     8000,
-		.rate_max =	48000,
-	},
-	.ops = &msm_dai_q6_ops,
-	.probe = msm_dai_q6_dai_probe,
-	.remove = msm_dai_q6_dai_remove,
-};
-
-static struct snd_soc_dai_driver msm_dai_q6_afe_tx_dai = {
-	.capture = {
-		.rates = SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_8000 |
-		SNDRV_PCM_RATE_16000,
-		.formats = SNDRV_PCM_FMTBIT_S16_LE,
-		.channels_min = 1,
-		.channels_max = 2,
-		.rate_min =     8000,
-		.rate_max =	48000,
-	},
-	.ops = &msm_dai_q6_ops,
-	.probe = msm_dai_q6_dai_probe,
-	.remove = msm_dai_q6_dai_remove,
-};
-
-static struct snd_soc_dai_driver msm_dai_q6_voice_playback_tx_dai = {
-	.playback = {
-		.rates = SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_8000 |
-		SNDRV_PCM_RATE_16000,
-		.formats = SNDRV_PCM_FMTBIT_S16_LE,
-		.channels_min = 1,
-		.channels_max = 2,
-		.rate_max =     48000,
-		.rate_min =     8000,
-	},
-	.ops = &msm_dai_q6_ops,
-	.probe = msm_dai_q6_dai_probe,
-	.remove = msm_dai_q6_dai_remove,
-};
-
-static struct snd_soc_dai_driver msm_dai_q6_slimbus_rx_dai = {
-	.playback = {
-		.rates = SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_8000 |
-		SNDRV_PCM_RATE_16000,
-		.formats = SNDRV_PCM_FMTBIT_S16_LE,
-		.channels_min = 1,
-		.channels_max = 2,
-		.rate_min =     8000,
-		.rate_max =	48000,
-	},
-	.ops = &msm_dai_q6_ops,
-	.probe = msm_dai_q6_dai_probe,
-	.remove = msm_dai_q6_dai_remove,
-};
-
-static struct snd_soc_dai_driver msm_dai_q6_slimbus_tx_dai = {
-	.capture = {
-		.rates = SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_8000 |
-		SNDRV_PCM_RATE_16000,
-		.formats = SNDRV_PCM_FMTBIT_S16_LE,
-		.channels_min = 1,
-		.channels_max = 2,
-		.rate_min =     8000,
-		.rate_max =	48000,
-	},
-	.ops = &msm_dai_q6_ops,
-	.probe = msm_dai_q6_dai_probe,
-	.remove = msm_dai_q6_dai_remove,
-};
-
-static struct snd_soc_dai_driver msm_dai_q6_incall_record_dai = {
-	.capture = {
-		.rates = SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_8000 |
-		SNDRV_PCM_RATE_16000,
-		.formats = SNDRV_PCM_FMTBIT_S16_LE,
-		.channels_min = 1,
-		.channels_max = 2,
-		.rate_min =     8000,
-		.rate_max =     48000,
-	},
-	.ops = &msm_dai_q6_ops,
-	.probe = msm_dai_q6_dai_probe,
-	.remove = msm_dai_q6_dai_remove,
-};
-
-static struct snd_soc_dai_driver msm_dai_q6_bt_sco_rx_dai = {
-	.playback = {
-		.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000,
-		.formats = SNDRV_PCM_FMTBIT_S16_LE,
-		.channels_min = 1,
-		.channels_max = 1,
-		.rate_max = 16000,
-		.rate_min = 8000,
-	},
-	.ops = &msm_dai_q6_ops,
-	.probe = msm_dai_q6_dai_probe,
-	.remove = msm_dai_q6_dai_remove,
-};
-
-static struct snd_soc_dai_driver msm_dai_q6_bt_sco_tx_dai = {
-	.playback = {
-		.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000,
-		.formats = SNDRV_PCM_FMTBIT_S16_LE,
-		.channels_min = 1,
-		.channels_max = 1,
-		.rate_max = 16000,
-		.rate_min = 8000,
-	},
-	.ops = &msm_dai_q6_ops,
-	.probe = msm_dai_q6_dai_probe,
-	.remove = msm_dai_q6_dai_remove,
-};
-
-static struct snd_soc_dai_driver msm_dai_q6_fm_rx_dai = {
-	.playback = {
-		.rates = SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_8000 |
-		SNDRV_PCM_RATE_16000,
-		.formats = SNDRV_PCM_FMTBIT_S16_LE,
-		.channels_min = 2,
-		.channels_max = 2,
-		.rate_max = 48000,
-		.rate_min = 8000,
-	},
-	.ops = &msm_dai_q6_ops,
-	.probe = msm_dai_q6_dai_probe,
-	.remove = msm_dai_q6_dai_remove,
-};
-
-static struct snd_soc_dai_driver msm_dai_q6_fm_tx_dai = {
-	.playback = {
-		.rates = SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_8000 |
-		SNDRV_PCM_RATE_16000,
-		.formats = SNDRV_PCM_FMTBIT_S16_LE,
-		.channels_min = 2,
-		.channels_max = 2,
-		.rate_max = 48000,
-		.rate_min = 8000,
-	},
-	.ops = &msm_dai_q6_ops,
-	.probe = msm_dai_q6_dai_probe,
-	.remove = msm_dai_q6_dai_remove,
-};
-
 static struct snd_soc_dai_driver msm_dai_q6_aux_pcm_rx_dai = {
 	.playback = {
 		.rates = SNDRV_PCM_RATE_8000,
@@ -1087,140 +325,217 @@
 	.remove = msm_dai_q6_dai_auxpcm_remove,
 };
 
-
-static struct snd_soc_dai_driver msm_dai_q6_slimbus_1_rx_dai = {
-	.playback = {
-		.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000,
-		.formats = SNDRV_PCM_FMTBIT_S16_LE,
-		.channels_min = 1,
-		.channels_max = 1,
-		.rate_min = 8000,
-		.rate_max = 16000,
-	},
-	.ops = &msm_dai_q6_ops,
-	.probe = msm_dai_q6_dai_probe,
-	.remove = msm_dai_q6_dai_remove,
-};
-
-static struct snd_soc_dai_driver msm_dai_q6_slimbus_1_tx_dai = {
-	.capture = {
-		.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000,
-		.formats = SNDRV_PCM_FMTBIT_S16_LE,
-		.channels_min = 1,
-		.channels_max = 1,
-		.rate_min = 8000,
-		.rate_max = 16000,
-	},
-	.ops = &msm_dai_q6_ops,
-	.probe = msm_dai_q6_dai_probe,
-	.remove = msm_dai_q6_dai_remove,
-};
-
-static __devinit int msm_dai_q6_dev_probe(struct platform_device *pdev)
+static int msm_auxpcm_dev_probe(struct platform_device *pdev)
 {
+	int id;
+	void *plat_data;
 	int rc = 0;
 
+	if (pdev->dev.parent == NULL)
+		return -ENODEV;
+
+	plat_data = dev_get_drvdata(pdev->dev.parent);
+
+	rc = of_property_read_u32(pdev->dev.of_node,
+			"qcom,msm-auxpcm-dev-id", &id);
+	if (rc) {
+		dev_err(&pdev->dev, "%s: qcom,msm-auxpcm-dev-id missing in DT node\n",
+				__func__);
+		return rc;
+	}
+
+	pdev->id = id;
+	dev_set_name(&pdev->dev, "%s.%d", "msm-dai-q6", id);
 	dev_dbg(&pdev->dev, "dev name %s\n", dev_name(&pdev->dev));
 
-	switch (pdev->id) {
-	case PRIMARY_I2S_RX:
-	case SECONDARY_I2S_RX:
-		rc = snd_soc_register_dai(&pdev->dev, &msm_dai_q6_i2s_rx_dai);
-		break;
-	case PRIMARY_I2S_TX:
-		rc = snd_soc_register_dai(&pdev->dev, &msm_dai_q6_i2s_tx_dai);
-		break;
+	dev_set_drvdata(&pdev->dev, plat_data);
+
+	switch (id) {
 	case AFE_PORT_ID_PRIMARY_PCM_RX:
 		rc = snd_soc_register_dai(&pdev->dev,
-				&msm_dai_q6_aux_pcm_rx_dai);
+					&msm_dai_q6_aux_pcm_rx_dai);
 		break;
 	case AFE_PORT_ID_PRIMARY_PCM_TX:
 		rc = snd_soc_register_dai(&pdev->dev,
-				&msm_dai_q6_aux_pcm_tx_dai);
-		break;
-	case MI2S_RX:
-		rc = snd_soc_register_dai(&pdev->dev,
-					&msm_dai_q6_i2s_rx_dai);
-		break;
-	case SLIMBUS_0_RX:
-		rc = snd_soc_register_dai(&pdev->dev,
-				&msm_dai_q6_slimbus_rx_dai);
-		break;
-	case SLIMBUS_0_TX:
-		rc = snd_soc_register_dai(&pdev->dev,
-				&msm_dai_q6_slimbus_tx_dai);
-		break;
-
-	case SLIMBUS_1_RX:
-		rc = snd_soc_register_dai(&pdev->dev,
-				&msm_dai_q6_slimbus_1_rx_dai);
-		break;
-	case SLIMBUS_1_TX:
-		rc = snd_soc_register_dai(&pdev->dev,
-				&msm_dai_q6_slimbus_1_tx_dai);
-		break;
-	case INT_BT_SCO_RX:
-		rc = snd_soc_register_dai(&pdev->dev,
-					&msm_dai_q6_bt_sco_rx_dai);
-		break;
-	case INT_BT_SCO_TX:
-		rc = snd_soc_register_dai(&pdev->dev,
-					&msm_dai_q6_bt_sco_tx_dai);
-		break;
-	case INT_FM_RX:
-		rc = snd_soc_register_dai(&pdev->dev, &msm_dai_q6_fm_rx_dai);
-		break;
-	case INT_FM_TX:
-		rc = snd_soc_register_dai(&pdev->dev, &msm_dai_q6_fm_tx_dai);
-		break;
-	case RT_PROXY_DAI_001_RX:
-	case RT_PROXY_DAI_002_RX:
-		rc = snd_soc_register_dai(&pdev->dev, &msm_dai_q6_afe_rx_dai);
-		break;
-	case RT_PROXY_DAI_001_TX:
-	case RT_PROXY_DAI_002_TX:
-		rc = snd_soc_register_dai(&pdev->dev, &msm_dai_q6_afe_tx_dai);
-		break;
-	case VOICE_PLAYBACK_TX:
-		rc = snd_soc_register_dai(&pdev->dev,
-					&msm_dai_q6_voice_playback_tx_dai);
-		break;
-	case VOICE_RECORD_RX:
-	case VOICE_RECORD_TX:
-		rc = snd_soc_register_dai(&pdev->dev,
-						&msm_dai_q6_incall_record_dai);
+					&msm_dai_q6_aux_pcm_tx_dai);
 		break;
 	default:
 		rc = -ENODEV;
 		break;
 	}
+
 	return rc;
 }
 
-static __devexit int msm_dai_q6_dev_remove(struct platform_device *pdev)
+static int msm_auxpcm_resource_probe(
+			struct platform_device *pdev)
+{
+	int rc = 0;
+	struct msm_dai_auxpcm_pdata *auxpcm_pdata = NULL;
+	u32 property_val;
+
+	auxpcm_pdata = kzalloc(sizeof(struct msm_dai_auxpcm_pdata),
+				GFP_KERNEL);
+
+	if (!auxpcm_pdata) {
+		dev_err(&pdev->dev, "Failed to allocate memory for platform data\n");
+		return -ENOMEM;
+	}
+
+	rc = of_property_read_string(pdev->dev.of_node,
+			"qcom,msm-cpudai-auxpcm-clk",
+			&auxpcm_pdata->clk);
+	if (rc) {
+		dev_err(&pdev->dev, "%s: qcom,msm-cpudai-auxpcm-clk missing in DT node\n",
+			__func__);
+		goto fail_free_plat;
+	}
+	rc = of_property_read_u32(pdev->dev.of_node,
+			"qcom,msm-cpudai-auxpcm-mode", &property_val);
+	if (rc) {
+		dev_err(&pdev->dev, "%s: qcom,msm-cpudai-auxpcm-mode missing in DT node\n",
+			__func__);
+		goto fail_free_plat;
+	}
+	auxpcm_pdata->mode = (u16)property_val;
+	rc = of_property_read_u32(pdev->dev.of_node,
+			"qcom,msm-cpudai-auxpcm-sync", &property_val);
+	if (rc) {
+		dev_err(&pdev->dev, "%s: qcom,msm-cpudai-auxpcm-sync missing in DT node\n",
+			__func__);
+		goto fail_free_plat;
+	}
+	auxpcm_pdata->sync = (u16)property_val;
+	rc = of_property_read_u32(pdev->dev.of_node,
+			"qcom,msm-cpudai-auxpcm-frame", &property_val);
+	if (rc) {
+		dev_err(&pdev->dev, "%s: qcom,msm-cpudai-auxpcm-frame missing in DT node\n",
+			__func__);
+		goto fail_free_plat;
+	}
+	auxpcm_pdata->frame = (u16)property_val;
+	rc = of_property_read_u32(pdev->dev.of_node,
+			"qcom,msm-cpudai-auxpcm-quant", &property_val);
+	if (rc) {
+		dev_err(&pdev->dev, "%s: qcom,msm-cpudai-auxpcm-quant missing in DT node\n",
+			__func__);
+		goto fail_free_plat;
+	}
+	auxpcm_pdata->quant = (u16)property_val;
+	rc = of_property_read_u32(pdev->dev.of_node,
+			"qcom,msm-cpudai-auxpcm-slot", &property_val);
+	if (rc) {
+		dev_err(&pdev->dev, "%s: qcom,msm-cpudai-auxpcm-slot missing in DT node\n",
+			__func__);
+		goto fail_free_plat;
+	}
+	auxpcm_pdata->slot = (u16)property_val;
+	rc = of_property_read_u32(pdev->dev.of_node,
+			"qcom,msm-cpudai-auxpcm-data", &property_val);
+	if (rc) {
+		dev_err(&pdev->dev, "%s: qcom,msm-cpudai-auxpcm-data missing in DT node\n",
+			__func__);
+		goto fail_free_plat;
+	}
+	auxpcm_pdata->data = (u16)property_val;
+	rc = of_property_read_u32(pdev->dev.of_node,
+			"qcom,msm-cpudai-auxpcm-pcm-clk-rate",
+			&auxpcm_pdata->pcm_clk_rate);
+	if (rc) {
+		dev_err(&pdev->dev, "%s: qcom,msm-cpudai-auxpcm-pcm-clk-rate missing in DT node\n",
+			__func__);
+		goto fail_free_plat;
+	}
+	platform_set_drvdata(pdev, auxpcm_pdata);
+
+	rc = of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);
+	if (rc) {
+		dev_err(&pdev->dev, "%s: failed to add child nodes, rc=%d\n",
+				__func__, rc);
+		goto fail_free_plat;
+	}
+
+	return rc;
+
+fail_free_plat:
+	kfree(auxpcm_pdata);
+	return rc;
+}
+
+static int msm_auxpcm_dev_remove(struct platform_device *pdev)
 {
 	snd_soc_unregister_dai(&pdev->dev);
 	return 0;
 }
 
-static struct platform_driver msm_dai_q6_driver = {
-	.probe  = msm_dai_q6_dev_probe,
-	.remove = msm_dai_q6_dev_remove,
+static int msm_auxpcm_resource_remove(
+				struct platform_device *pdev)
+{
+	void *auxpcm_pdata;
+
+	auxpcm_pdata = dev_get_drvdata(&pdev->dev);
+	kfree(auxpcm_pdata);
+
+	return 0;
+}
+
+static const struct of_device_id msm_auxpcm_resource_dt_match[] = {
+	{ .compatible = "qcom,msm-auxpcm-resource", },
+	{}
+};
+MODULE_DEVICE_TABLE(of, msm_auxpcm_resource_dt_match);
+
+static const struct of_device_id msm_auxpcm_dev_dt_match[] = {
+	{ .compatible = "qcom,msm-auxpcm-dev", },
+	{}
+};
+MODULE_DEVICE_TABLE(of, msm_auxpcm_dev_dt_match);
+
+
+static struct platform_driver msm_auxpcm_dev = {
+	.probe  = msm_auxpcm_dev_probe,
+	.remove = msm_auxpcm_dev_remove,
 	.driver = {
-		.name = "msm-dai-q6",
+		.name = "msm-auxpcm-dev",
 		.owner = THIS_MODULE,
+		.of_match_table = msm_auxpcm_dev_dt_match,
 	},
 };
 
+static struct platform_driver msm_auxpcm_resource = {
+	.probe  = msm_auxpcm_resource_probe,
+	.remove  = msm_auxpcm_resource_remove,
+	.driver = {
+		.name = "msm-auxpcm-resource",
+		.owner = THIS_MODULE,
+		.of_match_table = msm_auxpcm_resource_dt_match,
+	},
+};
+
+
 static int __init msm_dai_q6_init(void)
 {
-	return platform_driver_register(&msm_dai_q6_driver);
+	int rc;
+
+	rc = platform_driver_register(&msm_auxpcm_dev);
+	if (rc)
+		goto fail;
+
+	rc = platform_driver_register(&msm_auxpcm_resource);
+
+	if (rc) {
+		pr_err("%s: fail to register cpu dai driver\n", __func__);
+		platform_driver_unregister(&msm_auxpcm_dev);
+	}
+fail:
+	return rc;
 }
 module_init(msm_dai_q6_init);
 
 static void __exit msm_dai_q6_exit(void)
 {
-	platform_driver_unregister(&msm_dai_q6_driver);
+	platform_driver_unregister(&msm_auxpcm_dev);
+	platform_driver_unregister(&msm_auxpcm_resource);
 }
 module_exit(msm_dai_q6_exit);
 
diff --git a/sound/soc/msm/qdsp6v2/msm-pcm-lpa-v2.c b/sound/soc/msm/qdsp6v2/msm-pcm-lpa-v2.c
index 05ef2ce..1ac872d 100644
--- a/sound/soc/msm/qdsp6v2/msm-pcm-lpa-v2.c
+++ b/sound/soc/msm/qdsp6v2/msm-pcm-lpa-v2.c
@@ -27,6 +27,7 @@
 #include <asm/dma.h>
 #include <linux/dma-mapping.h>
 #include <linux/android_pmem.h>
+#include <linux/of_device.h>
 #include <sound/compress_params.h>
 #include <sound/compress_offload.h>
 #include <sound/compress_driver.h>
@@ -562,6 +563,9 @@
 
 static __devinit int msm_pcm_probe(struct platform_device *pdev)
 {
+	if (pdev->dev.of_node)
+		dev_set_name(&pdev->dev, "%s", "msm-pcm-lpa");
+
 	dev_info(&pdev->dev, "%s: dev name %s\n",
 			__func__, dev_name(&pdev->dev));
 	return snd_soc_register_platform(&pdev->dev,
@@ -574,10 +578,17 @@
 	return 0;
 }
 
+static const struct of_device_id msm_pcm_lpa_dt_match[] = {
+	{.compatible = "qcom,msm-pcm-lpa"},
+	{}
+};
+MODULE_DEVICE_TABLE(of, msm_pcm_lpa_dt_match);
+
 static struct platform_driver msm_pcm_driver = {
 	.driver = {
 		.name = "msm-pcm-lpa",
 		.owner = THIS_MODULE,
+		.of_match_table = msm_pcm_lpa_dt_match,
 	},
 	.probe = msm_pcm_probe,
 	.remove = __devexit_p(msm_pcm_remove),
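
The probe change above (and the identical one for msm-pcm-dsp below) covers the
device-tree case: when the platform device is created from a DT node the OF core
derives a different device name, so the probe forces the historical name back and
existing sound-card platform_name references keep matching. A reduced sketch of the
pattern, not part of the patch; example_soc_platform is a placeholder for the
driver's snd_soc_platform_driver.

	static __devinit int example_pcm_probe(struct platform_device *pdev)
	{
		/* restore the legacy name when instantiated from DT */
		if (pdev->dev.of_node)
			dev_set_name(&pdev->dev, "%s", "msm-pcm-lpa");

		return snd_soc_register_platform(&pdev->dev,
						 &example_soc_platform);
	}
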
diff --git a/sound/soc/msm/qdsp6v2/msm-pcm-q6-v2.c b/sound/soc/msm/qdsp6v2/msm-pcm-q6-v2.c
index f94e6c1..c9f9593 100644
--- a/sound/soc/msm/qdsp6v2/msm-pcm-q6-v2.c
+++ b/sound/soc/msm/qdsp6v2/msm-pcm-q6-v2.c
@@ -28,6 +28,7 @@
 #include <asm/dma.h>
 #include <linux/dma-mapping.h>
 #include <linux/android_pmem.h>
+#include <linux/of_device.h>
 
 #include "msm-pcm-q6-v2.h"
 #include "msm-pcm-routing-v2.h"
@@ -163,8 +164,7 @@
 				break;
 			}
 			if (prtd->mmap_flag) {
-				pr_debug("%s:writing %d bytes"
-					" of buffer to dsp\n",
+				pr_debug("%s:writing %d bytes of buffer to dsp\n",
 					__func__,
 					prtd->pcm_count);
 				q6asm_write_nolock(prtd->audio_client,
@@ -172,8 +172,7 @@
 					0, 0, NO_TIMESTAMP);
 			} else {
 				while (atomic_read(&prtd->out_needed)) {
-					pr_debug("%s:writing %d bytes"
-						 " of buffer to dsp\n",
+					pr_debug("%s:writing %d bytes of buffer to dsp\n",
 						__func__,
 						prtd->pcm_count);
 					q6asm_write_nolock(prtd->audio_client,
@@ -626,17 +625,17 @@
 		dir = IN;
 	else
 		dir = OUT;
-pr_err("%s: before buf alloc\n", __func__);
+	pr_debug("%s: before buf alloc\n", __func__);
 	ret = q6asm_audio_client_buf_alloc_contiguous(dir,
 			prtd->audio_client,
 			runtime->hw.period_bytes_min,
 			runtime->hw.periods_max);
 	if (ret < 0) {
-		pr_err("Audio Start: Buffer Allocation failed "
-					"rc = %d\n", ret);
+		pr_err("Audio Start: Buffer Allocation failed rc = %d\n",
+							ret);
 		return -ENOMEM;
 	}
-pr_err("%s: after buf alloc\n", __func__);
+	pr_debug("%s: after buf alloc\n", __func__);
 	buf = prtd->audio_client->port[dir].buf;
 	if (buf == NULL || buf[0].data == NULL)
 		return -ENOMEM;
@@ -684,6 +683,9 @@
 
 static __devinit int msm_pcm_probe(struct platform_device *pdev)
 {
+	if (pdev->dev.of_node)
+		dev_set_name(&pdev->dev, "%s", "msm-pcm-dsp");
+
 	pr_info("%s: dev name %s\n", __func__, dev_name(&pdev->dev));
 	return snd_soc_register_platform(&pdev->dev,
 				   &msm_soc_platform);
@@ -694,11 +696,17 @@
 	snd_soc_unregister_platform(&pdev->dev);
 	return 0;
 }
+static const struct of_device_id msm_pcm_dt_match[] = {
+	{.compatible = "qcom,msm-pcm-dsp"},
+	{}
+};
+MODULE_DEVICE_TABLE(of, msm_pcm_dt_match);
 
 static struct platform_driver msm_pcm_driver = {
 	.driver = {
 		.name = "msm-pcm-dsp",
 		.owner = THIS_MODULE,
+		.of_match_table = msm_pcm_dt_match,
 	},
 	.probe = msm_pcm_probe,
 	.remove = __devexit_p(msm_pcm_remove),
diff --git a/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c b/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c
index 2eebae5..67ee8e4 100644
--- a/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c
+++ b/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c
@@ -18,6 +18,7 @@
 #include <linux/platform_device.h>
 #include <linux/bitops.h>
 #include <linux/mutex.h>
+#include <linux/of_device.h>
 #include <sound/core.h>
 #include <sound/soc.h>
 #include <sound/soc-dapm.h>
@@ -28,15 +29,17 @@
 #include <sound/q6asm-v2.h>
 #include <sound/q6afe-v2.h>
 #include <sound/tlv.h>
+
 #include "msm-pcm-routing-v2.h"
-#include "../qdsp6/q6voice.h"
+#include "q6voice.h"
 
 struct msm_pcm_routing_bdai_data {
 	u16 port_id; /* AFE port ID */
 	u8 active; /* track if this backend is enabled */
-	struct snd_pcm_hw_params *hw_params; /* to get freq and channel mode */
 	unsigned long fe_sessions; /* Front-end sessions */
 	unsigned long port_sessions; /* track Tx BE ports -> Rx BE */
+	unsigned int  sample_rate;
+	unsigned int  channel;
 };
 
 #define INVALID_SESSION -1
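
With this change msm_pcm_routing_bdai_data no longer keeps a pointer to the
back-end's snd_pcm_hw_params; the sample rate and channel count are cached as plain
integers, which the routing code later reads when opening the ADM COPPs. A sketch of
how the cache might be filled from the back-end hw_params path, not part of the patch
(the actual update site lives elsewhere in this file and is not shown in these
hunks):

	static int example_cache_be_params(int be_id,
					   struct snd_pcm_hw_params *params)
	{
		if (be_id < 0 || be_id >= MSM_BACKEND_DAI_MAX)
			return -EINVAL;

		mutex_lock(&routing_lock);
		msm_bedais[be_id].sample_rate = params_rate(params);
		msm_bedais[be_id].channel = params_channels(params);
		mutex_unlock(&routing_lock);

		return 0;
	}
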
@@ -46,28 +49,26 @@
 static struct mutex routing_lock;
 
 static int fm_switch_enable;
+static int fm_pcmrx_switch_enable;
 
-#define INT_FM_RX_VOL_MAX_STEPS 100
-#define INT_FM_RX_VOL_GAIN 2000
-
-static int msm_route_fm_vol_control;
-static const DECLARE_TLV_DB_SCALE(fm_rx_vol_gain, 0,
-			INT_FM_RX_VOL_MAX_STEPS, 0);
-
-#define INT_RX_VOL_MAX_STEPS 100
+#define INT_RX_VOL_MAX_STEPS 0x2000
 #define INT_RX_VOL_GAIN 0x2000
 
+static int msm_route_fm_vol_control;
+static const DECLARE_TLV_DB_LINEAR(fm_rx_vol_gain, 0,
+			INT_RX_VOL_MAX_STEPS);
+
 static int msm_route_lpa_vol_control;
-static const DECLARE_TLV_DB_SCALE(lpa_rx_vol_gain, 0,
-			INT_RX_VOL_MAX_STEPS, 0);
+static const DECLARE_TLV_DB_LINEAR(lpa_rx_vol_gain, 0,
+			INT_RX_VOL_MAX_STEPS);
 
 static int msm_route_multimedia2_vol_control;
-static const DECLARE_TLV_DB_SCALE(multimedia2_rx_vol_gain, 0,
-			INT_RX_VOL_MAX_STEPS, 0);
+static const DECLARE_TLV_DB_LINEAR(multimedia2_rx_vol_gain, 0,
+			INT_RX_VOL_MAX_STEPS);
 
 static int msm_route_compressed_vol_control;
-static const DECLARE_TLV_DB_SCALE(compressed_rx_vol_gain, 0,
-			INT_RX_VOL_MAX_STEPS, 0);
+static const DECLARE_TLV_DB_LINEAR(compressed_rx_vol_gain, 0,
+			INT_RX_VOL_MAX_STEPS);
 
 
 
@@ -111,28 +112,35 @@
 /* This array is indexed by back-end DAI ID defined in msm-pcm-routing.h
  * If new back-end is defined, add new back-end DAI ID at the end of enum
  */
+#define SLIMBUS_EXTPROC_RX AFE_PORT_INVALID
 static struct msm_pcm_routing_bdai_data msm_bedais[MSM_BACKEND_DAI_MAX] = {
-	{ PRIMARY_I2S_RX, 0, NULL, 0, 0},
-	{ PRIMARY_I2S_TX, 0, NULL, 0, 0},
-	{ SLIMBUS_0_RX, 0, NULL, 0, 0},
-	{ SLIMBUS_0_TX, 0, NULL, 0, 0},
-	{ HDMI_RX, 0, NULL,  0, 0},
-	{ INT_BT_SCO_RX, 0, NULL, 0, 0},
-	{ INT_BT_SCO_TX, 0, NULL, 0, 0},
-	{ INT_FM_RX, 0, NULL, 0, 0},
-	{ INT_FM_TX, 0, NULL, 0, 0},
-	{ RT_PROXY_PORT_001_RX, 0, NULL, 0, 0},
-	{ RT_PROXY_PORT_001_TX, 0, NULL, 0, 0},
-	{ PCM_RX, 0, NULL, 0, 0},
-	{ PCM_TX, 0, NULL, 0, 0},
-	{ VOICE_PLAYBACK_TX, 0, NULL, 0, 0},
-	{ VOICE_RECORD_RX, 0, NULL, 0, 0},
-	{ VOICE_RECORD_TX, 0, NULL, 0, 0},
-	{ MI2S_RX, 0, NULL, 0, 0},
-	{ SECONDARY_I2S_RX, 0, NULL, 0, 0},
-	{ SLIMBUS_1_RX, 0, NULL, 0, 0},
-	{ SLIMBUS_1_TX, 0, NULL, 0, 0},
-	{ SLIMBUS_INVALID, 0, NULL, 0, 0},
+	{ PRIMARY_I2S_RX, 0, 0, 0, 0, 0},
+	{ PRIMARY_I2S_TX, 0, 0, 0, 0, 0},
+	{ SLIMBUS_0_RX, 0, 0, 0, 0, 0},
+	{ SLIMBUS_0_TX, 0, 0, 0, 0, 0},
+	{ HDMI_RX, 0, 0, 0, 0, 0},
+	{ INT_BT_SCO_RX, 0, 0, 0, 0, 0},
+	{ INT_BT_SCO_TX, 0, 0, 0, 0, 0},
+	{ INT_FM_RX, 0, 0, 0, 0, 0},
+	{ INT_FM_TX, 0, 0, 0, 0, 0},
+	{ RT_PROXY_PORT_001_RX, 0, 0, 0, 0, 0},
+	{ RT_PROXY_PORT_001_TX, 0, 0, 0, 0, 0},
+	{ PCM_RX, 0, 0, 0, 0, 0},
+	{ PCM_TX, 0, 0, 0, 0, 0},
+	{ VOICE_PLAYBACK_TX, 0, 0, 0, 0, 0},
+	{ VOICE_RECORD_RX, 0, 0, 0, 0, 0},
+	{ VOICE_RECORD_TX, 0, 0, 0, 0, 0},
+	{ MI2S_RX, 0, 0, 0, 0, 0},
+	{ MI2S_TX, 0, 0, 0, 0, 0},
+	{ SECONDARY_I2S_RX, 0, 0, 0, 0, 0},
+	{ SLIMBUS_1_RX, 0, 0, 0, 0, 0},
+	{ SLIMBUS_1_TX, 0, 0, 0, 0, 0},
+	{ SLIMBUS_4_RX, 0, 0, 0, 0, 0},
+	{ SLIMBUS_4_TX, 0, 0, 0, 0, 0},
+	{ SLIMBUS_3_RX, 0, 0, 0, 0, 0},
+	{ SLIMBUS_EXTPROC_RX, 0, 0, 0, 0, 0},
+	{ SLIMBUS_EXTPROC_RX, 0, 0, 0, 0, 0},
+	{ SLIMBUS_EXTPROC_RX, 0, 0, 0, 0, 0},
 };
 
 
@@ -148,6 +156,16 @@
 	{INVALID_SESSION, INVALID_SESSION},
 };
 
+static uint8_t is_be_dai_extproc(int be_dai)
+{
+	if (be_dai == MSM_BACKEND_DAI_EXTPROC_RX ||
+	   be_dai == MSM_BACKEND_DAI_EXTPROC_TX ||
+	   be_dai == MSM_BACKEND_DAI_EXTPROC_EC_TX)
+		return 1;
+	else
+		return 0;
+}
+
 static void msm_pcm_routing_build_matrix(int fedai_id, int dspst_id,
 	int path_type)
 {
@@ -159,10 +177,10 @@
 		MSM_AFE_PORT_TYPE_RX : MSM_AFE_PORT_TYPE_TX);
 
 	for (i = 0; i < MSM_BACKEND_DAI_MAX; i++) {
-		if ((afe_get_port_type(msm_bedais[i].port_id) ==
-			port_type) &&
-			msm_bedais[i].active && (test_bit(fedai_id,
-				&msm_bedais[i].fe_sessions)))
+		if (!is_be_dai_extproc(i) &&
+		   (afe_get_port_type(msm_bedais[i].port_id) == port_type) &&
+		   (msm_bedais[i].active) &&
+		   (test_bit(fedai_id, &msm_bedais[i].fe_sessions)))
 			payload.copp_ids[payload.num_copps++] =
 					msm_bedais[i].port_id;
 	}
@@ -172,6 +190,44 @@
 			payload.num_copps, payload.copp_ids, 0);
 }
 
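+/* Register a pass-through (psthr) stream: store the DSP stream id for the
+ * front-end and locate the first matching active back-end port. The ADM
+ * port connection itself is still to be done (see the in-loop comment).
+ */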
+void msm_pcm_routing_reg_psthr_stream(int fedai_id, int dspst_id,
+					int stream_type)
+{
+	int i, session_type, path_type, port_type;
+	u32 mode = 0;
+
+	if (fedai_id > MSM_FRONTEND_DAI_MM_MAX_ID) {
+		/* bad ID assigned in machine driver */
+		pr_err("%s: bad MM ID\n", __func__);
+		return;
+	}
+
+	if (stream_type == SNDRV_PCM_STREAM_PLAYBACK) {
+		session_type = SESSION_TYPE_RX;
+		path_type = ADM_PATH_PLAYBACK;
+		port_type = MSM_AFE_PORT_TYPE_RX;
+	} else {
+		session_type = SESSION_TYPE_TX;
+		path_type = ADM_PATH_LIVE_REC;
+		port_type = MSM_AFE_PORT_TYPE_TX;
+	}
+
+	mutex_lock(&routing_lock);
+
+	fe_dai_map[fedai_id][session_type] = dspst_id;
+	for (i = 0; i < MSM_BACKEND_DAI_MAX; i++) {
+		if (!is_be_dai_extproc(i) &&
+		   (afe_get_port_type(msm_bedais[i].port_id) == port_type) &&
+		   (msm_bedais[i].active) &&
+		   (test_bit(fedai_id, &msm_bedais[i].fe_sessions))) {
+			mode = afe_get_port_type(msm_bedais[i].port_id);
+			/* adm_connect_afe_port needs to be called */
+			break;
+		}
+	}
+	mutex_unlock(&routing_lock);
+}
+
 void msm_pcm_routing_reg_phy_stream(int fedai_id, int dspst_id, int stream_type)
 {
 	int i, session_type, path_type, port_type;
@@ -202,25 +258,25 @@
 	if (eq_data[fedai_id].enable)
 		msm_send_eq_values(fedai_id);
 	for (i = 0; i < MSM_BACKEND_DAI_MAX; i++) {
-		if ((afe_get_port_type(msm_bedais[i].port_id) ==
-			port_type) && msm_bedais[i].active &&
-			(test_bit(fedai_id,
-			&msm_bedais[i].fe_sessions))) {
+		if (!is_be_dai_extproc(i) &&
+		   (afe_get_port_type(msm_bedais[i].port_id) == port_type) &&
+		   (msm_bedais[i].active) &&
+		   (test_bit(fedai_id, &msm_bedais[i].fe_sessions))) {
 
-			channels = params_channels(msm_bedais[i].hw_params);
+			channels = msm_bedais[i].channel;
 
 			if ((stream_type == SNDRV_PCM_STREAM_PLAYBACK) &&
 				(channels > 2))
 				adm_multi_ch_copp_open(msm_bedais[i].port_id,
 				path_type,
-				params_rate(msm_bedais[i].hw_params),
-				channels,
+				msm_bedais[i].sample_rate,
+				msm_bedais[i].channel,
 				DEFAULT_COPP_TOPOLOGY);
 			else
 				adm_open(msm_bedais[i].port_id,
 				path_type,
-				params_rate(msm_bedais[i].hw_params),
-				params_channels(msm_bedais[i].hw_params),
+				msm_bedais[i].sample_rate,
+				msm_bedais[i].channel,
 				DEFAULT_COPP_TOPOLOGY);
 
 			payload.copp_ids[payload.num_copps++] =
@@ -255,10 +311,10 @@
 	mutex_lock(&routing_lock);
 
 	for (i = 0; i < MSM_BACKEND_DAI_MAX; i++) {
-		if ((afe_get_port_type(msm_bedais[i].port_id) ==
-			port_type) && msm_bedais[i].active &&
-			(test_bit(fedai_id,
-			&msm_bedais[i].fe_sessions)))
+		if (!is_be_dai_extproc(i) &&
+		   (afe_get_port_type(msm_bedais[i].port_id) == port_type) &&
+		   (msm_bedais[i].active) &&
+		   (test_bit(fedai_id, &msm_bedais[i].fe_sessions)))
 			adm_close(msm_bedais[i].port_id);
 	}
 
@@ -309,29 +365,35 @@
 	mutex_lock(&routing_lock);
 
 	if (set) {
+		if (!test_bit(val, &msm_bedais[reg].fe_sessions) &&
+			(msm_bedais[reg].port_id == VOICE_PLAYBACK_TX))
+			voc_start_playback(set);
+
 		set_bit(val, &msm_bedais[reg].fe_sessions);
 		if (msm_bedais[reg].active && fe_dai_map[val][session_type] !=
 			INVALID_SESSION) {
 
-			channels = params_channels(msm_bedais[reg].hw_params);
+			channels = msm_bedais[reg].channel;
 
 			if ((session_type == SESSION_TYPE_RX) && (channels > 2))
 				adm_multi_ch_copp_open(msm_bedais[reg].port_id,
 				path_type,
-				params_rate(msm_bedais[reg].hw_params),
+				msm_bedais[reg].sample_rate,
 				channels,
 				DEFAULT_COPP_TOPOLOGY);
 			else
 				adm_open(msm_bedais[reg].port_id,
 				path_type,
-				params_rate(msm_bedais[reg].hw_params),
-				params_channels(msm_bedais[reg].hw_params),
+				msm_bedais[reg].sample_rate, channels,
 				DEFAULT_COPP_TOPOLOGY);
 
 			msm_pcm_routing_build_matrix(val,
 				fe_dai_map[val][session_type], path_type);
 		}
 	} else {
+		if (test_bit(val, &msm_bedais[reg].fe_sessions) &&
+			(msm_bedais[reg].port_id == VOICE_PLAYBACK_TX))
+			voc_start_playback(set);
 		clear_bit(val, &msm_bedais[reg].fe_sessions);
 		if (msm_bedais[reg].active && fe_dai_map[val][session_type] !=
 			INVALID_SESSION) {
@@ -340,6 +402,10 @@
 				fe_dai_map[val][session_type], path_type);
 		}
 	}
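+	/* For the in-call record back-ends, tell the voice driver to start
+	 * or stop recording on the selected port.
+	 */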
+	if ((msm_bedais[reg].port_id == VOICE_RECORD_RX)
+			|| (msm_bedais[reg].port_id == VOICE_RECORD_TX))
+		voc_start_record(msm_bedais[reg].port_id, set);
+
 	mutex_unlock(&routing_lock);
 }
 
@@ -354,7 +420,7 @@
 	else
 		ucontrol->value.integer.value[0] = 0;
 
-	pr_info("%s: reg %x shift %x val %ld\n", __func__, mc->reg, mc->shift,
+	pr_debug("%s: reg %x shift %x val %ld\n", __func__, mc->reg, mc->shift,
 	ucontrol->value.integer.value[0]);
 
 	return 0;
@@ -370,23 +436,68 @@
 
 
 	if (ucontrol->value.integer.value[0] &&
-	    msm_pcm_routing_route_is_set(mc->reg, mc->shift) == false) {
+	   msm_pcm_routing_route_is_set(mc->reg, mc->shift) == false) {
 		msm_pcm_routing_process_audio(mc->reg, mc->shift, 1);
 		snd_soc_dapm_mixer_update_power(widget, kcontrol, 1);
 	} else if (!ucontrol->value.integer.value[0] &&
-		   msm_pcm_routing_route_is_set(mc->reg, mc->shift) == true) {
+		  msm_pcm_routing_route_is_set(mc->reg, mc->shift) == true) {
 		msm_pcm_routing_process_audio(mc->reg, mc->shift, 0);
 		snd_soc_dapm_mixer_update_power(widget, kcontrol, 0);
 	}
-	pr_info("%s: reg %x shift %x val %ld\n", __func__, mc->reg, mc->shift,
-					ucontrol->value.integer.value[0]);
 
 	return 1;
 }
 
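+/* Update voice-call routing: record the front-end in the back-end's
+ * fe_sessions, point the selected voice session (CS voice, VoLTE or VoIP) at
+ * the new RX/TX port, and enable CVP once both paths are routed (or disable
+ * it when a path is cleared).
+ */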
 static void msm_pcm_routing_process_voice(u16 reg, u16 val, int set)
 {
-	return;
+	u16 session_id = 0;
+
+	pr_debug("%s: reg %x val %x set %x\n", __func__, reg, val, set);
+
+	if (val == MSM_FRONTEND_DAI_CS_VOICE)
+		session_id = voc_get_session_id(VOICE_SESSION_NAME);
+	else if (val == MSM_FRONTEND_DAI_VOLTE)
+		session_id = voc_get_session_id(VOLTE_SESSION_NAME);
+	else
+		session_id = voc_get_session_id(VOIP_SESSION_NAME);
+
+	pr_debug("%s: FE DAI 0x%x session_id 0x%x\n",
+		__func__, val, session_id);
+
+	mutex_lock(&routing_lock);
+
+	if (set)
+		set_bit(val, &msm_bedais[reg].fe_sessions);
+	else
+		clear_bit(val, &msm_bedais[reg].fe_sessions);
+
+	mutex_unlock(&routing_lock);
+
+	if (afe_get_port_type(msm_bedais[reg].port_id) ==
+						MSM_AFE_PORT_TYPE_RX) {
+		voc_set_route_flag(session_id, RX_PATH, set);
+		if (set) {
+			voc_set_rxtx_port(session_id,
+				msm_bedais[reg].port_id, DEV_RX);
+
+			if (voc_get_route_flag(session_id, RX_PATH) &&
+			   voc_get_route_flag(session_id, TX_PATH))
+				voc_enable_cvp(session_id);
+		} else {
+			voc_disable_cvp(session_id);
+		}
+	} else {
+		voc_set_route_flag(session_id, TX_PATH, set);
+		if (set) {
+			voc_set_rxtx_port(session_id,
+				msm_bedais[reg].port_id, DEV_TX);
+			if (voc_get_route_flag(session_id, RX_PATH) &&
+			   voc_get_route_flag(session_id, TX_PATH))
+				voc_enable_cvp(session_id);
+		} else {
+			voc_disable_cvp(session_id);
+		}
+	}
 }
 
 static int msm_routing_get_voice_mixer(struct snd_kcontrol *kcontrol,
@@ -503,6 +614,31 @@
 	return 1;
 }
 
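+/* Get/put handlers for the FM PCM_RX switch ("PCM_RX_DL_HL"): the put call
+ * toggles the power of the attached DAPM widget and caches the state in
+ * fm_pcmrx_switch_enable.
+ */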
+static int msm_routing_get_fm_pcmrx_switch_mixer(struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_value *ucontrol)
+{
+	ucontrol->value.integer.value[0] = fm_pcmrx_switch_enable;
+	pr_debug("%s: FM Switch enable %ld\n", __func__,
+		ucontrol->value.integer.value[0]);
+	return 0;
+}
+
+static int msm_routing_put_fm_pcmrx_switch_mixer(struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_dapm_widget_list *wlist = snd_kcontrol_chip(kcontrol);
+	struct snd_soc_dapm_widget *widget = wlist->widgets[0];
+
+	pr_debug("%s: FM Switch enable %ld\n", __func__,
+			ucontrol->value.integer.value[0]);
+	if (ucontrol->value.integer.value[0])
+		snd_soc_dapm_mixer_update_power(widget, kcontrol, 1);
+	else
+		snd_soc_dapm_mixer_update_power(widget, kcontrol, 0);
+	fm_pcmrx_switch_enable = ucontrol->value.integer.value[0];
+	return 1;
+}
+
 static int msm_routing_get_port_mixer(struct snd_kcontrol *kcontrol,
 				struct snd_ctl_elem_value *ucontrol)
 {
@@ -531,12 +667,12 @@
 
 	if (ucontrol->value.integer.value[0]) {
 		afe_loopback(1, msm_bedais[mc->reg].port_id,
-			     msm_bedais[mc->shift].port_id);
+			    msm_bedais[mc->shift].port_id);
 		set_bit(mc->shift,
 		&msm_bedais[mc->reg].port_sessions);
 	} else {
 		afe_loopback(0, msm_bedais[mc->reg].port_id,
-			     msm_bedais[mc->shift].port_id);
+			    msm_bedais[mc->shift].port_id);
 		clear_bit(mc->shift,
 		&msm_bedais[mc->reg].port_sessions);
 	}
@@ -576,7 +712,6 @@
 			ucontrol->value.integer.value[0];
 
 	return 0;
-
 }
 
 static int msm_routing_get_multimedia2_vol_mixer(struct snd_kcontrol *kcontrol,
@@ -590,9 +725,11 @@
 static int msm_routing_set_multimedia2_vol_mixer(struct snd_kcontrol *kcontrol,
 				struct snd_ctl_elem_value *ucontrol)
 {
+
 	if (!multi_ch_pcm_set_volume(ucontrol->value.integer.value[0]))
 		msm_route_multimedia2_vol_control =
 			ucontrol->value.integer.value[0];
+
 	return 0;
 }
 
@@ -610,6 +747,7 @@
 	if (!compressed_set_volume(ucontrol->value.integer.value[0]))
 		msm_route_compressed_vol_control =
 			ucontrol->value.integer.value[0];
+
 	return 0;
 }
 
@@ -621,7 +759,7 @@
 
 	if (ac == NULL) {
 		pr_err("%s: Could not get audio client for session: %d\n",
-		       __func__, fe_dai_map[eq_idx][SESSION_TYPE_RX]);
+		      __func__, fe_dai_map[eq_idx][SESSION_TYPE_RX]);
 		goto done;
 	}
 
@@ -629,7 +767,7 @@
 
 	if (result < 0)
 		pr_err("%s: Call to ASM equalizer failed, returned = %d\n",
-		       __func__, result);
+		      __func__, result);
 done:
 	return;
 }
@@ -827,6 +965,15 @@
 	msm_routing_put_audio_mixer),
 };
 
+static const struct snd_kcontrol_new slimbus_4_rx_mixer_controls[] = {
+	SOC_SINGLE_EXT("MultiMedia1", MSM_BACKEND_DAI_SLIMBUS_4_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia2", MSM_BACKEND_DAI_SLIMBUS_4_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA2, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+};
+
 static const struct snd_kcontrol_new int_bt_sco_rx_mixer_controls[] = {
 	SOC_SINGLE_EXT("MultiMedia1", MSM_BACKEND_DAI_INT_BT_SCO_RX,
 	MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer,
@@ -891,6 +1038,9 @@
 	SOC_SINGLE_EXT("PRI_TX", MSM_BACKEND_DAI_PRI_I2S_TX,
 		MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer,
 		msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MI2S_TX", MSM_BACKEND_DAI_MI2S_TX,
+		MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer,
+		msm_routing_put_audio_mixer),
 	SOC_SINGLE_EXT("SLIM_0_TX", MSM_BACKEND_DAI_SLIMBUS_0_TX,
 		MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer,
 		msm_routing_put_audio_mixer),
@@ -912,12 +1062,18 @@
 	SOC_SINGLE_EXT("VOC_REC_UL", MSM_BACKEND_DAI_INCALL_RECORD_TX,
 		MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer,
 		msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("SLIM_4_TX", MSM_BACKEND_DAI_SLIMBUS_4_TX,
+		MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer,
+		msm_routing_put_audio_mixer),
 };
 
 static const struct snd_kcontrol_new mmul2_mixer_controls[] = {
 	SOC_SINGLE_EXT("INTERNAL_FM_TX", MSM_BACKEND_DAI_INT_FM_TX,
 	MSM_FRONTEND_DAI_MULTIMEDIA2, 1, 0, msm_routing_get_audio_mixer,
 	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MI2S_TX", MSM_BACKEND_DAI_MI2S_TX,
+	MSM_FRONTEND_DAI_MULTIMEDIA2, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
 };
 
 static const struct snd_kcontrol_new pri_rx_voice_mixer_controls[] = {
@@ -927,6 +1083,9 @@
 	SOC_SINGLE_EXT("Voip", MSM_BACKEND_DAI_PRI_I2S_RX,
 	MSM_FRONTEND_DAI_VOIP, 1, 0, msm_routing_get_voice_mixer,
 	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("VoLTE", MSM_BACKEND_DAI_PRI_I2S_RX,
+	MSM_FRONTEND_DAI_VOLTE, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
 };
 
 static const struct snd_kcontrol_new sec_i2s_rx_voice_mixer_controls[] = {
@@ -936,6 +1095,9 @@
 	SOC_SINGLE_EXT("Voip", MSM_BACKEND_DAI_SEC_I2S_RX,
 	MSM_FRONTEND_DAI_VOIP, 1, 0, msm_routing_get_voice_mixer,
 	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("VoLTE", MSM_BACKEND_DAI_SEC_I2S_RX,
+	MSM_FRONTEND_DAI_VOLTE, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
 };
 
 static const struct snd_kcontrol_new slimbus_rx_voice_mixer_controls[] = {
@@ -945,6 +1107,9 @@
 	SOC_SINGLE_EXT("Voip", MSM_BACKEND_DAI_SLIMBUS_0_RX ,
 	MSM_FRONTEND_DAI_VOIP, 1, 0, msm_routing_get_voice_mixer,
 	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("VoLTE", MSM_BACKEND_DAI_SLIMBUS_0_RX ,
+	MSM_FRONTEND_DAI_VOLTE, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
 };
 
 static const struct snd_kcontrol_new bt_sco_rx_voice_mixer_controls[] = {
@@ -957,6 +1122,21 @@
 	SOC_SINGLE_EXT("Voice Stub", MSM_BACKEND_DAI_INT_BT_SCO_RX,
 	MSM_FRONTEND_DAI_VOICE_STUB, 1, 0, msm_routing_get_voice_stub_mixer,
 	msm_routing_put_voice_stub_mixer),
+	SOC_SINGLE_EXT("VoLTE", MSM_BACKEND_DAI_INT_BT_SCO_RX ,
+	MSM_FRONTEND_DAI_VOLTE, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+};
+
+static const struct snd_kcontrol_new mi2s_rx_voice_mixer_controls[] = {
+	SOC_SINGLE_EXT("CSVoice", MSM_BACKEND_DAI_MI2S_RX,
+	MSM_FRONTEND_DAI_CS_VOICE, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("Voip", MSM_BACKEND_DAI_MI2S_RX,
+	MSM_FRONTEND_DAI_VOIP, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("Voice Stub", MSM_BACKEND_DAI_MI2S_RX,
+	MSM_FRONTEND_DAI_VOICE_STUB, 1, 0, msm_routing_get_voice_stub_mixer,
+	msm_routing_put_voice_stub_mixer),
 };
 
 static const struct snd_kcontrol_new afe_pcm_rx_voice_mixer_controls[] = {
@@ -969,6 +1149,9 @@
 	SOC_SINGLE_EXT("Voice Stub", MSM_BACKEND_DAI_AFE_PCM_RX,
 	MSM_FRONTEND_DAI_VOICE_STUB, 1, 0, msm_routing_get_voice_stub_mixer,
 	msm_routing_put_voice_stub_mixer),
+	SOC_SINGLE_EXT("VoLTE", MSM_BACKEND_DAI_AFE_PCM_RX,
+	MSM_FRONTEND_DAI_VOLTE, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
 };
 
 static const struct snd_kcontrol_new aux_pcm_rx_voice_mixer_controls[] = {
@@ -981,6 +1164,9 @@
 	SOC_SINGLE_EXT("Voice Stub", MSM_BACKEND_DAI_AUXPCM_RX,
 	MSM_FRONTEND_DAI_VOICE_STUB, 1, 0, msm_routing_get_voice_stub_mixer,
 	msm_routing_put_voice_stub_mixer),
+	SOC_SINGLE_EXT("VoLTE", MSM_BACKEND_DAI_AUXPCM_RX,
+	MSM_FRONTEND_DAI_VOLTE, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
 };
 
 static const struct snd_kcontrol_new hdmi_rx_voice_mixer_controls[] = {
@@ -990,10 +1176,16 @@
 	SOC_SINGLE_EXT("Voip", MSM_BACKEND_DAI_HDMI_RX,
 	MSM_FRONTEND_DAI_VOIP, 1, 0, msm_routing_get_voice_mixer,
 	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("VoLTE", MSM_BACKEND_DAI_HDMI_RX,
+	MSM_FRONTEND_DAI_VOLTE, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("Voice Stub", MSM_BACKEND_DAI_HDMI_RX,
+	MSM_FRONTEND_DAI_VOICE_STUB, 1, 0, msm_routing_get_voice_stub_mixer,
+	msm_routing_put_voice_stub_mixer),
 };
 
 static const struct snd_kcontrol_new stub_rx_mixer_controls[] = {
-	SOC_SINGLE_EXT("Voice Stub", MSM_BACKEND_DAI_INVALID,
+	SOC_SINGLE_EXT("Voice Stub", MSM_BACKEND_DAI_EXTPROC_RX,
 	MSM_FRONTEND_DAI_VOICE_STUB, 1, 0, msm_routing_get_voice_stub_mixer,
 	msm_routing_put_voice_stub_mixer),
 };
@@ -1004,10 +1196,19 @@
 	msm_routing_put_voice_stub_mixer),
 };
 
+static const struct snd_kcontrol_new slimbus_3_rx_mixer_controls[] = {
+	SOC_SINGLE_EXT("Voice Stub", MSM_BACKEND_DAI_SLIMBUS_3_RX,
+	MSM_FRONTEND_DAI_VOICE_STUB, 1, 0, msm_routing_get_voice_stub_mixer,
+	msm_routing_put_voice_stub_mixer),
+};
+
 static const struct snd_kcontrol_new tx_voice_mixer_controls[] = {
 	SOC_SINGLE_EXT("PRI_TX_Voice", MSM_BACKEND_DAI_PRI_I2S_TX,
 	MSM_FRONTEND_DAI_CS_VOICE, 1, 0, msm_routing_get_voice_mixer,
 	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("MI2S_TX_Voice", MSM_BACKEND_DAI_MI2S_TX,
+	MSM_FRONTEND_DAI_CS_VOICE, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
 	SOC_SINGLE_EXT("SLIM_0_TX_Voice", MSM_BACKEND_DAI_SLIMBUS_0_TX,
 	MSM_FRONTEND_DAI_CS_VOICE, 1, 0, msm_routing_get_voice_mixer,
 	msm_routing_put_voice_mixer),
@@ -1022,10 +1223,31 @@
 	msm_routing_put_voice_mixer),
 };
 
+static const struct snd_kcontrol_new tx_volte_mixer_controls[] = {
+	SOC_SINGLE_EXT("PRI_TX_VoLTE", MSM_BACKEND_DAI_PRI_I2S_TX,
+	MSM_FRONTEND_DAI_VOLTE, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("SLIM_0_TX_VoLTE", MSM_BACKEND_DAI_SLIMBUS_0_TX,
+	MSM_FRONTEND_DAI_VOLTE, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("INTERNAL_BT_SCO_TX_VoLTE",
+	MSM_BACKEND_DAI_INT_BT_SCO_TX, MSM_FRONTEND_DAI_VOLTE, 1, 0,
+	msm_routing_get_voice_mixer, msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("AFE_PCM_TX_VoLTE", MSM_BACKEND_DAI_AFE_PCM_TX,
+	MSM_FRONTEND_DAI_VOLTE, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("AUX_PCM_TX_VoLTE", MSM_BACKEND_DAI_AUXPCM_TX,
+	MSM_FRONTEND_DAI_VOLTE, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
+};
+
 static const struct snd_kcontrol_new tx_voip_mixer_controls[] = {
 	SOC_SINGLE_EXT("PRI_TX_Voip", MSM_BACKEND_DAI_PRI_I2S_TX,
 	MSM_FRONTEND_DAI_VOIP, 1, 0, msm_routing_get_voice_mixer,
 	msm_routing_put_voice_mixer),
+	SOC_SINGLE_EXT("MI2S_TX_Voip", MSM_BACKEND_DAI_MI2S_TX,
+	MSM_FRONTEND_DAI_VOIP, 1, 0, msm_routing_get_voice_mixer,
+	msm_routing_put_voice_mixer),
 	SOC_SINGLE_EXT("SLIM_0_TX_Voip", MSM_BACKEND_DAI_SLIMBUS_0_TX,
 	MSM_FRONTEND_DAI_VOIP, 1, 0, msm_routing_get_voice_mixer,
 	msm_routing_put_voice_mixer),
@@ -1041,7 +1263,7 @@
 };
 
 static const struct snd_kcontrol_new tx_voice_stub_mixer_controls[] = {
-	SOC_SINGLE_EXT("STUB_TX_HL", MSM_BACKEND_DAI_INVALID,
+	SOC_SINGLE_EXT("STUB_TX_HL", MSM_BACKEND_DAI_EXTPROC_TX,
 	MSM_FRONTEND_DAI_VOICE_STUB, 1, 0, msm_routing_get_voice_stub_mixer,
 	msm_routing_put_voice_stub_mixer),
 	SOC_SINGLE_EXT("INTERNAL_BT_SCO_TX", MSM_BACKEND_DAI_INT_BT_SCO_TX,
@@ -1050,6 +1272,12 @@
 	SOC_SINGLE_EXT("SLIM_1_TX", MSM_BACKEND_DAI_SLIMBUS_1_TX,
 	MSM_FRONTEND_DAI_VOICE_STUB, 1, 0, msm_routing_get_voice_stub_mixer,
 	msm_routing_put_voice_stub_mixer),
+	SOC_SINGLE_EXT("STUB_1_TX_HL", MSM_BACKEND_DAI_EXTPROC_EC_TX,
+	MSM_FRONTEND_DAI_VOICE_STUB, 1, 0, msm_routing_get_voice_stub_mixer,
+	msm_routing_put_voice_stub_mixer),
+	SOC_SINGLE_EXT("MI2S_TX", MSM_BACKEND_DAI_MI2S_TX,
+	MSM_FRONTEND_DAI_VOICE_STUB, 1, 0, msm_routing_get_voice_stub_mixer,
+	msm_routing_put_voice_stub_mixer),
 };
 
 static const struct snd_kcontrol_new sbus_0_rx_port_mixer_controls[] = {
@@ -1062,6 +1290,9 @@
 	SOC_SINGLE_EXT("AUX_PCM_UL_TX", MSM_BACKEND_DAI_SLIMBUS_0_RX,
 	MSM_BACKEND_DAI_AUXPCM_TX, 1, 0, msm_routing_get_port_mixer,
 	msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("MI2S_TX", MSM_BACKEND_DAI_SLIMBUS_0_RX,
+	MSM_BACKEND_DAI_MI2S_TX, 1, 0, msm_routing_get_port_mixer,
+	msm_routing_put_port_mixer),
 };
 
 static const struct snd_kcontrol_new auxpcm_rx_port_mixer_controls[] = {
@@ -1079,20 +1310,58 @@
 	msm_routing_put_port_mixer),
 };
 
+static const struct snd_kcontrol_new sbus_3_rx_port_mixer_controls[] = {
+	SOC_SINGLE_EXT("INTERNAL_BT_SCO_RX", MSM_BACKEND_DAI_SLIMBUS_3_RX,
+	MSM_BACKEND_DAI_INT_BT_SCO_RX, 1, 0, msm_routing_get_port_mixer,
+	msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("MI2S_TX", MSM_BACKEND_DAI_SLIMBUS_3_RX,
+	MSM_BACKEND_DAI_MI2S_TX, 1, 0, msm_routing_get_port_mixer,
+	msm_routing_put_port_mixer),
+};
+
 static const struct snd_kcontrol_new bt_sco_rx_port_mixer_controls[] = {
 	SOC_SINGLE_EXT("SLIM_1_TX", MSM_BACKEND_DAI_INT_BT_SCO_RX,
 	MSM_BACKEND_DAI_SLIMBUS_1_TX, 1, 0, msm_routing_get_port_mixer,
 	msm_routing_put_port_mixer),
 };
 
+static const struct snd_kcontrol_new afe_pcm_rx_port_mixer_controls[] = {
+	SOC_SINGLE_EXT("INTERNAL_FM_TX", MSM_BACKEND_DAI_AFE_PCM_RX,
+	MSM_BACKEND_DAI_INT_FM_TX, 1, 0, msm_routing_get_port_mixer,
+	msm_routing_put_port_mixer),
+};
+
+static const struct snd_kcontrol_new hdmi_rx_port_mixer_controls[] = {
+	SOC_SINGLE_EXT("MI2S_TX", MSM_BACKEND_DAI_HDMI_RX,
+	MSM_BACKEND_DAI_MI2S_TX, 1, 0, msm_routing_get_port_mixer,
+	msm_routing_put_port_mixer),
+};
+
+static const struct snd_kcontrol_new sec_i2s_rx_port_mixer_controls[] = {
+	SOC_SINGLE_EXT("MI2S_TX", MSM_BACKEND_DAI_SEC_I2S_RX,
+	MSM_BACKEND_DAI_MI2S_TX, 1, 0, msm_routing_get_port_mixer,
+	msm_routing_put_port_mixer),
+};
+
+static const struct snd_kcontrol_new mi2s_rx_port_mixer_controls[] = {
+	SOC_SINGLE_EXT("SLIM_1_TX", MSM_BACKEND_DAI_MI2S_RX,
+	MSM_BACKEND_DAI_SLIMBUS_1_TX, 1, 0, msm_routing_get_port_mixer,
+	msm_routing_put_port_mixer),
+};
+
 static const struct snd_kcontrol_new fm_switch_mixer_controls =
 	SOC_SINGLE_EXT("Switch", SND_SOC_NOPM,
 	0, 1, 0, msm_routing_get_switch_mixer,
 	msm_routing_put_switch_mixer);
 
+static const struct snd_kcontrol_new pcm_rx_switch_mixer_controls =
+	SOC_SINGLE_EXT("Switch", SND_SOC_NOPM,
+	0, 1, 0, msm_routing_get_fm_pcmrx_switch_mixer,
+	msm_routing_put_fm_pcmrx_switch_mixer);
+
 static const struct snd_kcontrol_new int_fm_vol_mixer_controls[] = {
 	SOC_SINGLE_EXT_TLV("Internal FM RX Volume", SND_SOC_NOPM, 0,
-	INT_FM_RX_VOL_GAIN, 0, msm_routing_get_fm_vol_mixer,
+	INT_RX_VOL_GAIN, 0, msm_routing_get_fm_vol_mixer,
 	msm_routing_set_fm_vol_mixer, fm_rx_vol_gain),
 };
 
@@ -1113,7 +1382,6 @@
 	INT_RX_VOL_GAIN, 0, msm_routing_get_compressed_vol_mixer,
 	msm_routing_set_compressed_vol_mixer, compressed_rx_vol_gain),
 };
-
 static const struct snd_kcontrol_new eq_enable_mixer_controls[] = {
 	SOC_SINGLE_EXT("MultiMedia1 EQ Enable", SND_SOC_NOPM,
 	MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_eq_enable_mixer,
@@ -1302,6 +1570,8 @@
 	SND_SOC_DAPM_AIF_OUT("MM_UL2", "MultiMedia2 Capture", 0, 0, 0, 0),
 	SND_SOC_DAPM_AIF_IN("CS-VOICE_DL1", "CS-VOICE Playback", 0, 0, 0, 0),
 	SND_SOC_DAPM_AIF_OUT("CS-VOICE_UL1", "CS-VOICE Capture", 0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_IN("VoLTE_DL", "VoLTE Playback", 0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("VoLTE_UL", "VoLTE Capture", 0, 0, 0, 0),
 	SND_SOC_DAPM_AIF_OUT("VOIP_UL", "VoIP Capture", 0, 0, 0, 0),
 	SND_SOC_DAPM_AIF_IN("SLIM0_DL_HL", "SLIMBUS0_HOSTLESS Playback",
 		0, 0, 0, 0),
@@ -1312,14 +1582,18 @@
 	SND_SOC_DAPM_AIF_OUT("INTFM_UL_HL", "INT_FM_HOSTLESS Capture",
 		0, 0, 0, 0),
 	SND_SOC_DAPM_AIF_IN("HDMI_DL_HL", "HDMI_HOSTLESS Playback", 0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_IN("SEC_I2S_DL_HL", "SEC_I2S_RX_HOSTLESS Playback",
+		0, 0, 0, 0),
 	SND_SOC_DAPM_AIF_IN("AUXPCM_DL_HL", "AUXPCM_HOSTLESS Playback",
 		0, 0, 0, 0),
 	SND_SOC_DAPM_AIF_OUT("AUXPCM_UL_HL", "AUXPCM_HOSTLESS Capture",
 		0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("MI2S_UL_HL", "MI2S_TX_HOSTLESS Capture",
+		0, 0, 0, 0),
 
 	/* Backend AIF */
 	/* Stream name equals to backend dai link stream name
-	 */
+	*/
 	SND_SOC_DAPM_AIF_OUT("PRI_I2S_RX", "Primary I2S Playback", 0, 0, 0, 0),
 	SND_SOC_DAPM_AIF_OUT("SEC_I2S_RX", "Secondary I2S Playback",
 				0, 0, 0 , 0),
@@ -1327,6 +1601,7 @@
 	SND_SOC_DAPM_AIF_OUT("HDMI", "HDMI Playback", 0, 0, 0 , 0),
 	SND_SOC_DAPM_AIF_OUT("MI2S_RX", "MI2S Playback", 0, 0, 0, 0),
 	SND_SOC_DAPM_AIF_IN("PRI_I2S_TX", "Primary I2S Capture", 0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_IN("MI2S_TX", "MI2S Capture", 0, 0, 0, 0),
 	SND_SOC_DAPM_AIF_IN("SLIMBUS_0_TX", "Slimbus Capture", 0, 0, 0, 0),
 	SND_SOC_DAPM_AIF_OUT("INT_BT_SCO_RX", "Internal BT-SCO Playback",
 				0, 0, 0 , 0),
@@ -1343,10 +1618,15 @@
 	/* incall */
 	SND_SOC_DAPM_AIF_OUT("VOICE_PLAYBACK_TX", "Voice Farend Playback",
 				0, 0, 0 , 0),
+	SND_SOC_DAPM_AIF_OUT("SLIMBUS_4_RX", "Slimbus4 Playback",
+				0, 0, 0 , 0),
 	SND_SOC_DAPM_AIF_IN("INCALL_RECORD_TX", "Voice Uplink Capture",
 				0, 0, 0, 0),
 	SND_SOC_DAPM_AIF_IN("INCALL_RECORD_RX", "Voice Downlink Capture",
 				0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_IN("SLIMBUS_4_TX", "Slimbus4 Capture",
+				0, 0, 0, 0),
+
 	SND_SOC_DAPM_AIF_OUT("AUX_PCM_RX", "AUX PCM Playback", 0, 0, 0, 0),
 	SND_SOC_DAPM_AIF_IN("AUX_PCM_TX", "AUX PCM Capture", 0, 0, 0, 0),
 	SND_SOC_DAPM_AIF_IN("VOICE_STUB_DL", "VOICE_STUB Playback", 0, 0, 0, 0),
@@ -1355,10 +1635,14 @@
 	SND_SOC_DAPM_AIF_IN("STUB_TX", "Stub Capture", 0, 0, 0, 0),
 	SND_SOC_DAPM_AIF_OUT("SLIMBUS_1_RX", "Slimbus1 Playback", 0, 0, 0, 0),
 	SND_SOC_DAPM_AIF_IN("SLIMBUS_1_TX", "Slimbus1 Capture", 0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_IN("STUB_1_TX", "Stub1 Capture", 0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("SLIMBUS_3_RX", "Slimbus3 Playback", 0, 0, 0, 0),
 
 	/* Switch Definitions */
 	SND_SOC_DAPM_SWITCH("SLIMBUS_DL_HL", SND_SOC_NOPM, 0, 0,
 				&fm_switch_mixer_controls),
+	SND_SOC_DAPM_SWITCH("PCM_RX_DL_HL", SND_SOC_NOPM, 0, 0,
+				&pcm_rx_switch_mixer_controls),
 	/* Mixer definitions */
 	SND_SOC_DAPM_MIXER("PRI_RX Audio Mixer", SND_SOC_NOPM, 0, 0,
 	pri_i2s_rx_mixer_controls, ARRAY_SIZE(pri_i2s_rx_mixer_controls)),
@@ -1380,6 +1664,9 @@
 	SND_SOC_DAPM_MIXER("Incall_Music Audio Mixer", SND_SOC_NOPM, 0, 0,
 			incall_music_delivery_mixer_controls,
 			ARRAY_SIZE(incall_music_delivery_mixer_controls)),
+	SND_SOC_DAPM_MIXER("SLIMBUS_4_RX Audio Mixer", SND_SOC_NOPM, 0, 0,
+			slimbus_4_rx_mixer_controls,
+			ARRAY_SIZE(slimbus_4_rx_mixer_controls)),
 	/* Voice Mixer */
 	SND_SOC_DAPM_MIXER("PRI_RX_Voice Mixer",
 				SND_SOC_NOPM, 0, 0, pri_rx_voice_mixer_controls,
@@ -1408,12 +1695,19 @@
 				SND_SOC_NOPM, 0, 0,
 				hdmi_rx_voice_mixer_controls,
 				ARRAY_SIZE(hdmi_rx_voice_mixer_controls)),
+	SND_SOC_DAPM_MIXER("MI2S_RX_Voice Mixer",
+				SND_SOC_NOPM, 0, 0,
+				mi2s_rx_voice_mixer_controls,
+				ARRAY_SIZE(mi2s_rx_voice_mixer_controls)),
 	SND_SOC_DAPM_MIXER("Voice_Tx Mixer",
 				SND_SOC_NOPM, 0, 0, tx_voice_mixer_controls,
 				ARRAY_SIZE(tx_voice_mixer_controls)),
 	SND_SOC_DAPM_MIXER("Voip_Tx Mixer",
 				SND_SOC_NOPM, 0, 0, tx_voip_mixer_controls,
 				ARRAY_SIZE(tx_voip_mixer_controls)),
+	SND_SOC_DAPM_MIXER("VoLTE_Tx Mixer",
+				SND_SOC_NOPM, 0, 0, tx_volte_mixer_controls,
+				ARRAY_SIZE(tx_volte_mixer_controls)),
 	SND_SOC_DAPM_MIXER("INTERNAL_BT_SCO_RX Audio Mixer", SND_SOC_NOPM, 0, 0,
 	int_bt_sco_rx_mixer_controls, ARRAY_SIZE(int_bt_sco_rx_mixer_controls)),
 	SND_SOC_DAPM_MIXER("INTERNAL_FM_RX Audio Mixer", SND_SOC_NOPM, 0, 0,
@@ -1426,6 +1720,8 @@
 	stub_rx_mixer_controls, ARRAY_SIZE(stub_rx_mixer_controls)),
 	SND_SOC_DAPM_MIXER("SLIMBUS_1_RX Mixer", SND_SOC_NOPM, 0, 0,
 	slimbus_1_rx_mixer_controls, ARRAY_SIZE(slimbus_1_rx_mixer_controls)),
+	SND_SOC_DAPM_MIXER("SLIMBUS_3_RX_Voice Mixer", SND_SOC_NOPM, 0, 0,
+	slimbus_3_rx_mixer_controls, ARRAY_SIZE(slimbus_3_rx_mixer_controls)),
 	SND_SOC_DAPM_MIXER("SLIMBUS_0_RX Port Mixer",
 	SND_SOC_NOPM, 0, 0, sbus_0_rx_port_mixer_controls,
 	ARRAY_SIZE(sbus_0_rx_port_mixer_controls)),
@@ -1438,6 +1734,25 @@
 	SND_SOC_DAPM_MIXER("INTERNAL_BT_SCO_RX Port Mixer", SND_SOC_NOPM, 0, 0,
 	bt_sco_rx_port_mixer_controls,
 	ARRAY_SIZE(bt_sco_rx_port_mixer_controls)),
+	SND_SOC_DAPM_MIXER("AFE_PCM_RX Port Mixer",
+	SND_SOC_NOPM, 0, 0, afe_pcm_rx_port_mixer_controls,
+	ARRAY_SIZE(afe_pcm_rx_port_mixer_controls)),
+	SND_SOC_DAPM_MIXER("HDMI_RX Port Mixer",
+	SND_SOC_NOPM, 0, 0, hdmi_rx_port_mixer_controls,
+	ARRAY_SIZE(hdmi_rx_port_mixer_controls)),
+	SND_SOC_DAPM_MIXER("SEC_I2S_RX Port Mixer",
+	SND_SOC_NOPM, 0, 0, sec_i2s_rx_port_mixer_controls,
+	ARRAY_SIZE(sec_i2s_rx_port_mixer_controls)),
+	SND_SOC_DAPM_MIXER("SLIMBUS_3_RX Port Mixer",
+	SND_SOC_NOPM, 0, 0, sbus_3_rx_port_mixer_controls,
+	ARRAY_SIZE(sbus_3_rx_port_mixer_controls)),
+	SND_SOC_DAPM_MIXER("MI2S_RX Port Mixer", SND_SOC_NOPM, 0, 0,
+	mi2s_rx_port_mixer_controls, ARRAY_SIZE(mi2s_rx_port_mixer_controls)),
+
+	/* Virtual pins to force the back-ends on for now */
+	SND_SOC_DAPM_OUTPUT("BE_OUT"),
+	SND_SOC_DAPM_INPUT("BE_IN"),
 };
 
 static const struct snd_soc_dapm_route intercon[] = {
@@ -1469,9 +1784,13 @@
 	{"Incall_Music Audio Mixer", "MultiMedia1", "MM_DL1"},
 	{"Incall_Music Audio Mixer", "MultiMedia2", "MM_DL2"},
 	{"VOICE_PLAYBACK_TX", NULL, "Incall_Music Audio Mixer"},
+	{"SLIMBUS_4_RX Audio Mixer", "MultiMedia1", "MM_DL1"},
+	{"SLIMBUS_4_RX Audio Mixer", "MultiMedia2", "MM_DL2"},
+	{"SLIMBUS_4_RX", NULL, "SLIMBUS_4_RX Audio Mixer"},
 
 	{"MultiMedia1 Mixer", "VOC_REC_UL", "INCALL_RECORD_TX"},
 	{"MultiMedia1 Mixer", "VOC_REC_DL", "INCALL_RECORD_RX"},
+	{"MultiMedia1 Mixer", "SLIM_4_TX", "SLIMBUS_4_TX"},
 	{"MI2S_RX Audio Mixer", "MultiMedia1", "MM_DL1"},
 	{"MI2S_RX Audio Mixer", "MultiMedia2", "MM_DL2"},
 	{"MI2S_RX Audio Mixer", "MultiMedia3", "MM_DL3"},
@@ -1479,6 +1798,8 @@
 	{"MI2S_RX", NULL, "MI2S_RX Audio Mixer"},
 
 	{"MultiMedia1 Mixer", "PRI_TX", "PRI_I2S_TX"},
+	{"MultiMedia1 Mixer", "MI2S_TX", "MI2S_TX"},
+	{"MultiMedia2 Mixer", "MI2S_TX", "MI2S_TX"},
 	{"MultiMedia1 Mixer", "SLIM_0_TX", "SLIMBUS_0_TX"},
 	{"MultiMedia1 Mixer", "AUX_PCM_UL_TX", "AUX_PCM_TX"},
 
@@ -1515,41 +1836,56 @@
 	{"AUX_PCM_RX", NULL, "AUX_PCM_RX Audio Mixer"},
 
 	{"PRI_RX_Voice Mixer", "CSVoice", "CS-VOICE_DL1"},
+	{"PRI_RX_Voice Mixer", "VoLTE", "VoLTE_DL"},
 	{"PRI_RX_Voice Mixer", "Voip", "VOIP_DL"},
 	{"PRI_I2S_RX", NULL, "PRI_RX_Voice Mixer"},
 
 	{"SEC_RX_Voice Mixer", "CSVoice", "CS-VOICE_DL1"},
+	{"SEC_RX_Voice Mixer", "VoLTE", "VoLTE_DL"},
 	{"SEC_RX_Voice Mixer", "Voip", "VOIP_DL"},
 	{"SEC_I2S_RX", NULL, "SEC_RX_Voice Mixer"},
 
 	{"SLIM_0_RX_Voice Mixer", "CSVoice", "CS-VOICE_DL1"},
+	{"SLIM_0_RX_Voice Mixer", "VoLTE", "VoLTE_DL"},
 	{"SLIM_0_RX_Voice Mixer", "Voip", "VOIP_DL"},
 	{"SLIMBUS_0_RX", NULL, "SLIM_0_RX_Voice Mixer"},
 
 	{"INTERNAL_BT_SCO_RX_Voice Mixer", "CSVoice", "CS-VOICE_DL1"},
+	{"INTERNAL_BT_SCO_RX_Voice Mixer", "VoLTE", "VoLTE_DL"},
 	{"INTERNAL_BT_SCO_RX_Voice Mixer", "Voip", "VOIP_DL"},
 	{"INT_BT_SCO_RX", NULL, "INTERNAL_BT_SCO_RX_Voice Mixer"},
 
 	{"AFE_PCM_RX_Voice Mixer", "CSVoice", "CS-VOICE_DL1"},
+	{"AFE_PCM_RX_Voice Mixer", "VoLTE", "VoLTE_DL"},
 	{"AFE_PCM_RX_Voice Mixer", "Voip", "VOIP_DL"},
 	{"PCM_RX", NULL, "AFE_PCM_RX_Voice Mixer"},
 
 	{"AUX_PCM_RX_Voice Mixer", "CSVoice", "CS-VOICE_DL1"},
+	{"AUX_PCM_RX_Voice Mixer", "VoLTE", "VoLTE_DL"},
 	{"AUX_PCM_RX_Voice Mixer", "Voip", "VOIP_DL"},
 	{"AUX_PCM_RX", NULL, "AUX_PCM_RX_Voice Mixer"},
 
 	{"HDMI_RX_Voice Mixer", "CSVoice", "CS-VOICE_DL1"},
+	{"HDMI_RX_Voice Mixer", "VoLTE", "VoLTE_DL"},
 	{"HDMI_RX_Voice Mixer", "Voip", "VOIP_DL"},
 	{"HDMI", NULL, "HDMI_RX_Voice Mixer"},
 	{"HDMI", NULL, "HDMI_DL_HL"},
 
 	{"Voice_Tx Mixer", "PRI_TX_Voice", "PRI_I2S_TX"},
+	{"Voice_Tx Mixer", "MI2S_TX_Voice", "MI2S_TX"},
 	{"Voice_Tx Mixer", "SLIM_0_TX_Voice", "SLIMBUS_0_TX"},
 	{"Voice_Tx Mixer", "INTERNAL_BT_SCO_TX_Voice", "INT_BT_SCO_TX"},
 	{"Voice_Tx Mixer", "AFE_PCM_TX_Voice", "PCM_TX"},
 	{"Voice_Tx Mixer", "AUX_PCM_TX_Voice", "AUX_PCM_TX"},
 	{"CS-VOICE_UL1", NULL, "Voice_Tx Mixer"},
+	{"VoLTE_Tx Mixer", "PRI_TX_VoLTE", "PRI_I2S_TX"},
+	{"VoLTE_Tx Mixer", "SLIM_0_TX_VoLTE", "SLIMBUS_0_TX"},
+	{"VoLTE_Tx Mixer", "INTERNAL_BT_SCO_TX_VoLTE", "INT_BT_SCO_TX"},
+	{"VoLTE_Tx Mixer", "AFE_PCM_TX_VoLTE", "PCM_TX"},
+	{"VoLTE_Tx Mixer", "AUX_PCM_TX_VoLTE", "AUX_PCM_TX"},
+	{"VoLTE_UL", NULL, "VoLTE_Tx Mixer"},
 	{"Voip_Tx Mixer", "PRI_TX_Voip", "PRI_I2S_TX"},
+	{"Voip_Tx Mixer", "MI2S_TX_Voip", "MI2S_TX"},
 	{"Voip_Tx Mixer", "SLIM_0_TX_Voip", "SLIMBUS_0_TX"},
 	{"Voip_Tx Mixer", "INTERNAL_BT_SCO_TX_Voip", "INT_BT_SCO_TX"},
 	{"Voip_Tx Mixer", "AFE_PCM_TX_Voip", "PCM_TX"},
@@ -1563,10 +1899,17 @@
 	{"INTFM_UL_HL", NULL, "INT_FM_TX"},
 	{"AUX_PCM_RX", NULL, "AUXPCM_DL_HL"},
 	{"AUXPCM_UL_HL", NULL, "AUX_PCM_TX"},
+	{"PCM_RX_DL_HL", "Switch", "SLIM0_DL_HL"},
+	{"PCM_RX", NULL, "PCM_RX_DL_HL"},
+	{"MI2S_UL_HL", NULL, "MI2S_TX"},
+	{"SEC_I2S_RX", NULL, "SEC_I2S_DL_HL"},
 	{"SLIMBUS_0_RX Port Mixer", "INTERNAL_FM_TX", "INT_FM_TX"},
 	{"SLIMBUS_0_RX Port Mixer", "SLIM_0_TX", "SLIMBUS_0_TX"},
 	{"SLIMBUS_0_RX Port Mixer", "AUX_PCM_UL_TX", "AUX_PCM_TX"},
+	{"SLIMBUS_0_RX Port Mixer", "MI2S_TX", "MI2S_TX"},
 	{"SLIMBUS_0_RX", NULL, "SLIMBUS_0_RX Port Mixer"},
+	{"AFE_PCM_RX Port Mixer", "INTERNAL_FM_TX", "INT_FM_TX"},
+	{"PCM_RX", NULL, "AFE_PCM_RX Port Mixer"},
 
 	{"AUXPCM_RX Port Mixer", "AUX_PCM_UL_TX", "AUX_PCM_TX"},
 	{"AUXPCM_RX Port Mixer", "SLIM_0_TX", "SLIMBUS_0_TX"},
@@ -1575,6 +1918,8 @@
 	{"Voice Stub Tx Mixer", "STUB_TX_HL", "STUB_TX"},
 	{"Voice Stub Tx Mixer", "SLIM_1_TX", "SLIMBUS_1_TX"},
 	{"Voice Stub Tx Mixer", "INTERNAL_BT_SCO_TX", "INT_BT_SCO_TX"},
+	{"Voice Stub Tx Mixer", "STUB_1_TX_HL", "STUB_1_TX"},
+	{"Voice Stub Tx Mixer", "MI2S_TX", "MI2S_TX"},
 	{"VOICE_STUB_UL", NULL, "Voice Stub Tx Mixer"},
 
 	{"STUB_RX Mixer", "Voice Stub", "VOICE_STUB_DL"},
@@ -1582,11 +1927,48 @@
 	{"SLIMBUS_1_RX Mixer", "Voice Stub", "VOICE_STUB_DL"},
 	{"SLIMBUS_1_RX", NULL, "SLIMBUS_1_RX Mixer"},
 	{"INTERNAL_BT_SCO_RX_Voice Mixer", "Voice Stub", "VOICE_STUB_DL"},
+	{"MI2S_RX_Voice Mixer", "Voice Stub", "VOICE_STUB_DL"},
+	{"MI2S_RX", NULL, "MI2S_RX_Voice Mixer"},
+
+	{"SLIMBUS_3_RX_Voice Mixer", "Voice Stub", "VOICE_STUB_DL"},
+	{"SLIMBUS_3_RX", NULL, "SLIMBUS_3_RX_Voice Mixer"},
 
 	{"SLIMBUS_1_RX Port Mixer", "INTERNAL_BT_SCO_TX", "INT_BT_SCO_TX"},
 	{"SLIMBUS_1_RX", NULL, "SLIMBUS_1_RX Port Mixer"},
 	{"INTERNAL_BT_SCO_RX Port Mixer", "SLIM_1_TX", "SLIMBUS_1_TX"},
 	{"INT_BT_SCO_RX", NULL, "INTERNAL_BT_SCO_RX Port Mixer"},
+	{"SLIMBUS_3_RX Port Mixer", "INTERNAL_BT_SCO_RX", "INT_BT_SCO_RX"},
+	{"SLIMBUS_3_RX Port Mixer", "MI2S_TX", "MI2S_TX"},
+	{"SLIMBUS_3_RX", NULL, "SLIMBUS_3_RX Port Mixer"},
+
+	{"HDMI_RX Port Mixer", "MI2S_TX", "MI2S_TX"},
+	{"HDMI", NULL, "HDMI_RX Port Mixer"},
+
+	{"SEC_I2S_RX Port Mixer", "MI2S_TX", "MI2S_TX"},
+	{"SEC_I2S_RX", NULL, "SEC_I2S_RX Port Mixer"},
+
+	{"MI2S_RX Port Mixer", "SLIM_1_TX", "SLIMBUS_1_TX"},
+	{"MI2S_RX", NULL, "MI2S_RX Port Mixer"},
+	/* Backend Enablement */
+
+	{"BE_OUT", NULL, "PRI_I2S_RX"},
+	{"BE_OUT", NULL, "SEC_I2S_RX"},
+	{"BE_OUT", NULL, "SLIMBUS_0_RX"},
+	{"BE_OUT", NULL, "HDMI"},
+	{"BE_OUT", NULL, "MI2S_RX"},
+	{"PRI_I2S_TX", NULL, "BE_IN"},
+	{"MI2S_TX", NULL, "BE_IN"},
+	{"SLIMBUS_0_TX", NULL, "BE_IN" },
+	{"BE_OUT", NULL, "INT_BT_SCO_RX"},
+	{"INT_BT_SCO_TX", NULL, "BE_IN"},
+	{"BE_OUT", NULL, "INT_FM_RX"},
+	{"INT_FM_TX", NULL, "BE_IN"},
+	{"BE_OUT", NULL, "PCM_RX"},
+	{"PCM_TX", NULL, "BE_IN"},
+	{"BE_OUT", NULL, "SLIMBUS_3_RX"},
+	{"BE_OUT", NULL, "AUX_PCM_RX"},
+	{"AUX_PCM_TX", NULL, "BE_IN"},
 };
 
 static int msm_pcm_routing_hw_params(struct snd_pcm_substream *substream,
@@ -1601,7 +1983,8 @@
 	}
 
 	mutex_lock(&routing_lock);
-	msm_bedais[be_id].hw_params = params;
+	msm_bedais[be_id].sample_rate = params_rate(params);
+	msm_bedais[be_id].channel = params_channels(params);
 	mutex_unlock(&routing_lock);
 	return 0;
 }
@@ -1619,7 +2002,6 @@
 	}
 
 	bedai = &msm_bedais[be_id];
-
 	session_type = (substream->stream == SNDRV_PCM_STREAM_PLAYBACK ?
 		0 : 1);
 
@@ -1631,8 +2013,8 @@
 	}
 
 	bedai->active = 0;
-	bedai->hw_params = NULL;
-
+	bedai->sample_rate = 0;
+	bedai->channel = 0;
 	mutex_unlock(&routing_lock);
 
 	return 0;
@@ -1651,15 +2033,8 @@
 		return -EINVAL;
 	}
 
-
 	bedai = &msm_bedais[be_id];
 
-	if (bedai->hw_params == NULL) {
-		pr_err("%s: HW param is not configured", __func__);
-		return -EINVAL;
-	}
-
-
 	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
 		path_type = ADM_PATH_PLAYBACK;
 		session_type = SESSION_TYPE_RX;
@@ -1679,23 +2054,23 @@
 	 * is started.
 	 */
 	bedai->active = 1;
-
 	for_each_set_bit(i, &bedai->fe_sessions, MSM_FRONTEND_DAI_MM_SIZE) {
 		if (fe_dai_map[i][session_type] != INVALID_SESSION) {
 
-			channels = params_channels(bedai->hw_params);
-			if ((substream->stream == SNDRV_PCM_STREAM_PLAYBACK) &&
-				(channels > 2))
+			channels = bedai->channel;
+			if ((substream->stream == SNDRV_PCM_STREAM_PLAYBACK ||
+				substream->stream == SNDRV_PCM_STREAM_CAPTURE)
+				&& (channels > 2))
 				adm_multi_ch_copp_open(bedai->port_id,
 				path_type,
-				params_rate(bedai->hw_params),
+				bedai->sample_rate,
 				channels,
 				DEFAULT_COPP_TOPOLOGY);
 			else
 				adm_open(bedai->port_id,
 				path_type,
-				params_rate(bedai->hw_params),
-				params_channels(bedai->hw_params),
+				bedai->sample_rate,
+				channels,
 				DEFAULT_COPP_TOPOLOGY);
 
 			msm_pcm_routing_build_matrix(i,
@@ -1716,7 +2091,7 @@
 };
 
 static unsigned int msm_routing_read(struct snd_soc_platform *platform,
-				 unsigned int reg)
+				unsigned int reg)
 {
 	dev_dbg(platform->dev, "reg %x\n", reg);
 	return 0;
@@ -1734,7 +2109,7 @@
 static int msm_routing_probe(struct snd_soc_platform *platform)
 {
 	snd_soc_dapm_new_controls(&platform->dapm, msm_qdsp6_widgets,
-			    ARRAY_SIZE(msm_qdsp6_widgets));
+			   ARRAY_SIZE(msm_qdsp6_widgets));
 	snd_soc_dapm_add_routes(&platform->dapm, intercon,
 		ARRAY_SIZE(intercon));
 
@@ -1767,7 +2142,6 @@
 	snd_soc_add_platform_controls(platform,
 				compressed_vol_mixer_controls,
 			ARRAY_SIZE(compressed_vol_mixer_controls));
-
 	return 0;
 }
 
@@ -1780,9 +2154,12 @@
 
 static __devinit int msm_routing_pcm_probe(struct platform_device *pdev)
 {
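+	/* For device-tree probes, pin the device name to "msm-pcm-routing"
+	 * rather than the auto-generated DT name.
+	 */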
+	if (pdev->dev.of_node)
+		dev_set_name(&pdev->dev, "%s", "msm-pcm-routing");
+
 	dev_dbg(&pdev->dev, "dev name %s\n", dev_name(&pdev->dev));
 	return snd_soc_register_platform(&pdev->dev,
-				   &msm_soc_routing_platform);
+				  &msm_soc_routing_platform);
 }
 
 static int msm_routing_pcm_remove(struct platform_device *pdev)
@@ -1791,10 +2168,17 @@
 	return 0;
 }
 
+static const struct of_device_id msm_pcm_routing_dt_match[] = {
+	{.compatible = "qcom,msm-pcm-routing"},
+	{}
+};
+MODULE_DEVICE_TABLE(of, msm_pcm_routing_dt_match);
+
 static struct platform_driver msm_routing_pcm_driver = {
 	.driver = {
 		.name = "msm-pcm-routing",
 		.owner = THIS_MODULE,
+		.of_match_table = msm_pcm_routing_dt_match,
 	},
 	.probe = msm_routing_pcm_probe,
 	.remove = __devexit_p(msm_routing_pcm_remove),
@@ -1809,10 +2193,8 @@
 		return 0;
 	}
 	for (i = 0; i < MSM_BACKEND_DAI_MAX; i++) {
-		if ((test_bit(fedai_id,
-			&msm_bedais[i].fe_sessions))) {
+		if (test_bit(fedai_id, &msm_bedais[i].fe_sessions))
 			return msm_bedais[i].active;
-		}
 	}
 	return 0;
 }
diff --git a/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.h b/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.h
index b971787..32e18d8 100644
--- a/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.h
+++ b/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.h
@@ -13,29 +13,34 @@
 #define _MSM_PCM_ROUTING_H
 #include <sound/apr_audio-v2.h>
 
-#define LPASS_BE_PRI_I2S_RX "(Backend) PRIMARY_I2S_RX"
-#define LPASS_BE_PRI_I2S_TX "(Backend) PRIMARY_I2S_TX"
-#define LPASS_BE_SLIMBUS_0_RX "(Backend) SLIMBUS_0_RX"
-#define LPASS_BE_SLIMBUS_0_TX "(Backend) SLIMBUS_0_TX"
-#define LPASS_BE_HDMI "(Backend) HDMI"
-#define LPASS_BE_INT_BT_SCO_RX "(Backend) INT_BT_SCO_RX"
-#define LPASS_BE_INT_BT_SCO_TX "(Backend) INT_BT_SCO_TX"
-#define LPASS_BE_INT_FM_RX "(Backend) INT_FM_RX"
-#define LPASS_BE_INT_FM_TX "(Backend) INT_FM_TX"
-#define LPASS_BE_AFE_PCM_RX "(Backend) RT_PROXY_DAI_001_RX"
-#define LPASS_BE_AFE_PCM_TX "(Backend) RT_PROXY_DAI_002_TX"
-#define LPASS_BE_AUXPCM_RX "(Backend) AUX_PCM_RX"
-#define LPASS_BE_AUXPCM_TX "(Backend) AUX_PCM_TX"
-#define LPASS_BE_VOICE_PLAYBACK_TX "(Backend) VOICE_PLAYBACK_TX"
-#define LPASS_BE_INCALL_RECORD_RX "(Backend) INCALL_RECORD_TX"
-#define LPASS_BE_INCALL_RECORD_TX "(Backend) INCALL_RECORD_RX"
-#define LPASS_BE_SEC_I2S_RX "(Backend) SECONDARY_I2S_RX"
+#define LPASS_BE_PRI_I2S_RX "PRIMARY_I2S_RX"
+#define LPASS_BE_PRI_I2S_TX "PRIMARY_I2S_TX"
+#define LPASS_BE_SLIMBUS_0_RX "SLIMBUS_0_RX"
+#define LPASS_BE_SLIMBUS_0_TX "SLIMBUS_0_TX"
+#define LPASS_BE_HDMI "HDMI"
+#define LPASS_BE_INT_BT_SCO_RX "INT_BT_SCO_RX"
+#define LPASS_BE_INT_BT_SCO_TX "INT_BT_SCO_TX"
+#define LPASS_BE_INT_FM_RX "INT_FM_RX"
+#define LPASS_BE_INT_FM_TX "INT_FM_TX"
+#define LPASS_BE_AFE_PCM_RX "RT_PROXY_DAI_001_RX"
+#define LPASS_BE_AFE_PCM_TX "RT_PROXY_DAI_002_TX"
+#define LPASS_BE_AUXPCM_RX "AUX_PCM_RX"
+#define LPASS_BE_AUXPCM_TX "AUX_PCM_TX"
+#define LPASS_BE_VOICE_PLAYBACK_TX "VOICE_PLAYBACK_TX"
+#define LPASS_BE_INCALL_RECORD_RX "INCALL_RECORD_TX"
+#define LPASS_BE_INCALL_RECORD_TX "INCALL_RECORD_RX"
+#define LPASS_BE_SEC_I2S_RX "SECONDARY_I2S_RX"
 
-#define LPASS_BE_MI2S_RX "(Backend) MI2S_RX"
-#define LPASS_BE_STUB_RX "(Backend) STUB_RX"
-#define LPASS_BE_STUB_TX "(Backend) STUB_TX"
-#define LPASS_BE_SLIMBUS_1_RX "(Backend) SLIMBUS_1_RX"
-#define LPASS_BE_SLIMBUS_1_TX "(Backend) SLIMBUS_1_TX"
+#define LPASS_BE_MI2S_RX "MI2S_RX"
+#define LPASS_BE_MI2S_TX "MI2S_TX"
+#define LPASS_BE_STUB_RX "STUB_RX"
+#define LPASS_BE_STUB_TX "STUB_TX"
+#define LPASS_BE_SLIMBUS_1_RX "SLIMBUS_1_RX"
+#define LPASS_BE_SLIMBUS_1_TX "SLIMBUS_1_TX"
+#define LPASS_BE_STUB_1_TX "STUB_1_TX"
+#define LPASS_BE_SLIMBUS_3_RX "SLIMBUS_3_RX"
+#define LPASS_BE_SLIMBUS_4_RX "SLIMBUS_4_RX"
+#define LPASS_BE_SLIMBUS_4_TX "SLIMBUS_4_TX"
 
 /* For multimedia front-ends, asm session is allocated dynamically.
  * Hence, asm session/multimedia front-end mapping has to be maintained.
@@ -53,6 +58,7 @@
 	MSM_FRONTEND_DAI_AFE_RX,
 	MSM_FRONTEND_DAI_AFE_TX,
 	MSM_FRONTEND_DAI_VOICE_STUB,
+	MSM_FRONTEND_DAI_VOLTE,
 	MSM_FRONTEND_DAI_MAX,
 };
 
@@ -77,10 +83,16 @@
 	MSM_BACKEND_DAI_INCALL_RECORD_RX,
 	MSM_BACKEND_DAI_INCALL_RECORD_TX,
 	MSM_BACKEND_DAI_MI2S_RX,
+	MSM_BACKEND_DAI_MI2S_TX,
 	MSM_BACKEND_DAI_SEC_I2S_RX,
 	MSM_BACKEND_DAI_SLIMBUS_1_RX,
 	MSM_BACKEND_DAI_SLIMBUS_1_TX,
-	MSM_BACKEND_DAI_INVALID,
+	MSM_BACKEND_DAI_SLIMBUS_4_RX,
+	MSM_BACKEND_DAI_SLIMBUS_4_TX,
+	MSM_BACKEND_DAI_SLIMBUS_3_RX,
+	MSM_BACKEND_DAI_EXTPROC_RX,
+	MSM_BACKEND_DAI_EXTPROC_TX,
+	MSM_BACKEND_DAI_EXTPROC_EC_TX,
 	MSM_BACKEND_DAI_MAX,
 };
 
@@ -90,6 +102,9 @@
  */
 void msm_pcm_routing_reg_phy_stream(int fedai_id, int dspst_id,
 	int stream_type);
+void msm_pcm_routing_reg_psthr_stream(int fedai_id, int dspst_id,
+		int stream_type);
+
 void msm_pcm_routing_dereg_phy_stream(int fedai_id, int stream_type);
 
 int lpa_set_volume(unsigned volume);
diff --git a/sound/soc/msm/qdsp6v2/msm-pcm-voice-v2.c b/sound/soc/msm/qdsp6v2/msm-pcm-voice-v2.c
new file mode 100644
index 0000000..206e881
--- /dev/null
+++ b/sound/soc/msm/qdsp6v2/msm-pcm-voice-v2.c
@@ -0,0 +1,506 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/time.h>
+#include <linux/wait.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/dma-mapping.h>
+#include <sound/core.h>
+#include <sound/soc.h>
+#include <sound/soc-dapm.h>
+#include <sound/pcm.h>
+#include <sound/initval.h>
+#include <sound/control.h>
+#include <asm/dma.h>
+
+#include "msm-pcm-voice-v2.h"
+#include "q6voice.h"
+
+static struct msm_voice voice_info[VOICE_SESSION_INDEX_MAX];
+
+static struct snd_pcm_hardware msm_pcm_hardware = {
+
+	.info =                 SNDRV_PCM_INFO_INTERLEAVED,
+	.formats =              SNDRV_PCM_FMTBIT_S16_LE,
+	.rates =                SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000,
+	.rate_min =             8000,
+	.rate_max =             16000,
+	.channels_min =         1,
+	.channels_max =         1,
+
+	.buffer_bytes_max =     4096 * 2,
+	.period_bytes_min =     4096,
+	.period_bytes_max =     4096,
+	.periods_min =          2,
+	.periods_max =          2,
+
+	.fifo_size =            0,
+};
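+
+/* A session is treated as VoLTE when it maps to the VOLTE_SESSION_INDEX slot
+ * of voice_info[]; otherwise it is the regular CS-voice session.
+ */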
+static int is_volte(struct msm_voice *pvolte)
+{
+	if (pvolte == &voice_info[VOLTE_SESSION_INDEX])
+		return true;
+	else
+		return false;
+}
+
+static int msm_pcm_playback_prepare(struct snd_pcm_substream *substream)
+{
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct msm_voice *prtd = runtime->private_data;
+
+	pr_debug("%s\n", __func__);
+
+	if (!prtd->playback_start)
+		prtd->playback_start = 1;
+
+	return 0;
+}
+
+static int msm_pcm_capture_prepare(struct snd_pcm_substream *substream)
+{
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct msm_voice *prtd = runtime->private_data;
+
+	pr_debug("%s\n", __func__);
+
+	if (!prtd->capture_start)
+		prtd->capture_start = 1;
+
+	return 0;
+}
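+
+/* Pick the CS-voice or VoLTE session slot based on the PCM device name and
+ * attach it to the substream as private data.
+ */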
+static int msm_pcm_open(struct snd_pcm_substream *substream)
+{
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct msm_voice *voice;
+
+	if (!strncmp("VoLTE", substream->pcm->id, 5)) {
+		voice = &voice_info[VOLTE_SESSION_INDEX];
+		pr_debug("%s: Open VoLTE Substream Id=%s\n",
+				__func__, substream->pcm->id);
+	} else {
+		voice = &voice_info[VOICE_SESSION_INDEX];
+		pr_debug("%s: Open VOICE Substream Id=%s\n",
+				__func__, substream->pcm->id);
+	}
+	mutex_lock(&voice->lock);
+
+	runtime->hw = msm_pcm_hardware;
+
+	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+		voice->playback_substream = substream;
+	else if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
+		voice->capture_substream = substream;
+
+	voice->instance++;
+	pr_debug("%s: Instance = %d, Stream ID = %s\n",
+			__func__ , voice->instance, substream->pcm->id);
+	runtime->private_data = voice;
+
+	mutex_unlock(&voice->lock);
+
+	return 0;
+}
+
+static int msm_pcm_playback_close(struct snd_pcm_substream *substream)
+{
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct msm_voice *prtd = runtime->private_data;
+
+	pr_debug("%s\n", __func__);
+
+	if (prtd->playback_start)
+		prtd->playback_start = 0;
+
+	prtd->playback_substream = NULL;
+
+	return 0;
+}
+
+static int msm_pcm_capture_close(struct snd_pcm_substream *substream)
+{
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct msm_voice *prtd = runtime->private_data;
+
+	pr_debug("%s\n", __func__);
+
+	if (prtd->capture_start)
+		prtd->capture_start = 0;
+	prtd->capture_substream = NULL;
+
+	return 0;
+}
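+
+/* End the voice call once neither the playback nor the capture stream is
+ * running any more.
+ */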
+static int msm_pcm_close(struct snd_pcm_substream *substream)
+{
+
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct msm_voice *prtd = runtime->private_data;
+	uint16_t session_id = 0;
+	int ret = 0;
+
+	mutex_lock(&prtd->lock);
+	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+		ret = msm_pcm_playback_close(substream);
+	else if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
+		ret = msm_pcm_capture_close(substream);
+
+	prtd->instance--;
+	if (!prtd->playback_start && !prtd->capture_start) {
+		pr_debug("end voice call\n");
+		if (is_volte(prtd))
+			session_id = voc_get_session_id(VOLTE_SESSION_NAME);
+		else
+			session_id = voc_get_session_id(VOICE_SESSION_NAME);
+		voc_end_voice_call(session_id);
+	}
+	mutex_unlock(&prtd->lock);
+
+	return ret;
+}
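+
+/* Start the voice call only after both the playback and capture streams have
+ * been prepared.
+ */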
+static int msm_pcm_prepare(struct snd_pcm_substream *substream)
+{
+	int ret = 0;
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct msm_voice *prtd = runtime->private_data;
+	uint16_t session_id = 0;
+
+	mutex_lock(&prtd->lock);
+
+	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+		ret = msm_pcm_playback_prepare(substream);
+	else if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
+		ret = msm_pcm_capture_prepare(substream);
+
+	if (prtd->playback_start && prtd->capture_start) {
+		if (is_volte(prtd))
+			session_id = voc_get_session_id(VOLTE_SESSION_NAME);
+		else
+			session_id = voc_get_session_id(VOICE_SESSION_NAME);
+		voc_start_voice_call(session_id);
+	}
+	mutex_unlock(&prtd->lock);
+
+	return ret;
+}
+
+static int msm_pcm_hw_params(struct snd_pcm_substream *substream,
+				struct snd_pcm_hw_params *params)
+{
+
+	pr_debug("%s: Voice\n", __func__);
+
+	snd_pcm_set_runtime_buffer(substream, &substream->dma_buffer);
+
+	return 0;
+}
+
+static int msm_voice_volume_get(struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_value *ucontrol)
+{
+	ucontrol->value.integer.value[0] = 0;
+	return 0;
+}
+
+static int msm_voice_volume_put(struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_value *ucontrol)
+{
+	int volume = ucontrol->value.integer.value[0];
+	pr_debug("%s: volume: %d\n", __func__, volume);
+	voc_set_rx_vol_index(voc_get_session_id(VOICE_SESSION_NAME),
+						RX_PATH, volume);
+	return 0;
+}
+
+static int msm_volte_volume_get(struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_value *ucontrol)
+{
+	ucontrol->value.integer.value[0] = 0;
+	return 0;
+}
+
+static int msm_volte_volume_put(struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_value *ucontrol)
+{
+	int volume = ucontrol->value.integer.value[0];
+	pr_debug("%s: volume: %d\n", __func__, volume);
+	voc_set_rx_vol_index(voc_get_session_id(VOLTE_SESSION_NAME),
+						RX_PATH, volume);
+	return 0;
+}
+
+static int msm_voice_mute_get(struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_value *ucontrol)
+{
+	ucontrol->value.integer.value[0] = 0;
+	return 0;
+}
+
+static int msm_voice_mute_put(struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_value *ucontrol)
+{
+	int mute = ucontrol->value.integer.value[0];
+
+	pr_debug("%s: mute=%d\n", __func__, mute);
+
+	voc_set_tx_mute(voc_get_session_id(VOICE_SESSION_NAME), TX_PATH, mute);
+
+	return 0;
+}
+
+static int msm_volte_mute_get(struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_value *ucontrol)
+{
+	ucontrol->value.integer.value[0] = 0;
+	return 0;
+}
+
+static int msm_volte_mute_put(struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_value *ucontrol)
+{
+	int mute = ucontrol->value.integer.value[0];
+
+	pr_debug("%s: mute=%d\n", __func__, mute);
+
+	voc_set_tx_mute(voc_get_session_id(VOLTE_SESSION_NAME), TX_PATH, mute);
+
+	return 0;
+}
+
+static int msm_voice_rx_device_mute_get(struct snd_kcontrol *kcontrol,
+					struct snd_ctl_elem_value *ucontrol)
+{
+	ucontrol->value.integer.value[0] =
+		voc_get_rx_device_mute(voc_get_session_id(VOICE_SESSION_NAME));
+	return 0;
+}
+
+static int msm_voice_rx_device_mute_put(struct snd_kcontrol *kcontrol,
+					struct snd_ctl_elem_value *ucontrol)
+{
+	int mute = ucontrol->value.integer.value[0];
+
+	pr_debug("%s: mute=%d\n", __func__, mute);
+
+	voc_set_rx_device_mute(voc_get_session_id(VOICE_SESSION_NAME), mute);
+
+	return 0;
+}
+
+static int msm_volte_rx_device_mute_get(struct snd_kcontrol *kcontrol,
+					struct snd_ctl_elem_value *ucontrol)
+{
+	ucontrol->value.integer.value[0] =
+		voc_get_rx_device_mute(voc_get_session_id(VOLTE_SESSION_NAME));
+	return 0;
+}
+
+static int msm_volte_rx_device_mute_put(struct snd_kcontrol *kcontrol,
+					struct snd_ctl_elem_value *ucontrol)
+{
+	int mute = ucontrol->value.integer.value[0];
+
+	pr_debug("%s: mute=%d\n", __func__, mute);
+
+	voc_set_rx_device_mute(voc_get_session_id(VOLTE_SESSION_NAME), mute);
+
+	return 0;
+}
+
+static const char * const tty_mode[] = {"OFF", "HCO", "VCO", "FULL"};
+static const struct soc_enum msm_tty_mode_enum[] = {
+		SOC_ENUM_SINGLE_EXT(4, tty_mode),
+};
+
+static int msm_voice_tty_mode_get(struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_value *ucontrol)
+{
+	ucontrol->value.integer.value[0] =
+		voc_get_tty_mode(voc_get_session_id(VOICE_SESSION_NAME));
+	return 0;
+}
+
+static int msm_voice_tty_mode_put(struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_value *ucontrol)
+{
+	int tty_mode = ucontrol->value.integer.value[0];
+
+	pr_debug("%s: tty_mode=%d\n", __func__, tty_mode);
+
+	voc_set_tty_mode(voc_get_session_id(VOICE_SESSION_NAME), tty_mode);
+
+	return 0;
+}
+
+static int msm_voice_widevoice_put(struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_value *ucontrol)
+{
+	int wv_enable = ucontrol->value.integer.value[0];
+
+	pr_debug("%s: wv enable=%d\n", __func__, wv_enable);
+
+	voc_set_widevoice_enable(voc_get_session_id(VOICE_SESSION_NAME),
+				 wv_enable);
+
+	return 0;
+}
+
+static int msm_voice_widevoice_get(struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_value *ucontrol)
+{
+	ucontrol->value.integer.value[0] =
+	       voc_get_widevoice_enable(voc_get_session_id(VOICE_SESSION_NAME));
+	return 0;
+}
+
+static int msm_voice_slowtalk_put(struct snd_kcontrol *kcontrol,
+			struct snd_ctl_elem_value *ucontrol)
+{
+	int st_enable = ucontrol->value.integer.value[0];
+
+	pr_debug("%s: st enable=%d\n", __func__, st_enable);
+
+	voc_set_pp_enable(voc_get_session_id(VOICE_SESSION_NAME),
+			MODULE_ID_VOICE_MODULE_ST, st_enable);
+
+	return 0;
+}
+
+static int msm_voice_slowtalk_get(struct snd_kcontrol *kcontrol,
+			struct snd_ctl_elem_value *ucontrol)
+{
+	ucontrol->value.integer.value[0] =
+		voc_get_pp_enable(voc_get_session_id(VOICE_SESSION_NAME),
+				MODULE_ID_VOICE_MODULE_ST);
+	return 0;
+}
+
+static int msm_voice_fens_put(struct snd_kcontrol *kcontrol,
+			struct snd_ctl_elem_value *ucontrol)
+{
+	int fens_enable = ucontrol->value.integer.value[0];
+
+	pr_debug("%s: fens enable=%d\n", __func__, fens_enable);
+
+	voc_set_pp_enable(voc_get_session_id(VOICE_SESSION_NAME),
+			MODULE_ID_VOICE_MODULE_FENS, fens_enable);
+
+	return 0;
+}
+
+static int msm_voice_fens_get(struct snd_kcontrol *kcontrol,
+			struct snd_ctl_elem_value *ucontrol)
+{
+	ucontrol->value.integer.value[0] =
+		voc_get_pp_enable(voc_get_session_id(VOICE_SESSION_NAME),
+				MODULE_ID_VOICE_MODULE_FENS);
+	return 0;
+}
+
+static struct snd_kcontrol_new msm_voice_controls[] = {
+	SOC_SINGLE_EXT("Voice Rx Device Mute", SND_SOC_NOPM, 0, 1, 0,
+				msm_voice_rx_device_mute_get,
+				msm_voice_rx_device_mute_put),
+	SOC_SINGLE_EXT("Voice Tx Mute", SND_SOC_NOPM, 0, 1, 0,
+				msm_voice_mute_get, msm_voice_mute_put),
+	SOC_SINGLE_EXT("Voice Rx Volume", SND_SOC_NOPM, 0, 5, 0,
+				msm_voice_volume_get, msm_voice_volume_put),
+	SOC_ENUM_EXT("TTY Mode", msm_tty_mode_enum[0], msm_voice_tty_mode_get,
+				msm_voice_tty_mode_put),
+	SOC_SINGLE_EXT("Widevoice Enable", SND_SOC_NOPM, 0, 1, 0,
+			msm_voice_widevoice_get, msm_voice_widevoice_put),
+	SOC_SINGLE_EXT("Slowtalk Enable", SND_SOC_NOPM, 0, 1, 0,
+				msm_voice_slowtalk_get, msm_voice_slowtalk_put),
+	SOC_SINGLE_EXT("FENS Enable", SND_SOC_NOPM, 0, 1, 0,
+				msm_voice_fens_get, msm_voice_fens_put),
+	SOC_SINGLE_EXT("VoLTE Rx Device Mute", SND_SOC_NOPM, 0, 1, 0,
+			msm_volte_rx_device_mute_get,
+			msm_volte_rx_device_mute_put),
+	SOC_SINGLE_EXT("VoLTE Tx Mute", SND_SOC_NOPM, 0, 1, 0,
+				msm_volte_mute_get, msm_volte_mute_put),
+	SOC_SINGLE_EXT("VoLTE Rx Volume", SND_SOC_NOPM, 0, 5, 0,
+				msm_volte_volume_get, msm_volte_volume_put),
+};
+
+static struct snd_pcm_ops msm_pcm_ops = {
+	.open           = msm_pcm_open,
+	.hw_params	= msm_pcm_hw_params,
+	.close          = msm_pcm_close,
+	.prepare        = msm_pcm_prepare,
+};
+
+static int msm_asoc_pcm_new(struct snd_soc_pcm_runtime *rtd)
+{
+	struct snd_card *card = rtd->card->snd_card;
+	int ret = 0;
+
+	if (!card->dev->coherent_dma_mask)
+		card->dev->coherent_dma_mask = DMA_BIT_MASK(32);
+	return ret;
+}
+
+static int msm_pcm_voice_probe(struct snd_soc_platform *platform)
+{
+	snd_soc_add_platform_controls(platform, msm_voice_controls,
+					ARRAY_SIZE(msm_voice_controls));
+
+	return 0;
+}
+
+static struct snd_soc_platform_driver msm_soc_platform = {
+	.ops		= &msm_pcm_ops,
+	.pcm_new	= msm_asoc_pcm_new,
+	.probe		= msm_pcm_voice_probe,
+};
+
+static __devinit int msm_pcm_probe(struct platform_device *pdev)
+{
+	pr_debug("%s: dev name %s\n", __func__, dev_name(&pdev->dev));
+	return snd_soc_register_platform(&pdev->dev,
+				   &msm_soc_platform);
+}
+
+static int msm_pcm_remove(struct platform_device *pdev)
+{
+	snd_soc_unregister_platform(&pdev->dev);
+	return 0;
+}
+
+static struct platform_driver msm_pcm_driver = {
+	.driver = {
+		.name = "msm-pcm-voice",
+		.owner = THIS_MODULE,
+	},
+	.probe = msm_pcm_probe,
+	.remove = __devexit_p(msm_pcm_remove),
+};
+
+static int __init msm_soc_platform_init(void)
+{
+	memset(&voice_info, 0, sizeof(voice_info));
+	mutex_init(&voice_info[VOICE_SESSION_INDEX].lock);
+	mutex_init(&voice_info[VOLTE_SESSION_INDEX].lock);
+
+	return platform_driver_register(&msm_pcm_driver);
+}
+module_init(msm_soc_platform_init);
+
+static void __exit msm_soc_platform_exit(void)
+{
+	platform_driver_unregister(&msm_pcm_driver);
+}
+module_exit(msm_soc_platform_exit);
+
+MODULE_DESCRIPTION("Voice PCM module platform driver");
+MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/msm/qdsp6v2/msm-pcm-voice-v2.h b/sound/soc/msm/qdsp6v2/msm-pcm-voice-v2.h
new file mode 100644
index 0000000..64c0848
--- /dev/null
+++ b/sound/soc/msm/qdsp6v2/msm-pcm-voice-v2.h
@@ -0,0 +1,37 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#ifndef _MSM_PCM_VOICE_H
+#define _MSM_PCM_VOICE_H
+#include <sound/apr_audio.h>
+
+enum {
+	VOICE_SESSION_INDEX,
+	VOLTE_SESSION_INDEX,
+	VOICE_SESSION_INDEX_MAX,
+};
+
+struct msm_voice {
+	struct snd_pcm_substream *playback_substream;
+	struct snd_pcm_substream *capture_substream;
+
+	int instance;
+
+	struct mutex lock;
+
+	uint32_t samp_rate;
+	uint32_t channel_mode;
+
+	int playback_start;
+	int capture_start;
+};
+
+#endif /*_MSM_PCM_VOICE_H*/
diff --git a/sound/soc/msm/qdsp6v2/msm-pcm-voip-v2.c b/sound/soc/msm/qdsp6v2/msm-pcm-voip-v2.c
new file mode 100644
index 0000000..630405a
--- /dev/null
+++ b/sound/soc/msm/qdsp6v2/msm-pcm-voip-v2.c
@@ -0,0 +1,1180 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/time.h>
+#include <linux/wait.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/dma-mapping.h>
+#include <linux/of_device.h>
+#include <sound/core.h>
+#include <sound/soc.h>
+#include <sound/soc-dapm.h>
+#include <sound/pcm.h>
+#include <sound/initval.h>
+#include <sound/control.h>
+#include <asm/dma.h>
+
+#include "msm-pcm-q6-v2.h"
+#include "msm-pcm-routing-v2.h"
+#include "q6voice.h"
+
+#define VOIP_MAX_Q_LEN 10
+#define VOIP_MAX_VOC_PKT_SIZE 640
+#define VOIP_MIN_VOC_PKT_SIZE 320
+
+/* Length of the DSP frame info header added to the voc packet. */
+#define DSP_FRAME_HDR_LEN 1
+
+#define MODE_IS127		0x2
+#define MODE_4GV_NB		0x3
+#define MODE_4GV_WB		0x4
+#define MODE_AMR		0x5
+#define MODE_AMR_WB		0xD
+#define MODE_PCM		0xC
+
+enum format {
+	FORMAT_S16_LE = 2,
+	FORMAT_SPECIAL = 31,
+};
+
+
+enum amr_rate_type {
+	AMR_RATE_4750, /* AMR 4.75 kbps */
+	AMR_RATE_5150, /* AMR 5.15 kbps */
+	AMR_RATE_5900, /* AMR 5.90 kbps */
+	AMR_RATE_6700, /* AMR 6.70 kbps */
+	AMR_RATE_7400, /* AMR 7.40 kbps */
+	AMR_RATE_7950, /* AMR 7.95 kbps */
+	AMR_RATE_10200, /* AMR 10.20 kbps */
+	AMR_RATE_12200, /* AMR 12.20 kbps */
+	AMR_RATE_6600, /* AMR-WB 6.60 kbps */
+	AMR_RATE_8850, /* AMR-WB 8.85 kbps */
+	AMR_RATE_12650, /* AMR-WB 12.65 kbps */
+	AMR_RATE_14250, /* AMR-WB 14.25 kbps */
+	AMR_RATE_15850, /* AMR-WB 15.85 kbps */
+	AMR_RATE_18250, /* AMR-WB 18.25 kbps */
+	AMR_RATE_19850, /* AMR-WB 19.85 kbps */
+	AMR_RATE_23050, /* AMR-WB 23.05 kbps */
+	AMR_RATE_23850, /* AMR-WB 23.85 kbps */
+	AMR_RATE_UNDEF
+};
+
+enum voip_state {
+	VOIP_STOPPED,
+	VOIP_STARTED,
+};
+
+struct voip_frame {
+	union {
+	uint32_t frame_type;
+	uint32_t packet_rate;
+	} header;
+	uint32_t len;
+	uint8_t voc_pkt[VOIP_MAX_VOC_PKT_SIZE];
+};
+
+struct voip_buf_node {
+	struct list_head list;
+	struct voip_frame frame;
+};
+
+struct voip_drv_info {
+	enum  voip_state state;
+
+	struct snd_pcm_substream *playback_substream;
+	struct snd_pcm_substream *capture_substream;
+
+	struct list_head in_queue;
+	struct list_head free_in_queue;
+
+	struct list_head out_queue;
+	struct list_head free_out_queue;
+
+	wait_queue_head_t out_wait;
+	wait_queue_head_t in_wait;
+
+	struct mutex lock;
+	struct mutex in_lock;
+	struct mutex out_lock;
+
+	spinlock_t dsp_lock;
+
+	uint32_t mode;
+	uint32_t rate_type;
+	uint32_t rate;
+	uint32_t dtx_mode;
+
+	uint8_t capture_start;
+	uint8_t playback_start;
+
+	uint8_t playback_instance;
+	uint8_t capture_instance;
+
+	unsigned int play_samp_rate;
+	unsigned int cap_samp_rate;
+
+	unsigned int pcm_size;
+	unsigned int pcm_count;
+	unsigned int pcm_playback_irq_pos;      /* IRQ position */
+	unsigned int pcm_playback_buf_pos;      /* position in buffer */
+
+	unsigned int pcm_capture_size;
+	unsigned int pcm_capture_count;
+	unsigned int pcm_capture_irq_pos;       /* IRQ position */
+	unsigned int pcm_capture_buf_pos;       /* position in buffer */
+};
+
+static int voip_get_media_type(uint32_t mode,
+				unsigned int samp_rate);
+static int voip_get_rate_type(uint32_t mode,
+				uint32_t rate,
+				uint32_t *rate_type);
+static int msm_voip_mode_rate_config_put(struct snd_kcontrol *kcontrol,
+					struct snd_ctl_elem_value *ucontrol);
+static int msm_voip_mode_rate_config_get(struct snd_kcontrol *kcontrol,
+					struct snd_ctl_elem_value *ucontrol);
+
+static struct voip_drv_info voip_info;
+
+static struct snd_pcm_hardware msm_pcm_hardware = {
+	.info =                 (SNDRV_PCM_INFO_MMAP |
+				SNDRV_PCM_INFO_BLOCK_TRANSFER |
+				SNDRV_PCM_INFO_MMAP_VALID |
+				SNDRV_PCM_INFO_INTERLEAVED),
+	.formats =              SNDRV_PCM_FMTBIT_S16_LE |
+				SNDRV_PCM_FMTBIT_SPECIAL,
+	.rates =                SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000,
+	.rate_min =             8000,
+	.rate_max =             16000,
+	.channels_min =         1,
+	.channels_max =         1,
+	.buffer_bytes_max =	sizeof(struct voip_buf_node) * VOIP_MAX_Q_LEN,
+	.period_bytes_min =	VOIP_MIN_VOC_PKT_SIZE,
+	.period_bytes_max =	VOIP_MAX_VOC_PKT_SIZE,
+	.periods_min =		VOIP_MAX_Q_LEN,
+	.periods_max =		VOIP_MAX_Q_LEN,
+	.fifo_size =            0,
+};
+
+
+static int msm_voip_mute_put(struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_value *ucontrol)
+{
+	int mute = ucontrol->value.integer.value[0];
+
+	pr_debug("%s: mute=%d\n", __func__, mute);
+
+	voc_set_tx_mute(voc_get_session_id(VOIP_SESSION_NAME), TX_PATH, mute);
+
+	return 0;
+}
+
+static int msm_voip_mute_get(struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_value *ucontrol)
+{
+	ucontrol->value.integer.value[0] = 0;
+	return 0;
+}
+
+static int msm_voip_volume_put(struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_value *ucontrol)
+{
+	int volume = ucontrol->value.integer.value[0];
+
+	pr_debug("%s: volume: %d\n", __func__, volume);
+
+	voc_set_rx_vol_index(voc_get_session_id(VOIP_SESSION_NAME),
+			     RX_PATH,
+			     volume);
+	return 0;
+}
+static int msm_voip_volume_get(struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_value *ucontrol)
+{
+	ucontrol->value.integer.value[0] = 0;
+	return 0;
+}
+
+static int msm_voip_dtx_mode_put(struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_value *ucontrol)
+{
+	mutex_lock(&voip_info.lock);
+
+	voip_info.dtx_mode  = ucontrol->value.integer.value[0];
+
+	pr_debug("%s: dtx: %d\n", __func__, voip_info.dtx_mode);
+
+	mutex_unlock(&voip_info.lock);
+
+	return 0;
+}
+static int msm_voip_dtx_mode_get(struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_value *ucontrol)
+{
+	mutex_lock(&voip_info.lock);
+
+	ucontrol->value.integer.value[0] = voip_info.dtx_mode;
+
+	mutex_unlock(&voip_info.lock);
+
+	return 0;
+}
+
+static struct snd_kcontrol_new msm_voip_controls[] = {
+	SOC_SINGLE_EXT("Voip Tx Mute", SND_SOC_NOPM, 0, 1, 0,
+				msm_voip_mute_get, msm_voip_mute_put),
+	SOC_SINGLE_EXT("Voip Rx Volume", SND_SOC_NOPM, 0, 5, 0,
+				msm_voip_volume_get, msm_voip_volume_put),
+	SOC_SINGLE_MULTI_EXT("Voip Mode Rate Config", SND_SOC_NOPM, 0, 23850,
+				0, 2, msm_voip_mode_rate_config_get,
+				msm_voip_mode_rate_config_put),
+	SOC_SINGLE_EXT("Voip Dtx Mode", SND_SOC_NOPM, 0, 1, 0,
+				msm_voip_dtx_mode_get, msm_voip_dtx_mode_put),
+};
+
+static int msm_pcm_voip_probe(struct snd_soc_platform *platform)
+{
+	snd_soc_add_platform_controls(platform, msm_voip_controls,
+					ARRAY_SIZE(msm_voip_controls));
+
+	return 0;
+}
+
+/* Supported sample rates */
+static unsigned int supported_sample_rates[] = {8000, 16000};
+
+/* capture path */
+static void voip_process_ul_pkt(uint8_t *voc_pkt,
+					uint32_t pkt_len,
+					void *private_data)
+{
+	struct voip_buf_node *buf_node = NULL;
+	struct voip_drv_info *prtd = private_data;
+	unsigned long dsp_flags;
+
+	if (prtd->capture_substream == NULL)
+		return;
+
+	/* Copy up-link packet into out_queue. */
+	spin_lock_irqsave(&prtd->dsp_lock, dsp_flags);
+
+	/* discarding UL packets till start is received */
+	if (!list_empty(&prtd->free_out_queue) && prtd->capture_start) {
+		buf_node = list_first_entry(&prtd->free_out_queue,
+					struct voip_buf_node, list);
+		list_del(&buf_node->list);
+		switch (prtd->mode) {
+		case MODE_AMR_WB:
+		case MODE_AMR: {
+			/* Remove the DSP frame info header. Header format:
+			 * Bits 0-3: Frame rate
+			 * Bits 4-7: Frame type
+			 */
+			buf_node->frame.header.frame_type =
+						((*voc_pkt) & 0xF0) >> 4;
+			voc_pkt = voc_pkt + DSP_FRAME_HDR_LEN;
+			buf_node->frame.len = pkt_len - DSP_FRAME_HDR_LEN;
+			memcpy(&buf_node->frame.voc_pkt[0],
+				voc_pkt,
+				buf_node->frame.len);
+			list_add_tail(&buf_node->list, &prtd->out_queue);
+			break;
+		}
+		case MODE_IS127:
+		case MODE_4GV_NB:
+		case MODE_4GV_WB: {
+			/* Remove the DSP frame info header.
+			 * Header format:
+			 * Bits 0-3: frame rate
+			 */
+			buf_node->frame.header.packet_rate = (*voc_pkt) & 0x0F;
+			voc_pkt = voc_pkt + DSP_FRAME_HDR_LEN;
+			buf_node->frame.len = pkt_len - DSP_FRAME_HDR_LEN;
+
+			memcpy(&buf_node->frame.voc_pkt[0],
+				voc_pkt,
+				buf_node->frame.len);
+
+			list_add_tail(&buf_node->list, &prtd->out_queue);
+			break;
+		}
+		default: {
+			buf_node->frame.len = pkt_len;
+			memcpy(&buf_node->frame.voc_pkt[0],
+				voc_pkt,
+				buf_node->frame.len);
+			list_add_tail(&buf_node->list, &prtd->out_queue);
+		}
+		}
+		pr_debug("ul_pkt: pkt_len =%d, frame.len=%d\n", pkt_len,
+			buf_node->frame.len);
+		prtd->pcm_capture_irq_pos += prtd->pcm_capture_count;
+		spin_unlock_irqrestore(&prtd->dsp_lock, dsp_flags);
+		snd_pcm_period_elapsed(prtd->capture_substream);
+	} else {
+		spin_unlock_irqrestore(&prtd->dsp_lock, dsp_flags);
+		pr_err("UL data dropped\n");
+	}
+
+	wake_up(&prtd->out_wait);
+}
+
+/* playback path */
+static void voip_process_dl_pkt(uint8_t *voc_pkt,
+					uint32_t *pkt_len,
+					void *private_data)
+{
+	struct voip_buf_node *buf_node = NULL;
+	struct voip_drv_info *prtd = private_data;
+	unsigned long dsp_flags;
+
+
+	if (prtd->playback_substream == NULL)
+		return;
+
+	spin_lock_irqsave(&prtd->dsp_lock, dsp_flags);
+
+	if (!list_empty(&prtd->in_queue) && prtd->playback_start) {
+		buf_node = list_first_entry(&prtd->in_queue,
+				struct voip_buf_node, list);
+		list_del(&buf_node->list);
+		switch (prtd->mode) {
+		case MODE_AMR:
+		case MODE_AMR_WB: {
+			/* Add the DSP frame info header. Header format:
+			 * Bits 0-3: Frame rate
+			 * Bits 4-7: Frame type
+			 */
+			*voc_pkt = ((buf_node->frame.header.frame_type &
+					0x0F) << 4) | (prtd->rate_type & 0x0F);
+			voc_pkt = voc_pkt + DSP_FRAME_HDR_LEN;
+			*pkt_len = buf_node->frame.len + DSP_FRAME_HDR_LEN;
+			memcpy(voc_pkt,
+				&buf_node->frame.voc_pkt[0],
+				buf_node->frame.len);
+			list_add_tail(&buf_node->list, &prtd->free_in_queue);
+			break;
+		}
+		case MODE_IS127:
+		case MODE_4GV_NB:
+		case MODE_4GV_WB: {
+			/* Add the DSP frame info header. Header format:
+			 * Bits 0-3 : Frame rate
+			*/
+			*voc_pkt = buf_node->frame.header.packet_rate & 0x0F;
+			voc_pkt = voc_pkt + DSP_FRAME_HDR_LEN;
+			*pkt_len = buf_node->frame.len + DSP_FRAME_HDR_LEN;
+
+			memcpy(voc_pkt,
+				&buf_node->frame.voc_pkt[0],
+				buf_node->frame.len);
+
+			list_add_tail(&buf_node->list, &prtd->free_in_queue);
+			break;
+		}
+		default: {
+			*pkt_len = buf_node->frame.len;
+
+			memcpy(voc_pkt,
+				&buf_node->frame.voc_pkt[0],
+				buf_node->frame.len);
+
+			list_add_tail(&buf_node->list, &prtd->free_in_queue);
+		}
+		}
+		pr_debug("dl_pkt: pkt_len=%d, frame_len=%d\n", *pkt_len,
+			buf_node->frame.len);
+		prtd->pcm_playback_irq_pos += prtd->pcm_count;
+		spin_unlock_irqrestore(&prtd->dsp_lock, dsp_flags);
+		snd_pcm_period_elapsed(prtd->playback_substream);
+	} else {
+		*pkt_len = 0;
+		spin_unlock_irqrestore(&prtd->dsp_lock, dsp_flags);
+		pr_err("DL data not available\n");
+	}
+	wake_up(&prtd->in_wait);
+}
+
+static struct snd_pcm_hw_constraint_list constraints_sample_rates = {
+	.count = ARRAY_SIZE(supported_sample_rates),
+	.list = supported_sample_rates,
+	.mask = 0,
+};
+
+static int msm_pcm_playback_prepare(struct snd_pcm_substream *substream)
+{
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct voip_drv_info *prtd = runtime->private_data;
+
+	prtd->play_samp_rate = runtime->rate;
+	prtd->pcm_size = snd_pcm_lib_buffer_bytes(substream);
+	prtd->pcm_count = snd_pcm_lib_period_bytes(substream);
+	prtd->pcm_playback_irq_pos = 0;
+	prtd->pcm_playback_buf_pos = 0;
+
+	return 0;
+}
+
+static int msm_pcm_capture_prepare(struct snd_pcm_substream *substream)
+{
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct voip_drv_info *prtd = runtime->private_data;
+	int ret = 0;
+
+	prtd->cap_samp_rate = runtime->rate;
+	prtd->pcm_capture_size  = snd_pcm_lib_buffer_bytes(substream);
+	prtd->pcm_capture_count = snd_pcm_lib_period_bytes(substream);
+	prtd->pcm_capture_irq_pos = 0;
+	prtd->pcm_capture_buf_pos = 0;
+	return ret;
+}
+
+static int msm_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
+{
+	int ret = 0;
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct voip_drv_info *prtd = runtime->private_data;
+
+	switch (cmd) {
+	case SNDRV_PCM_TRIGGER_START:
+	case SNDRV_PCM_TRIGGER_RESUME:
+		pr_debug("%s: Trigger start\n", __func__);
+		if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
+			prtd->capture_start = 1;
+		else
+			prtd->playback_start = 1;
+		break;
+	case SNDRV_PCM_TRIGGER_STOP:
+		pr_debug("SNDRV_PCM_TRIGGER_STOP\n");
+		if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+			prtd->playback_start = 0;
+		else
+			prtd->capture_start = 0;
+		break;
+	default:
+		ret = -EINVAL;
+		break;
+	}
+
+	return ret;
+}
+
+static int msm_pcm_open(struct snd_pcm_substream *substream)
+{
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct voip_drv_info *prtd = &voip_info;
+	int ret = 0;
+
+	pr_debug("%s, VoIP\n", __func__);
+	mutex_lock(&prtd->lock);
+
+	runtime->hw = msm_pcm_hardware;
+
+	ret = snd_pcm_hw_constraint_list(runtime, 0,
+					SNDRV_PCM_HW_PARAM_RATE,
+					&constraints_sample_rates);
+	if (ret < 0)
+		pr_debug("snd_pcm_hw_constraint_list failed\n");
+
+	ret = snd_pcm_hw_constraint_integer(runtime,
+					SNDRV_PCM_HW_PARAM_PERIODS);
+	if (ret < 0) {
+		pr_debug("snd_pcm_hw_constraint_integer failed\n");
+		goto err;
+	}
+
+	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+		prtd->playback_substream = substream;
+		prtd->playback_instance++;
+	} else {
+		prtd->capture_substream = substream;
+		prtd->capture_instance++;
+	}
+	runtime->private_data = prtd;
+err:
+	mutex_unlock(&prtd->lock);
+
+	return ret;
+}
+
+static int msm_pcm_playback_copy(struct snd_pcm_substream *substream, int a,
+	snd_pcm_uframes_t hwoff, void __user *buf, snd_pcm_uframes_t frames)
+{
+	int ret = 0;
+	struct voip_buf_node *buf_node = NULL;
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct voip_drv_info *prtd = runtime->private_data;
+
+	int count = frames_to_bytes(runtime, frames);
+	pr_debug("%s: count = %d, frames=%d\n", __func__, count, (int)frames);
+
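+	/* Wait up to one second for a free DL buffer before giving up. */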
+	ret = wait_event_interruptible_timeout(prtd->in_wait,
+				(!list_empty(&prtd->free_in_queue) ||
+				prtd->state == VOIP_STOPPED),
+				1 * HZ);
+	if (ret > 0) {
+		mutex_lock(&prtd->in_lock);
+		if (count <= VOIP_MAX_VOC_PKT_SIZE) {
+			buf_node =
+				list_first_entry(&prtd->free_in_queue,
+						struct voip_buf_node, list);
+			list_del(&buf_node->list);
+			if (prtd->mode == MODE_PCM) {
+				ret = copy_from_user(&buf_node->frame.voc_pkt,
+							buf, count);
+				buf_node->frame.len = count;
+			} else
+				ret = copy_from_user(&buf_node->frame,
+							buf, count);
+			list_add_tail(&buf_node->list, &prtd->in_queue);
+		} else {
+			pr_err("%s: Write cnt %d is > VOIP_MAX_VOC_PKT_SIZE\n",
+				__func__, count);
+			ret = -ENOMEM;
+		}
+
+		mutex_unlock(&prtd->in_lock);
+	} else if (ret == 0) {
+		pr_err("%s: No free DL buffs\n", __func__);
+		ret = -ETIMEDOUT;
+	} else {
+		pr_err("%s: playback copy  was interrupted\n", __func__);
+	}
+
+	return  ret;
+}
+static int msm_pcm_capture_copy(struct snd_pcm_substream *substream,
+		int channel, snd_pcm_uframes_t hwoff, void __user *buf,
+						snd_pcm_uframes_t frames)
+{
+	int ret = 0;
+	int count = 0;
+	struct voip_buf_node *buf_node = NULL;
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct voip_drv_info *prtd = runtime->private_data;
+
+	count = frames_to_bytes(runtime, frames);
+
+	pr_debug("%s: count = %d\n", __func__, count);
+
+	ret = wait_event_interruptible_timeout(prtd->out_wait,
+				(!list_empty(&prtd->out_queue) ||
+				prtd->state == VOIP_STOPPED),
+				1 * HZ);
+
+	if (ret > 0) {
+		mutex_lock(&prtd->out_lock);
+
+		if (count <= VOIP_MAX_VOC_PKT_SIZE) {
+			buf_node = list_first_entry(&prtd->out_queue,
+					struct voip_buf_node, list);
+			list_del(&buf_node->list);
+			if (prtd->mode == MODE_PCM)
+				ret = copy_to_user(buf,
+						   &buf_node->frame.voc_pkt,
+						   count);
+			else
+				ret = copy_to_user(buf,
+						   &buf_node->frame,
+						   count);
+			if (ret) {
+				pr_err("%s: Copy to user retuned %d\n",
+					__func__, ret);
+				ret = -EFAULT;
+			}
+			list_add_tail(&buf_node->list,
+						&prtd->free_out_queue);
+		} else {
+			pr_err("%s: Read count %d > VOIP_MAX_VOC_PKT_SIZE\n",
+				__func__, count);
+			ret = -ENOMEM;
+		}
+
+		mutex_unlock(&prtd->out_lock);
+
+	} else if (ret == 0) {
+		pr_err("%s: No UL data available\n", __func__);
+		ret = -ETIMEDOUT;
+	} else {
+		pr_err("%s: Read was interrupted\n", __func__);
+		ret = -ERESTARTSYS;
+	}
+	return ret;
+}
+static int msm_pcm_copy(struct snd_pcm_substream *substream, int a,
+	 snd_pcm_uframes_t hwoff, void __user *buf, snd_pcm_uframes_t frames)
+{
+	int ret = 0;
+
+	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+		ret = msm_pcm_playback_copy(substream, a, hwoff, buf, frames);
+	else if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
+		ret = msm_pcm_capture_copy(substream, a, hwoff, buf, frames);
+
+	return ret;
+}
+
+static int msm_pcm_close(struct snd_pcm_substream *substream)
+{
+	int ret = 0;
+	struct list_head *ptr = NULL;
+	struct list_head *next = NULL;
+	struct voip_buf_node *buf_node = NULL;
+	struct snd_dma_buffer *p_dma_buf, *c_dma_buf;
+	struct snd_pcm_substream *p_substream, *c_substream;
+	struct snd_pcm_runtime *runtime;
+	struct voip_drv_info *prtd;
+
+	if (substream == NULL) {
+		pr_err("substream is NULL\n");
+		return -EINVAL;
+	}
+	runtime = substream->runtime;
+	prtd = runtime->private_data;
+
+	wake_up(&prtd->out_wait);
+
+	mutex_lock(&prtd->lock);
+
+	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+		prtd->playback_instance--;
+	else if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
+		prtd->capture_instance--;
+
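+	/* Tear down the voice call and free the DMA buffers only when
+	 * both the playback and capture streams have been closed.
+	 */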
+	if (!prtd->playback_instance && !prtd->capture_instance) {
+		if (prtd->state == VOIP_STARTED) {
+			prtd->state = VOIP_STOPPED;
+			voc_end_voice_call(
+					voc_get_session_id(VOIP_SESSION_NAME));
+			voc_register_mvs_cb(NULL, NULL, prtd);
+		}
+		/* release all buffers */
+		/* release in_queue and free_in_queue */
+		pr_debug("release all buffer\n");
+		p_substream = prtd->playback_substream;
+		if (p_substream == NULL) {
+			pr_debug("p_substream is NULL\n");
+			goto capt;
+		}
+		p_dma_buf = &p_substream->dma_buffer;
+		if (p_dma_buf == NULL) {
+			pr_debug("p_dma_buf is NULL\n");
+			goto capt;
+		}
+		if (p_dma_buf->area != NULL) {
+			mutex_lock(&prtd->in_lock);
+			list_for_each_safe(ptr, next, &prtd->in_queue) {
+				buf_node = list_entry(ptr,
+						struct voip_buf_node, list);
+				list_del(&buf_node->list);
+			}
+			list_for_each_safe(ptr, next, &prtd->free_in_queue) {
+				buf_node = list_entry(ptr,
+						struct voip_buf_node, list);
+				list_del(&buf_node->list);
+			}
+			dma_free_coherent(p_substream->pcm->card->dev,
+				runtime->hw.buffer_bytes_max, p_dma_buf->area,
+				p_dma_buf->addr);
+			p_dma_buf->area = NULL;
+			mutex_unlock(&prtd->in_lock);
+		}
+		/* release out_queue and free_out_queue */
+capt:		c_substream = prtd->capture_substream;
+		if (c_substream == NULL) {
+			pr_debug("c_substream is NULL\n");
+			goto done;
+		}
+		c_dma_buf = &c_substream->dma_buffer;
+		if (c_dma_buf == NULL) {
+			pr_debug("c_dma_buf is NULL\n");
+			goto done;
+		}
+		if (c_dma_buf->area != NULL) {
+			mutex_lock(&prtd->out_lock);
+			list_for_each_safe(ptr, next, &prtd->out_queue) {
+				buf_node = list_entry(ptr,
+						struct voip_buf_node, list);
+				list_del(&buf_node->list);
+			}
+			list_for_each_safe(ptr, next, &prtd->free_out_queue) {
+				buf_node = list_entry(ptr,
+						struct voip_buf_node, list);
+				list_del(&buf_node->list);
+			}
+			dma_free_coherent(c_substream->pcm->card->dev,
+				runtime->hw.buffer_bytes_max, c_dma_buf->area,
+				c_dma_buf->addr);
+			c_dma_buf->area = NULL;
+			mutex_unlock(&prtd->out_lock);
+		}
+done:
+		prtd->capture_substream = NULL;
+		prtd->playback_substream = NULL;
+	}
+	mutex_unlock(&prtd->lock);
+
+	return ret;
+}
+static int msm_pcm_prepare(struct snd_pcm_substream *substream)
+{
+	int ret = 0;
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct voip_drv_info *prtd = runtime->private_data;
+	int media_type = 0;
+	uint32_t rate_type = 0;
+
+	mutex_lock(&prtd->lock);
+
+	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+		ret = msm_pcm_playback_prepare(substream);
+	else if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
+		ret = msm_pcm_capture_prepare(substream);
+
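+	/* Vocoder modes require the SPECIAL format; PCM mode requires S16_LE. */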
+	if ((runtime->format != FORMAT_SPECIAL) &&
+		 ((prtd->mode == MODE_AMR) || (prtd->mode == MODE_AMR_WB) ||
+		 (prtd->mode == MODE_IS127) || (prtd->mode == MODE_4GV_NB) ||
+		 (prtd->mode == MODE_4GV_WB))) {
+		pr_err("mode:%d and format:%u are not mached\n",
+			prtd->mode, (uint32_t)runtime->format);
+		ret =  -EINVAL;
+		goto done;
+	}
+
+	if ((runtime->format != FORMAT_S16_LE) &&
+		(prtd->mode == MODE_PCM)) {
+		pr_err("mode:%d and format:%u are not mached\n",
+			prtd->mode, (uint32_t)runtime->format);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	if (prtd->playback_instance && prtd->capture_instance
+				&& (prtd->state != VOIP_STARTED)) {
+
+		ret = voip_get_rate_type(prtd->mode,
+					prtd->rate,
+					&rate_type);
+		if (ret < 0) {
+			pr_err("fail at getting rate_type\n");
+			ret = -EINVAL;
+			goto done;
+		}
+		prtd->rate_type = rate_type;
+		media_type = voip_get_media_type(prtd->mode,
+						prtd->play_samp_rate);
+		if (media_type < 0) {
+			pr_err("fail at getting media_type\n");
+			ret = -EINVAL;
+			goto done;
+		}
+		pr_debug(" media_type=%d, rate_type=%d\n", media_type,
+			rate_type);
+		if ((prtd->play_samp_rate == 8000) &&
+					(prtd->cap_samp_rate == 8000))
+			voc_config_vocoder(media_type, rate_type,
+					VSS_NETWORK_ID_VOIP_NB,
+					voip_info.dtx_mode);
+		else if ((prtd->play_samp_rate == 16000) &&
+					(prtd->cap_samp_rate == 16000))
+			voc_config_vocoder(media_type, rate_type,
+					VSS_NETWORK_ID_VOIP_WB,
+					voip_info.dtx_mode);
+		else {
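+			/* One stream may not be prepared yet; the call
+			 * starts once both rates are set and match.
+			 */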
+			pr_debug("%s: Invalid rate playback %d, capture %d\n",
+				 __func__, prtd->play_samp_rate,
+				 prtd->cap_samp_rate);
+			goto done;
+		}
+		voc_register_mvs_cb(voip_process_ul_pkt,
+					voip_process_dl_pkt, prtd);
+		voc_start_voice_call(voc_get_session_id(VOIP_SESSION_NAME));
+
+		prtd->state = VOIP_STARTED;
+	}
+done:
+	mutex_unlock(&prtd->lock);
+
+	return ret;
+}
+
+static snd_pcm_uframes_t
+msm_pcm_playback_pointer(struct snd_pcm_substream *substream)
+{
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct voip_drv_info *prtd = runtime->private_data;
+
+	pr_debug("%s\n", __func__);
+	if (prtd->pcm_playback_irq_pos >= prtd->pcm_size)
+		prtd->pcm_playback_irq_pos = 0;
+	return bytes_to_frames(runtime, (prtd->pcm_playback_irq_pos));
+}
+
+static snd_pcm_uframes_t
+msm_pcm_capture_pointer(struct snd_pcm_substream *substream)
+{
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct voip_drv_info *prtd = runtime->private_data;
+
+	if (prtd->pcm_capture_irq_pos >= prtd->pcm_capture_size)
+		prtd->pcm_capture_irq_pos = 0;
+	return bytes_to_frames(runtime, (prtd->pcm_capture_irq_pos));
+}
+
+static snd_pcm_uframes_t msm_pcm_pointer(struct snd_pcm_substream *substream)
+{
+	snd_pcm_uframes_t ret = 0;
+	 pr_debug("%s\n", __func__);
+	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+		ret = msm_pcm_playback_pointer(substream);
+	else if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
+		ret = msm_pcm_capture_pointer(substream);
+	return ret;
+}
+
+static int msm_pcm_mmap(struct snd_pcm_substream *substream,
+			struct vm_area_struct *vma)
+{
+	struct snd_pcm_runtime *runtime = substream->runtime;
+
+	pr_debug("%s\n", __func__);
+	/* Propagate any mapping failure back to the caller. */
+	return dma_mmap_coherent(substream->pcm->card->dev, vma,
+				 runtime->dma_area,
+				 runtime->dma_addr,
+				 runtime->dma_bytes);
+}
+
+static int msm_pcm_hw_params(struct snd_pcm_substream *substream,
+				struct snd_pcm_hw_params *params)
+{
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct snd_dma_buffer *dma_buf = &substream->dma_buffer;
+	struct voip_buf_node *buf_node = NULL;
+	int i = 0, offset = 0;
+
+	pr_debug("%s: voip\n", __func__);
+
+	mutex_lock(&voip_info.lock);
+
+	dma_buf->dev.type = SNDRV_DMA_TYPE_DEV;
+	dma_buf->dev.dev = substream->pcm->card->dev;
+	dma_buf->private_data = NULL;
+
+	dma_buf->area = dma_alloc_coherent(substream->pcm->card->dev,
+			runtime->hw.buffer_bytes_max,
+			&dma_buf->addr, GFP_KERNEL);
+	if (!dma_buf->area) {
+		pr_err("%s:MSM VOIP dma_alloc failed\n", __func__);
+		mutex_unlock(&voip_info.lock);
+		return -ENOMEM;
+	}
+
+	dma_buf->bytes = runtime->hw.buffer_bytes_max;
+	memset(dma_buf->area, 0, runtime->hw.buffer_bytes_max);
+
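+	/* Carve the coherent buffer into VOIP_MAX_Q_LEN voip_buf_node
+	 * entries and queue them on the stream's free list.
+	 */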
+	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+		for (i = 0; i < VOIP_MAX_Q_LEN; i++) {
+			buf_node = (void *)dma_buf->area + offset;
+
+			mutex_lock(&voip_info.in_lock);
+			list_add_tail(&buf_node->list,
+					&voip_info.free_in_queue);
+			mutex_unlock(&voip_info.in_lock);
+			offset = offset + sizeof(struct voip_buf_node);
+		}
+	} else {
+		for (i = 0; i < VOIP_MAX_Q_LEN; i++) {
+			buf_node = (void *) dma_buf->area + offset;
+			mutex_lock(&voip_info.out_lock);
+			list_add_tail(&buf_node->list,
+					&voip_info.free_out_queue);
+			mutex_unlock(&voip_info.out_lock);
+			offset = offset + sizeof(struct voip_buf_node);
+		}
+	}
+
+	mutex_unlock(&voip_info.lock);
+
+	snd_pcm_set_runtime_buffer(substream, &substream->dma_buffer);
+
+	return 0;
+}
+
+static int msm_voip_mode_rate_config_get(struct snd_kcontrol *kcontrol,
+					struct snd_ctl_elem_value *ucontrol)
+{
+	mutex_lock(&voip_info.lock);
+
+	ucontrol->value.integer.value[0] = voip_info.mode;
+	ucontrol->value.integer.value[1] = voip_info.rate;
+
+	mutex_unlock(&voip_info.lock);
+
+	return 0;
+}
+
+static int msm_voip_mode_rate_config_put(struct snd_kcontrol *kcontrol,
+					struct snd_ctl_elem_value *ucontrol)
+{
+	mutex_lock(&voip_info.lock);
+
+	voip_info.mode = ucontrol->value.integer.value[0];
+	voip_info.rate = ucontrol->value.integer.value[1];
+
+	pr_debug("%s: mode=%d,rate=%d\n", __func__, voip_info.mode,
+		voip_info.rate);
+
+	mutex_unlock(&voip_info.lock);
+
+	return 0;
+}
+
+static int voip_get_rate_type(uint32_t mode, uint32_t rate,
+				 uint32_t *rate_type)
+{
+	int ret = 0;
+
+	switch (mode) {
+	case MODE_AMR: {
+		switch (rate) {
+		case 4750:
+			*rate_type = AMR_RATE_4750;
+			break;
+		case 5150:
+			*rate_type = AMR_RATE_5150;
+			break;
+		case 5900:
+			*rate_type = AMR_RATE_5900;
+			break;
+		case 6700:
+			*rate_type = AMR_RATE_6700;
+			break;
+		case 7400:
+			*rate_type = AMR_RATE_7400;
+			break;
+		case 7950:
+			*rate_type = AMR_RATE_7950;
+			break;
+		case 10200:
+			*rate_type = AMR_RATE_10200;
+			break;
+		case 12200:
+			*rate_type = AMR_RATE_12200;
+			break;
+		default:
+			pr_err("wrong rate for AMR NB.\n");
+			ret = -EINVAL;
+			break;
+		}
+		break;
+	}
+	case MODE_AMR_WB: {
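+		/* AMR-WB rate types are zero-based relative to AMR_RATE_6600. */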
+		switch (rate) {
+		case 6600:
+			*rate_type = AMR_RATE_6600 - AMR_RATE_6600;
+			break;
+		case 8850:
+			*rate_type = AMR_RATE_8850 - AMR_RATE_6600;
+			break;
+		case 12650:
+			*rate_type = AMR_RATE_12650 - AMR_RATE_6600;
+			break;
+		case 14250:
+			*rate_type = AMR_RATE_14250 - AMR_RATE_6600;
+			break;
+		case 15850:
+			*rate_type = AMR_RATE_15850 - AMR_RATE_6600;
+			break;
+		case 18250:
+			*rate_type = AMR_RATE_18250 - AMR_RATE_6600;
+			break;
+		case 19850:
+			*rate_type = AMR_RATE_19850 - AMR_RATE_6600;
+			break;
+		case 23050:
+			*rate_type = AMR_RATE_23050 - AMR_RATE_6600;
+			break;
+		case 23850:
+			*rate_type = AMR_RATE_23850 - AMR_RATE_6600;
+			break;
+		default:
+			pr_err("wrong rate for AMR_WB.\n");
+			ret = -EINVAL;
+			break;
+		}
+		break;
+	}
+	case MODE_PCM: {
+		*rate_type = 0;
+		break;
+	}
+	case MODE_IS127:
+	case MODE_4GV_NB:
+	case MODE_4GV_WB: {
+		switch (rate) {
+		case VOC_0_RATE:
+		case VOC_8_RATE:
+		case VOC_4_RATE:
+		case VOC_2_RATE:
+		case VOC_1_RATE:
+			*rate_type = rate;
+			break;
+		default:
+			pr_err("wrong rate for IS127/4GV_NB/WB.\n");
+			ret = -EINVAL;
+			break;
+		}
+		break;
+	}
+	default:
+		pr_err("wrong mode type.\n");
+		ret = -EINVAL;
+	}
+	pr_debug("%s, mode=%d, rate=%u, rate_type=%d\n",
+		__func__, mode, rate, *rate_type);
+	return ret;
+}
+
+static int voip_get_media_type(uint32_t mode,
+				unsigned int samp_rate)
+{
+	int media_type;
+
+	pr_debug("%s: mode=%d, samp_rate=%d\n", __func__,
+		mode, samp_rate);
+	switch (mode) {
+	case MODE_AMR:
+		media_type = VSS_MEDIA_ID_AMR_NB_MODEM;
+		break;
+	case MODE_AMR_WB:
+		media_type = VSS_MEDIA_ID_AMR_WB_MODEM;
+		break;
+	case MODE_PCM:
+		if (samp_rate == 8000)
+			media_type = VSS_MEDIA_ID_PCM_NB;
+		else
+			media_type = VSS_MEDIA_ID_PCM_WB;
+		break;
+	case MODE_IS127: /* EVRC-A */
+		media_type = VSS_MEDIA_ID_EVRC_MODEM;
+		break;
+	case MODE_4GV_NB: /* EVRC-B */
+		media_type = VSS_MEDIA_ID_4GV_NB_MODEM;
+		break;
+	case MODE_4GV_WB: /* EVRC-WB */
+		media_type = VSS_MEDIA_ID_4GV_WB_MODEM;
+		break;
+	default:
+		pr_debug(" input mode is not supported\n");
+		media_type = -EINVAL;
+	}
+
+	pr_debug("%s: media_type is 0x%x\n", __func__, media_type);
+
+	return media_type;
+}
+
+
+static struct snd_pcm_ops msm_pcm_ops = {
+	.open           = msm_pcm_open,
+	.copy		= msm_pcm_copy,
+	.hw_params	= msm_pcm_hw_params,
+	.close          = msm_pcm_close,
+	.prepare        = msm_pcm_prepare,
+	.trigger        = msm_pcm_trigger,
+	.pointer        = msm_pcm_pointer,
+	.mmap		= msm_pcm_mmap,
+};
+
+static int msm_asoc_pcm_new(struct snd_soc_pcm_runtime *rtd)
+{
+	struct snd_card *card = rtd->card->snd_card;
+	int ret = 0;
+
+	pr_debug("msm_asoc_pcm_new\n");
+	if (!card->dev->coherent_dma_mask)
+		card->dev->coherent_dma_mask = DMA_BIT_MASK(32);
+	return ret;
+}
+
+static struct snd_soc_platform_driver msm_soc_platform = {
+	.ops		= &msm_pcm_ops,
+	.pcm_new	= msm_asoc_pcm_new,
+	.probe		= msm_pcm_voip_probe,
+};
+
+static __devinit int msm_pcm_probe(struct platform_device *pdev)
+{
+	if (pdev->dev.of_node)
+		dev_set_name(&pdev->dev, "%s", "msm-voip-dsp");
+
+	pr_debug("%s: dev name %s\n", __func__, dev_name(&pdev->dev));
+	return snd_soc_register_platform(&pdev->dev,
+				   &msm_soc_platform);
+}
+
+static int msm_pcm_remove(struct platform_device *pdev)
+{
+	snd_soc_unregister_platform(&pdev->dev);
+	return 0;
+}
+
+static const struct of_device_id msm_voip_dt_match[] = {
+	{.compatible = "qcom,msm-voip-dsp"},
+};
+MODULE_DEVICE_TABLE(of, msm_voip_dt_match);
+
+static struct platform_driver msm_pcm_driver = {
+	.driver = {
+		.name = "msm-voip-dsp",
+		.owner = THIS_MODULE,
+		.of_match_table = msm_voip_dt_match,
+	},
+	.probe = msm_pcm_probe,
+	.remove = __devexit_p(msm_pcm_remove),
+};
+
+static int __init msm_soc_platform_init(void)
+{
+	memset(&voip_info, 0, sizeof(voip_info));
+	voip_info.mode = MODE_PCM;
+	mutex_init(&voip_info.lock);
+	mutex_init(&voip_info.in_lock);
+	mutex_init(&voip_info.out_lock);
+
+	spin_lock_init(&voip_info.dsp_lock);
+
+	init_waitqueue_head(&voip_info.out_wait);
+	init_waitqueue_head(&voip_info.in_wait);
+
+	INIT_LIST_HEAD(&voip_info.in_queue);
+	INIT_LIST_HEAD(&voip_info.free_in_queue);
+	INIT_LIST_HEAD(&voip_info.out_queue);
+	INIT_LIST_HEAD(&voip_info.free_out_queue);
+
+	return platform_driver_register(&msm_pcm_driver);
+}
+module_init(msm_soc_platform_init);
+
+static void __exit msm_soc_platform_exit(void)
+{
+	platform_driver_unregister(&msm_pcm_driver);
+}
+module_exit(msm_soc_platform_exit);
+
+MODULE_DESCRIPTION("PCM module platform driver");
+MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/msm/qdsp6v2/q6asm.c b/sound/soc/msm/qdsp6v2/q6asm.c
index f982134..0bb88e8 100644
--- a/sound/soc/msm/qdsp6v2/q6asm.c
+++ b/sound/soc/msm/qdsp6v2/q6asm.c
@@ -84,6 +84,8 @@
 				uint32_t bufsz, uint32_t bufcnt);
 static void q6asm_reset_buf_state(struct audio_client *ac);
 
+static int q6asm_map_channels(u8 *channel_mapping, uint32_t channels);
+
 
 #ifdef CONFIG_DEBUG_FS
 #define OUT_BUFFER_SIZE 56
@@ -196,8 +198,7 @@
 		out_cold_index*/
 		if (out_cold_index != 1) {
 			do_gettimeofday(&out_cold_tv);
-			pr_debug("COLD: apr_send_pkt at %ld"
-				"sec %ld microsec\n",\
+			pr_debug("COLD: apr_send_pkt at %ld sec %ld microsec\n",
 				out_cold_tv.tv_sec,\
 				out_cold_tv.tv_usec);
 			out_cold_index = 1;
@@ -222,8 +223,7 @@
 		*/
 		if (in_cont_index == 7) {
 			do_gettimeofday(&in_cont_tv);
-			pr_err("In_CONT:previous read buffer done"
-				"at %ld sec %ld microsec\n",\
+			pr_err("In_CONT:previous read buffer done at %ld sec %ld microsec\n",
 				in_cont_tv.tv_sec, in_cont_tv.tv_usec);
 		}
 		in_cont_index++;
@@ -253,8 +253,8 @@
 		if ((strncmp(((char *)ab->data), zero_pattern, 2)) &&
 		(!strncmp(((char *)ab->data + 2), zero_pattern, 2))) {
 			do_gettimeofday(&out_warm_tv);
-			pr_debug("WARM:apr_send_pkt at"
-			"%ld sec %ld microsec\n", out_warm_tv.tv_sec,\
+			pr_debug("WARM:apr_send_pkt at %ld sec %ld microsec\n",
+			 out_warm_tv.tv_sec,\
 			out_warm_tv.tv_usec);
 			pr_debug("Warm Pattern Matched");
 		}
@@ -263,8 +263,8 @@
 		else if ((!strncmp(((char *)ab->data), zero_pattern, 2))
 		&& (strncmp(((char *)ab->data + 2), zero_pattern, 2))) {
 			do_gettimeofday(&out_cont_tv);
-			pr_debug("CONT:apr_send_pkt at"
-			"%ld sec %ld microsec\n", out_cont_tv.tv_sec,\
+			pr_debug("CONT:apr_send_pkt at %ld sec %ld microsec\n",
+			out_cont_tv.tv_sec,\
 			out_cont_tv.tv_usec);
 			pr_debug("Cont Pattern Matched");
 		}
@@ -410,8 +410,7 @@
 		ion_unmap_kernel(port->buf[0].client, port->buf[0].handle);
 		ion_free(port->buf[0].client, port->buf[0].handle);
 		ion_client_destroy(port->buf[0].client);
-		pr_debug("%s:data[%p]phys[%p][%p]"
-			", client[%p] handle[%p]\n",
+		pr_debug("%s:data[%p]phys[%p][%p] , client[%p] handle[%p]\n",
 			__func__,
 			(void *)port->buf[0].data,
 			(void *)port->buf[0].phys,
@@ -479,13 +478,16 @@
 
 int q6asm_set_io_mode(struct audio_client *ac, uint32_t mode)
 {
+	ac->io_mode &= 0xFF00;
+	pr_debug("%s ac->mode after anding with FF00:0x[%x],\n",
+		__func__, ac->io_mode);
 	if (ac == NULL) {
 		pr_err("%s APR handle NULL\n", __func__);
 		return -EINVAL;
 	}
 	if ((mode == ASYNC_IO_MODE) || (mode == SYNC_IO_MODE)) {
-		ac->io_mode = mode;
-		pr_debug("%s:Set Mode to %d\n", __func__, ac->io_mode);
+		ac->io_mode |= mode;
+		pr_debug("%s:Set Mode to 0x[%x]\n", __func__, ac->io_mode);
 		return 0;
 	} else {
 		pr_err("%s:Not an valid IO Mode:%d\n", __func__, ac->io_mode);
@@ -500,8 +502,8 @@
 					(apr_fn)q6asm_mmapcallback,\
 					0x0FFFFFFFF, &this_mmap);
 		if (this_mmap.apr == NULL) {
-			pr_debug("%s Unable to register"
-				"APR ASM common port\n", __func__);
+			pr_debug("%s Unable to register APR ASM common port\n",
+			 __func__);
 			goto fail;
 		}
 	}
@@ -624,8 +626,7 @@
 						(UINT_MAX, "audio_client");
 					if (IS_ERR_OR_NULL((void *)
 						buf[cnt].client)) {
-						pr_err("%s: ION create client"
-						" for AUDIO failed\n",
+						pr_err("%s: ION create client for AUDIO failed\n",
 						__func__);
 						goto fail;
 					}
@@ -634,8 +635,7 @@
 						(0x1 << ION_AUDIO_HEAP_ID));
 					if (IS_ERR_OR_NULL((void *)
 						buf[cnt].handle)) {
-						pr_err("%s: ION memory"
-					" allocation for AUDIO failed\n",
+						pr_err("%s: ION memory allocation for AUDIO failed\n",
 							__func__);
 						goto fail;
 					}
@@ -646,8 +646,7 @@
 						&buf[cnt].phys,
 						(size_t *)&len);
 					if (rc) {
-						pr_err("%s: ION Get Physical"
-						" for AUDIO failed, rc = %d\n",
+						pr_err("%s: ION Get Physical for AUDIO failed, rc = %d\n",
 							__func__, rc);
 						goto fail;
 					}
@@ -657,8 +656,8 @@
 							 0);
 					if (IS_ERR_OR_NULL((void *)
 						buf[cnt].data)) {
-						pr_err("%s: ION memory"
-				" mapping for AUDIO failed\n", __func__);
+						pr_err("%s: ION memory mapping for AUDIO failed\n",
+						 __func__);
 						goto fail;
 					}
 					memset((void *)buf[cnt].data, 0, bufsz);
@@ -752,8 +751,7 @@
 	}
 	memset((void *)buf[0].data, 0, (bufsz * bufcnt));
 	if (!buf[0].data) {
-		pr_err("%s:invalid vaddr,"
-			" iomap failed\n", __func__);
+		pr_err("%s:invalid vaddr, iomap failed\n", __func__);
 		mutex_unlock(&ac->cmd_lock);
 		goto fail;
 	}
@@ -822,8 +820,7 @@
 	}
 	sid = (data->token >> 8) & 0x0F;
 	ac = q6asm_get_audio_client(sid);
-	pr_debug("%s:ptr0[0x%x]ptr1[0x%x]opcode[0x%x]"
-		"token[0x%x]payload_s[%d] src[%d] dest[%d]sid[%d]dir[%d]\n",
+	pr_debug("%s:ptr0[0x%x]ptr1[0x%x]opcode[0x%x] token[0x%x]payload_s[%d] src[%d] dest[%d]sid[%d]dir[%d]\n",
 		__func__, payload[0], payload[1], data->opcode, data->token,
 		data->payload_size, data->src_port, data->dest_port, sid, dir);
 	pr_debug("%s:Payload = [0x%x] status[0x%x]\n",
@@ -918,8 +915,8 @@
 		return 0;
 	}
 
-	pr_debug("%s: session[%d]opcode[0x%x]"
-		"token[0x%x]payload_s[%d] src[%d] dest[%d]\n", __func__,
+	pr_debug("%s: session[%d]opcode[0x%x] token[0x%x]payload_s[%d] src[%d] dest[%d]\n",
+		 __func__,
 		ac->session, data->opcode,
 		data->token, data->payload_size, data->src_port,
 		data->dest_port);
@@ -1060,9 +1057,8 @@
 		pr_err("ASM_SESSION_EVENTX_OVERFLOW\n");
 		break;
 	case ASM_SESSION_CMDRSP_GET_SESSIONTIME_V3:
-		pr_debug("%s: ASM_SESSION_CMDRSP_GET_SESSIONTIME_V3, "
-				"payload[0] = %d, payload[1] = %d, "
-				"payload[2] = %d\n", __func__,
+		pr_debug("%s: ASM_SESSION_CMDRSP_GET_SESSIONTIME_V3, payload[0] = %d, payload[1] = %d, payload[2] = %d\n",
+				 __func__,
 				 payload[0], payload[1], payload[2]);
 		ac->time_stamp = (uint64_t)(((uint64_t)payload[1] << 32) |
 				payload[2]);
@@ -1073,9 +1069,8 @@
 		break;
 	case ASM_DATA_EVENT_SR_CM_CHANGE_NOTIFY:
 	case ASM_DATA_EVENT_ENC_SR_CM_CHANGE_NOTIFY:
-		pr_debug("%s: ASM_DATA_EVENT_SR_CM_CHANGE_NOTIFY, "
-				"payload[0] = %d, payload[1] = %d, "
-				"payload[2] = %d, payload[3] = %d\n", __func__,
+		pr_debug("%s: ASM_DATA_EVENT_SR_CM_CHANGE_NOTIFY, payload[0] = %d, payload[1] = %d, payload[2] = %d, payload[3] = %d\n",
+				 __func__,
 				payload[0], payload[1], payload[2],
 				payload[3]);
 		break;
@@ -1112,8 +1107,8 @@
 		if (port->buf[idx].used == dir) {
 			/* To make it more robust, we could loop and get the
 			next avail buf, its risky though */
-			pr_debug("%s:Next buf idx[0x%x] not available,"
-				"dir[%d]\n", __func__, idx, dir);
+			pr_debug("%s:Next buf idx[0x%x] not available, dir[%d]\n",
+			 __func__, idx, dir);
 			mutex_unlock(&port->lock);
 			return NULL;
 		}
@@ -1162,8 +1157,8 @@
 		 * To make it more robust, we could loop and get the
 		 * next avail buf, its risky though
 		 */
-		pr_debug("%s:Next buf idx[0x%x] not available,"
-			"dir[%d]\n", __func__, idx, dir);
+		pr_debug("%s:Next buf idx[0x%x] not available, dir[%d]\n",
+		 __func__, idx, dir);
 		return NULL;
 	}
 	*size = port->buf[idx].actual_size;
@@ -1427,6 +1422,7 @@
 	pr_debug("wr_format[0x%x]rd_format[0x%x]",
 				wr_format, rd_format);
 
+	ac->io_mode |= NT_MODE;
 	q6asm_add_hdr(ac, &open.hdr, sizeof(open), TRUE);
 	open.hdr.opcode = ASM_STREAM_CMD_OPEN_READWRITE_V2;
 
@@ -1593,8 +1589,8 @@
 	struct asm_aac_enc_cfg_v2 enc_cfg;
 	int rc = 0;
 
-	pr_debug("%s:session[%d]frames[%d]SR[%d]ch[%d]bitrate[%d]mode[%d]"
-		"format[%d]", __func__, ac->session, frames_per_buf,
+	pr_debug("%s:session[%d]frames[%d]SR[%d]ch[%d]bitrate[%d]mode[%d] format[%d]",
+		 __func__, ac->session, frames_per_buf,
 		sample_rate, channels, bit_rate, mode, format);
 
 	q6asm_add_hdr(ac, &enc_cfg.hdr, sizeof(enc_cfg), TRUE);
@@ -1632,8 +1628,41 @@
 int q6asm_set_encdec_chan_map(struct audio_client *ac,
 			uint32_t num_channels)
 {
-	/* Todo: */
+	struct asm_dec_out_chan_map_param chan_map;
+	u8 *channel_mapping;
+	int rc = 0;
+	pr_debug("%s: Session %d, num_channels = %d\n",
+			 __func__, ac->session, num_channels);
+	q6asm_add_hdr(ac, &chan_map.hdr, sizeof(chan_map), TRUE);
+	chan_map.hdr.opcode = ASM_STREAM_CMD_SET_ENCDEC_PARAM;
+	chan_map.encdec.param_id = ASM_PARAM_ID_DEC_OUTPUT_CHAN_MAP;
+	chan_map.encdec.param_size = sizeof(struct asm_dec_out_chan_map_param) -
+			 (sizeof(struct apr_hdr) +
+			 sizeof(struct asm_stream_cmd_set_encdec_param));
+	chan_map.num_channels = num_channels;
+	channel_mapping = chan_map.channel_mapping;
+	memset(channel_mapping, PCM_CHANNEL_NULL, MAX_CHAN_MAP_CHANNELS);
+	if (q6asm_map_channels(channel_mapping, num_channels))
+		return -EINVAL;
+
+	rc = apr_send_pkt(ac->apr, (uint32_t *) &chan_map);
+	if (rc < 0) {
+		pr_err("%s:Command opcode[0x%x]paramid[0x%x] failed\n",
+			   __func__, ASM_STREAM_CMD_SET_ENCDEC_PARAM,
+			   ASM_PARAM_ID_DEC_OUTPUT_CHAN_MAP);
+		goto fail_cmd;
+	}
+	rc = wait_event_timeout(ac->cmd_wait,
+				 (atomic_read(&ac->cmd_state) == 0), 5*HZ);
+	if (!rc) {
+		pr_err("%s:timeout opcode[0x%x]\n", __func__,
+			   chan_map.hdr.opcode);
+		rc = -ETIMEDOUT;
+		goto fail_cmd;
+	}
 	return 0;
+fail_cmd:
+	return rc;
 }
 
 int q6asm_enc_cfg_blk_pcm(struct audio_client *ac,
@@ -1665,23 +1694,8 @@
 
 	memset(channel_mapping, 0, PCM_FORMAT_MAX_NUM_CHANNEL);
 
-	if (channels == 1)  {
-		channel_mapping[0] = PCM_CHANNEL_FL;
-	} else if (channels == 2) {
-		channel_mapping[0] = PCM_CHANNEL_FL;
-		channel_mapping[1] = PCM_CHANNEL_FR;
-	} else if (channels == 6) {
-		channel_mapping[0] = PCM_CHANNEL_FC;
-		channel_mapping[1] = PCM_CHANNEL_FL;
-		channel_mapping[2] = PCM_CHANNEL_FR;
-		channel_mapping[3] = PCM_CHANNEL_LB;
-		channel_mapping[4] = PCM_CHANNEL_RB;
-		channel_mapping[5] = PCM_CHANNEL_LFE;
-	} else {
-		pr_err("%s: ERROR.unsupported num_ch = %u\n", __func__,
-				channels);
+	if (q6asm_map_channels(channel_mapping, channels))
 		return -EINVAL;
-	}
 
 	rc = apr_send_pkt(ac->apr, (uint32_t *) &enc_cfg);
 	if (rc < 0) {
@@ -1700,6 +1714,96 @@
 	return -EINVAL;
 }
 
+int q6asm_enc_cfg_blk_pcm_native(struct audio_client *ac,
+			uint32_t rate, uint32_t channels)
+{
+	struct asm_multi_channel_pcm_enc_cfg_v2  enc_cfg;
+	u8 *channel_mapping;
+	u32 frames_per_buf = 0;
+
+	int rc = 0;
+
+	pr_debug("%s: Session %d, rate = %d, channels = %d\n", __func__,
+			 ac->session, rate, channels);
+
+	q6asm_add_hdr(ac, &enc_cfg.hdr, sizeof(enc_cfg), TRUE);
+
+	enc_cfg.hdr.opcode = ASM_STREAM_CMD_SET_ENCDEC_PARAM;
+	enc_cfg.encdec.param_id = ASM_PARAM_ID_ENCDEC_ENC_CFG_BLK_V2;
+	enc_cfg.encdec.param_size = sizeof(enc_cfg) - sizeof(enc_cfg.hdr) -
+				 sizeof(enc_cfg.encdec);
+	enc_cfg.encblk.frames_per_buf = frames_per_buf;
+	enc_cfg.encblk.enc_cfg_blk_size  = enc_cfg.encdec.param_size -
+				sizeof(struct asm_enc_cfg_blk_param_v2);
+
+	enc_cfg.num_channels = 0;	/* 0 => use the native channel count */
+	enc_cfg.bits_per_sample = 16;
+	enc_cfg.sample_rate = 0;	/* 0 => use the native sample rate */
+	enc_cfg.is_signed = 1;
+	channel_mapping = enc_cfg.channel_mapping;
+
+	memset(channel_mapping, 0, PCM_FORMAT_MAX_NUM_CHANNEL);
+
+	if (q6asm_map_channels(channel_mapping, channels))
+		return -EINVAL;
+
+	rc = apr_send_pkt(ac->apr, (uint32_t *) &enc_cfg);
+	if (rc < 0) {
+		pr_err("Comamnd open failed\n");
+		rc = -EINVAL;
+		goto fail_cmd;
+	}
+	rc = wait_event_timeout(ac->cmd_wait,
+			(atomic_read(&ac->cmd_state) == 0), 5*HZ);
+	if (!rc) {
+		pr_err("timeout opcode[0x%x] ", enc_cfg.hdr.opcode);
+		goto fail_cmd;
+	}
+	return 0;
+fail_cmd:
+	return -EINVAL;
+}
+
+static int q6asm_map_channels(u8 *channel_mapping, uint32_t channels)
+{
+	u8 *lchannel_mapping;
+	lchannel_mapping = channel_mapping;
+	pr_debug("%s channels passed: %d\n", __func__, channels);
+	if (channels == 1)  {
+		lchannel_mapping[0] = PCM_CHANNEL_FC;
+	} else if (channels == 2) {
+		lchannel_mapping[0] = PCM_CHANNEL_FL;
+		lchannel_mapping[1] = PCM_CHANNEL_FR;
+	} else if (channels == 3) {
+		lchannel_mapping[0] = PCM_CHANNEL_FC;
+		lchannel_mapping[1] = PCM_CHANNEL_FL;
+		lchannel_mapping[2] = PCM_CHANNEL_FR;
+	} else if (channels == 4) {
+		lchannel_mapping[0] = PCM_CHANNEL_FC;
+		lchannel_mapping[1] = PCM_CHANNEL_FL;
+		lchannel_mapping[2] = PCM_CHANNEL_FR;
+		lchannel_mapping[3] = PCM_CHANNEL_LB;
+	} else if (channels == 5) {
+		lchannel_mapping[0] = PCM_CHANNEL_FC;
+		lchannel_mapping[1] = PCM_CHANNEL_FL;
+		lchannel_mapping[2] = PCM_CHANNEL_FR;
+		lchannel_mapping[3] = PCM_CHANNEL_LB;
+		lchannel_mapping[4] = PCM_CHANNEL_RB;
+	} else if (channels == 6) {
+		lchannel_mapping[0] = PCM_CHANNEL_FC;
+		lchannel_mapping[1] = PCM_CHANNEL_FL;
+		lchannel_mapping[2] = PCM_CHANNEL_FR;
+		lchannel_mapping[3] = PCM_CHANNEL_LB;
+		lchannel_mapping[4] = PCM_CHANNEL_RB;
+		lchannel_mapping[5] = PCM_CHANNEL_LFE;
+	} else {
+		pr_err("%s: ERROR.unsupported num_ch = %u\n",
+		 __func__, channels);
+		return -EINVAL;
+	}
+	return 0;
+}
+
 int q6asm_enable_sbrps(struct audio_client *ac,
 			uint32_t sbr_ps_enable)
 {
@@ -1791,8 +1895,8 @@
 	struct asm_v13k_enc_cfg enc_cfg;
 	int rc = 0;
 
-	pr_debug("%s:session[%d]frames[%d]min_rate[0x%4x]max_rate[0x%4x]"
-		"reduced_rate_level[0x%4x]rate_modulation_cmd[0x%4x]", __func__,
+	pr_debug("%s:session[%d]frames[%d]min_rate[0x%4x]max_rate[0x%4x] reduced_rate_level[0x%4x]rate_modulation_cmd[0x%4x]",
+		 __func__,
 		ac->session, frames_per_buf, min_rate, max_rate,
 		reduced_rate_level, rate_modulation_cmd);
 
@@ -1833,8 +1937,8 @@
 	struct asm_evrc_enc_cfg enc_cfg;
 	int rc = 0;
 
-	pr_debug("%s:session[%d]frames[%d]min_rate[0x%4x]max_rate[0x%4x]"
-		"rate_modulation_cmd[0x%4x]", __func__, ac->session,
+	pr_debug("%s:session[%d]frames[%d]min_rate[0x%4x]max_rate[0x%4x] rate_modulation_cmd[0x%4x]",
+		 __func__, ac->session,
 		frames_per_buf,	min_rate, max_rate, rate_modulation_cmd);
 
 	q6asm_add_hdr(ac, &enc_cfg.hdr, sizeof(enc_cfg), TRUE);
@@ -1972,23 +2076,8 @@
 
 	memset(channel_mapping, 0, PCM_FORMAT_MAX_NUM_CHANNEL);
 
-	if (channels == 1)  {
-		channel_mapping[0] = PCM_CHANNEL_FL;
-	} else if (channels == 2) {
-		channel_mapping[0] = PCM_CHANNEL_FL;
-		channel_mapping[1] = PCM_CHANNEL_FR;
-	} else if (channels == 6) {
-		channel_mapping[0] = PCM_CHANNEL_FC;
-		channel_mapping[1] = PCM_CHANNEL_FL;
-		channel_mapping[2] = PCM_CHANNEL_FR;
-		channel_mapping[3] = PCM_CHANNEL_LB;
-		channel_mapping[4] = PCM_CHANNEL_RB;
-		channel_mapping[5] = PCM_CHANNEL_LFE;
-	} else {
-		pr_err("%s: ERROR.unsupported num_ch = %u\n", __func__,
-				channels);
+	if (q6asm_map_channels(channel_mapping, channels))
 		return -EINVAL;
-	}
 
 	rc = apr_send_pkt(ac->apr, (uint32_t *) &fmt);
 	if (rc < 0) {
@@ -2056,8 +2145,7 @@
 	struct asm_wma_cfg *wma_cfg = (struct asm_wma_cfg *)cfg;
 	int rc = 0;
 
-	pr_debug("session[%d]format_tag[0x%4x] rate[%d] ch[0x%4x] bps[%d],"
-		"balign[0x%4x], bit_sample[0x%4x], ch_msk[%d], enc_opt[0x%4x]\n",
+	pr_debug("session[%d]format_tag[0x%4x] rate[%d] ch[0x%4x] bps[%d], balign[0x%4x], bit_sample[0x%4x], ch_msk[%d], enc_opt[0x%4x]\n",
 		ac->session, wma_cfg->format_tag, wma_cfg->sample_rate,
 		wma_cfg->ch_cfg, wma_cfg->avg_bytes_per_sec,
 		wma_cfg->block_align, wma_cfg->valid_bits_per_sample,
@@ -2065,8 +2153,9 @@
 
 	q6asm_add_hdr(ac, &fmt.hdr, sizeof(fmt), TRUE);
 
-	fmt.hdr.opcode = ASM_MEDIA_FMT_WMA_V9_V2;
-
+	fmt.hdr.opcode = ASM_DATA_CMD_MEDIA_FMT_UPDATE_V2;
+	fmt.fmtblk.fmt_blk_size = sizeof(fmt) - sizeof(fmt.hdr) -
+					sizeof(fmt.fmtblk);
 	fmt.fmtag = wma_cfg->format_tag;
 	fmt.num_channels = wma_cfg->ch_cfg;
 	fmt.sample_rate = wma_cfg->sample_rate;
@@ -2100,9 +2189,7 @@
 	struct asm_wmapro_cfg *wmapro_cfg = (struct asm_wmapro_cfg *)cfg;
 	int rc = 0;
 
-	pr_debug("session[%d]format_tag[0x%4x] rate[%d] ch[0x%4x] bps[%d],"
-		"balign[0x%4x], bit_sample[0x%4x], ch_msk[%d], enc_opt[0x%4x],"
-		"adv_enc_opt[0x%4x], adv_enc_opt2[0x%8x]\n",
+	pr_debug("session[%d]format_tag[0x%4x] rate[%d] ch[0x%4x] bps[%d], balign[0x%4x], bit_sample[0x%4x], ch_msk[%d], enc_opt[0x%4x], adv_enc_opt[0x%4x], adv_enc_opt2[0x%8x]\n",
 		ac->session, wmapro_cfg->format_tag, wmapro_cfg->sample_rate,
 		wmapro_cfg->ch_cfg,  wmapro_cfg->avg_bytes_per_sec,
 		wmapro_cfg->block_align, wmapro_cfg->valid_bits_per_sample,
@@ -2111,7 +2198,9 @@
 
 	q6asm_add_hdr(ac, &fmt.hdr, sizeof(fmt), TRUE);
 
-	fmt.hdr.opcode = ASM_MEDIA_FMT_WMA_V10PRO_V2;
+	fmt.hdr.opcode = ASM_DATA_CMD_MEDIA_FMT_UPDATE_V2;
+	fmt.fmtblk.fmt_blk_size = sizeof(fmt) - sizeof(fmt.hdr) -
+						sizeof(fmt.fmtblk);
 
 	fmt.fmtag = wmapro_cfg->format_tag;
 	fmt.num_channels = wmapro_cfg->ch_cfg;
@@ -2147,12 +2236,10 @@
 	struct avs_cmd_shared_mem_map_regions *mmap_regions = NULL;
 	struct avs_shared_map_region_payload  *mregions = NULL;
 	struct audio_port_data *port = NULL;
-	struct audio_buffer *ab = NULL;
 	void	*mmap_region_cmd = NULL;
 	void	*payload = NULL;
 	struct asm_buffer_node *buffer_node = NULL;
 	int	rc = 0;
-	int	i = 0;
 	int	cmd_size = 0;
 
 	if (!ac || ac->apr == NULL || ac->mmap_apr == NULL) {
@@ -2181,21 +2268,18 @@
 	mmap_regions->mem_pool_id = ADSP_MEMORY_MAP_EBI_POOL;
 	mmap_regions->num_regions = bufcnt & 0x00ff;
 	mmap_regions->property_flag = 0x00;
-	pr_debug("map_regions->nregions = %d\n", mmap_regions->num_regions);
 	payload = ((u8 *) mmap_region_cmd +
 		sizeof(struct avs_cmd_shared_mem_map_regions));
 	mregions = (struct avs_shared_map_region_payload *)payload;
 
 	ac->port[dir].tmp_hdl = 0;
 	port = &ac->port[dir];
-	for (i = 0; i < bufcnt; i++) {
-		ab = &port->buf[i];
-		mregions->shm_addr_lsw = ab->phys;
-		/* Using only 32 bit address */
-		mregions->shm_addr_msw = 0;
-		mregions->mem_size_bytes = ab->size;
-		++mregions;
-	}
+	pr_debug("%s, buf_add 0x%x, bufsz: %d\n", __func__, buf_add, bufsz);
+	mregions->shm_addr_lsw = buf_add;
+	/* Using only 32 bit address */
+	mregions->shm_addr_msw = 0;
+	mregions->mem_size_bytes = bufsz;
+	++mregions;
 
 	rc = apr_send_pkt(ac->mmap_apr, (uint32_t *) mmap_region_cmd);
 	if (rc < 0) {
@@ -2295,7 +2379,7 @@
 	void	*payload = NULL;
 	struct asm_buffer_node *buffer_node = NULL;
 	int	rc = 0;
-	int	i = 0;
+	int    i = 0;
 	int	cmd_size = 0;
 
 	if (!ac || ac->apr == NULL || ac->mmap_apr == NULL) {
@@ -2351,7 +2435,6 @@
 	rc = wait_event_timeout(ac->cmd_wait,
 			(atomic_read(&ac->cmd_state) == 0)
 			 , 5*HZ);
-			 /*ac->port[dir].tmp_hdl), 5*HZ);*/
 	if (!rc) {
 		pr_err("timeout. waited for memory_map\n");
 		rc = -EINVAL;
@@ -2843,8 +2926,6 @@
 					read.buf_addr_lsw,
 					read.hdr.token,
 					read.seq_id);
-		pr_debug("q6asm_read_nolock mem-map handle is %x",
-				read.mem_map_handle);
 		rc = apr_send_pkt(ac->apr, (uint32_t *) &read);
 		if (rc < 0) {
 			pr_err("read op[0x%x]rc[%d]\n", read.hdr.opcode, rc);
@@ -2865,6 +2946,8 @@
 	struct list_head *ptr, *next;
 	struct audio_buffer        *ab;
 	struct audio_port_data     *port;
+	u32 lbuf_addr_lsw;
+	u32 liomode;
 
 	if (!ac || ac->apr == NULL) {
 		pr_err("%s: APR handle NULL\n", __func__);
@@ -2884,11 +2967,21 @@
 	write.buf_size = param->len;
 	write.timestamp_msw = param->msw_ts;
 	write.timestamp_lsw = param->lsw_ts;
-	pr_debug("%s: token[0x%x], buf_addr_lsw[0x%x], buf_size[0x%x],"
-		"ts_msw[0x%x], ts_lsw[0x%x]\n",
-		__func__, write.hdr.token, write.buf_addr_lsw,
+	liomode = (ASYNC_IO_MODE | NT_MODE);
+
+	if (ac->io_mode == liomode) {
+		pr_info("%s: subtracting 32 for header\n", __func__);
+		lbuf_addr_lsw = (write.buf_addr_lsw - 32);
+	} else {
+		lbuf_addr_lsw = write.buf_addr_lsw;
+	}
+
+	pr_debug("%s: token[0x%x], buf_addr_lsw[0x%x], buf_size[0x%x], ts_msw[0x%x], ts_lsw[0x%x], lbuf_addr_lsw: 0x[%x]\n",
+		__func__,
+		write.hdr.token, write.buf_addr_lsw,
 		write.buf_size, write.timestamp_msw,
-		write.timestamp_lsw);
+		write.timestamp_lsw, lbuf_addr_lsw);
+
 	/* Use 0xFF00 for disabling timestamps */
 	if (param->flags == 0xFF00)
 		write.flags = (0x00000000 | (param->flags & 0x800000FF));
@@ -2899,21 +2992,12 @@
 	list_for_each_safe(ptr, next, &ac->port[IN].mem_map_handle) {
 		buf_node = list_entry(ptr, struct asm_buffer_node,
 						list);
-		if (buf_node->buf_addr_lsw == (uint32_t)write.buf_addr_lsw) {
+		if (buf_node->buf_addr_lsw == lbuf_addr_lsw) {
 			write.mem_map_handle = buf_node->mmap_hdl;
-			pr_debug("%s:buf_node->mmap_hdl = 0x%x,"
-				"write.mem_map_handle = 0x%x\n",
-					__func__,
-					buf_node->mmap_hdl,
-					(uint32_t)write.mem_map_handle);
 			break;
 		}
 	}
 
-	pr_debug("%s: session[%d] bufadd[0x%x]len[0x%x],"
-			"mem_map_handle[0x%x]\n", __func__, ac->session,
-		write.buf_addr_lsw, write.buf_size, write.mem_map_handle);
-
 	rc = apr_send_pkt(ac->apr, (uint32_t *) &write);
 	if (rc < 0) {
 		pr_debug("[%s] write op[0x%x]rc[%d]\n", __func__,
@@ -2932,6 +3016,8 @@
 	struct asm_data_cmd_read_v2 read;
 	struct asm_buffer_node *buf_node = NULL;
 	struct list_head *ptr, *next;
+	u32 lbuf_addr_lsw;
+	u32 liomode;
 
 	if (!ac || ac->apr == NULL) {
 		pr_err("%s: APR handle NULL\n", __func__);
@@ -2947,16 +3033,21 @@
 	read.buf_addr_msw = 0;
 	read.buf_size = param->len;
 	read.seq_id = param->uid;
-
-	list_for_each_safe(ptr, next, &ac->port[IN].mem_map_handle) {
-		buf_node = list_entry(ptr, struct asm_buffer_node,
-						list);
-			if (buf_node->buf_addr_lsw == param->paddr)
-				read.mem_map_handle = buf_node->mmap_hdl;
+	liomode = (NT_MODE | ASYNC_IO_MODE);
+	if (ac->io_mode == liomode) {
+		pr_info("%s: subtracting 32 for header\n", __func__);
+		lbuf_addr_lsw = (read.buf_addr_lsw - 32);
+	} else {
+		lbuf_addr_lsw = read.buf_addr_lsw;
 	}
 
-	pr_debug("%s: session[%d] bufadd[0x%x]len[0x%x]", __func__, ac->session,
-		read.buf_addr_lsw, read.buf_size);
+	list_for_each_safe(ptr, next, &ac->port[IN].mem_map_handle) {
+		buf_node = list_entry(ptr, struct asm_buffer_node, list);
+		if (buf_node->buf_addr_lsw == lbuf_addr_lsw) {
+			read.mem_map_handle = buf_node->mmap_hdl;
+			break;
+		}
+	}
 
 	rc = apr_send_pkt(ac->apr, (uint32_t *) &read);
 	if (rc < 0) {
@@ -3013,8 +3104,7 @@
 						list);
 		write.mem_map_handle = buf_node->mmap_hdl;
 
-		pr_debug("%s:ab->phys[0x%x]bufadd[0x%x]"
-			"token[0x%x]buf_id[0x%x]buf_size[0x%x]mmaphdl[0x%x]"
+		pr_debug("%s:ab->phys[0x%x]bufadd[0x%x] token[0x%x]buf_id[0x%x]buf_size[0x%x]mmaphdl[0x%x]"
 						, __func__,
 						ab->phys,
 						write.buf_addr_lsw,
@@ -3081,8 +3171,7 @@
 			write.flags = (0x80000000 | flags);
 		port->dsp_buf = (port->dsp_buf + 1) & (port->max_buf_cnt - 1);
 
-		pr_err("%s:ab->phys[0x%x]bufadd[0x%x]token[0x%x]"
-			"buf_id[0x%x]buf_size[0x%x]mmaphdl[0x%x]"
+		pr_debug("%s:ab->phys[0x%x]bufadd[0x%x]token[0x%x] buf_id[0x%x]buf_size[0x%x]mmaphdl[0x%x]"
 							, __func__,
 							ab->phys,
 							write.buf_addr_lsw,
diff --git a/sound/soc/msm/qdsp6v2/q6voice.c b/sound/soc/msm/qdsp6v2/q6voice.c
new file mode 100644
index 0000000..4e11dfe
--- /dev/null
+++ b/sound/soc/msm/qdsp6v2/q6voice.c
@@ -0,0 +1,3916 @@
+/*  Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include <linux/slab.h>
+#include <linux/kthread.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/uaccess.h>
+#include <linux/wait.h>
+#include <linux/mutex.h>
+
+#include <asm/mach-types.h>
+#include <mach/qdsp6v2/audio_acdb.h>
+#include <mach/qdsp6v2/rtac.h>
+#include <mach/socinfo.h>
+
+#include "sound/apr_audio-v2.h"
+#include "sound/q6afe-v2.h"
+
+#include "q6voice.h"
+
+#define TIMEOUT_MS 200
+
+
+#define CMD_STATUS_SUCCESS 0
+#define CMD_STATUS_FAIL 1
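+
+/*
+ * Command handshake used throughout this file: the caller sets the relevant
+ * per-session state to CMD_STATUS_FAIL, sends the APR packet and then sleeps
+ * on the session's wait queue until the state is flipped to
+ * CMD_STATUS_SUCCESS from the APR callback, or TIMEOUT_MS expires.
+ */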
+
+#define VOC_PATH_PASSIVE 0
+#define VOC_PATH_FULL 1
+#define VOC_PATH_VOLTE_PASSIVE 2
+
+/* CVP CAL Size: 245760 = 240 * 1024 */
+#define CVP_CAL_SIZE 245760
+/* CVS CAL Size: 49152 = 48 * 1024 */
+#define CVS_CAL_SIZE 49152
+
+static struct common_data common;
+
+static int voice_send_enable_vocproc_cmd(struct voice_data *v);
+static int voice_send_netid_timing_cmd(struct voice_data *v);
+static int voice_send_attach_vocproc_cmd(struct voice_data *v);
+static int voice_send_set_device_cmd(struct voice_data *v);
+static int voice_send_disable_vocproc_cmd(struct voice_data *v);
+static int voice_send_vol_index_cmd(struct voice_data *v);
+static int voice_send_cvp_map_memory_cmd(struct voice_data *v);
+static int voice_send_cvp_unmap_memory_cmd(struct voice_data *v);
+static int voice_send_cvs_map_memory_cmd(struct voice_data *v);
+static int voice_send_cvs_unmap_memory_cmd(struct voice_data *v);
+static int voice_send_cvs_register_cal_cmd(struct voice_data *v);
+static int voice_send_cvs_deregister_cal_cmd(struct voice_data *v);
+static int voice_send_cvp_register_cal_cmd(struct voice_data *v);
+static int voice_send_cvp_deregister_cal_cmd(struct voice_data *v);
+static int voice_send_cvp_register_vol_cal_table_cmd(struct voice_data *v);
+static int voice_send_cvp_deregister_vol_cal_table_cmd(struct voice_data *v);
+static int voice_send_set_widevoice_enable_cmd(struct voice_data *v);
+static int voice_send_set_pp_enable_cmd(struct voice_data *v,
+					uint32_t module_id, int enable);
+static int voice_cvs_stop_playback(struct voice_data *v);
+static int voice_cvs_start_playback(struct voice_data *v);
+static int voice_cvs_start_record(struct voice_data *v, uint32_t rec_mode);
+static int voice_cvs_stop_record(struct voice_data *v);
+
+static int32_t qdsp_mvm_callback(struct apr_client_data *data, void *priv);
+static int32_t qdsp_cvs_callback(struct apr_client_data *data, void *priv);
+static int32_t qdsp_cvp_callback(struct apr_client_data *data, void *priv);
+
+static u16 voice_get_mvm_handle(struct voice_data *v)
+{
+	if (v == NULL) {
+		pr_err("%s: v is NULL\n", __func__);
+		return 0;
+	}
+
+	pr_debug("%s: mvm_handle %d\n", __func__, v->mvm_handle);
+
+	return v->mvm_handle;
+}
+
+static void voice_set_mvm_handle(struct voice_data *v, u16 mvm_handle)
+{
+	pr_debug("%s: mvm_handle %d\n", __func__, mvm_handle);
+	if (v == NULL) {
+		pr_err("%s: v is NULL\n", __func__);
+		return;
+	}
+
+	v->mvm_handle = mvm_handle;
+}
+
+static u16 voice_get_cvs_handle(struct voice_data *v)
+{
+	if (v == NULL) {
+		pr_err("%s: v is NULL\n", __func__);
+		return 0;
+	}
+
+	pr_debug("%s: cvs_handle %d\n", __func__, v->cvs_handle);
+
+	return v->cvs_handle;
+}
+
+static void voice_set_cvs_handle(struct voice_data *v, u16 cvs_handle)
+{
+	pr_debug("%s: cvs_handle %d\n", __func__, cvs_handle);
+	if (v == NULL) {
+		pr_err("%s: v is NULL\n", __func__);
+		return;
+	}
+
+	v->cvs_handle = cvs_handle;
+}
+
+static u16 voice_get_cvp_handle(struct voice_data *v)
+{
+	if (v == NULL) {
+		pr_err("%s: v is NULL\n", __func__);
+		return 0;
+	}
+
+	pr_debug("%s: cvp_handle %d\n", __func__, v->cvp_handle);
+
+	return v->cvp_handle;
+}
+
+static void voice_set_cvp_handle(struct voice_data *v, u16 cvp_handle)
+{
+	pr_debug("%s: cvp_handle %d\n", __func__, cvp_handle);
+	if (v == NULL) {
+		pr_err("%s: v is NULL\n", __func__);
+		return;
+	}
+
+	v->cvp_handle = cvp_handle;
+}
+
+uint16_t voc_get_session_id(char *name)
+{
+	u16 session_id = 0;
+
+	if (name != NULL) {
+		if (!strncmp(name, "Voice session", 13))
+			session_id = common.voice[VOC_PATH_PASSIVE].session_id;
+		else if (!strncmp(name, "VoLTE session", 13))
+			session_id =
+			common.voice[VOC_PATH_VOLTE_PASSIVE].session_id;
+		else
+			session_id = common.voice[VOC_PATH_FULL].session_id;
+
+		pr_debug("%s: %s has session id 0x%x\n", __func__, name,
+				session_id);
+	}
+
+	return session_id;
+}
+
+static struct voice_data *voice_get_session(u16 session_id)
+{
+	struct voice_data *v = NULL;
+
+	if ((session_id >= SESSION_ID_BASE) &&
+	    (session_id < SESSION_ID_BASE + MAX_VOC_SESSIONS)) {
+		v = &common.voice[session_id - SESSION_ID_BASE];
+	}
+
+	pr_debug("%s: session_id 0x%x session handle 0x%x\n",
+			 __func__, session_id, (unsigned int)v);
+
+	return v;
+}
+
+static bool is_voice_session(u16 session_id)
+{
+	return (session_id == common.voice[VOC_PATH_PASSIVE].session_id);
+}
+
+static bool is_voip_session(u16 session_id)
+{
+	return (session_id == common.voice[VOC_PATH_FULL].session_id);
+}
+
+static bool is_volte_session(u16 session_id)
+{
+	return (session_id == common.voice[VOC_PATH_VOLTE_PASSIVE].session_id);
+}
+
+static int voice_apr_register(void)
+{
+	pr_debug("%s\n", __func__);
+
+	mutex_lock(&common.common_lock);
+
+	/* register callback to APR */
+	if (common.apr_q6_mvm == NULL) {
+		pr_debug("%s: Start to register MVM callback\n", __func__);
+
+		common.apr_q6_mvm = apr_register("ADSP", "MVM",
+						 qdsp_mvm_callback,
+						 0xFFFFFFFF, &common);
+
+		if (common.apr_q6_mvm == NULL) {
+			pr_err("%s: Unable to register MVM\n", __func__);
+			goto err;
+		}
+	}
+
+	if (common.apr_q6_cvs == NULL) {
+		pr_debug("%s: Start to register CVS callback\n", __func__);
+
+		common.apr_q6_cvs = apr_register("ADSP", "CVS",
+						 qdsp_cvs_callback,
+						 0xFFFFFFFF, &common);
+
+		if (common.apr_q6_cvs == NULL) {
+			pr_err("%s: Unable to register CVS\n", __func__);
+			goto err;
+		}
+
+		rtac_set_voice_handle(RTAC_CVS, common.apr_q6_cvs);
+	}
+
+	if (common.apr_q6_cvp == NULL) {
+		pr_debug("%s: Start to register CVP callback\n", __func__);
+
+		common.apr_q6_cvp = apr_register("ADSP", "CVP",
+						 qdsp_cvp_callback,
+						 0xFFFFFFFF, &common);
+
+		if (common.apr_q6_cvp == NULL) {
+			pr_err("%s: Unable to register CVP\n", __func__);
+			goto err;
+		}
+
+		rtac_set_voice_handle(RTAC_CVP, common.apr_q6_cvp);
+	}
+
+	mutex_unlock(&common.common_lock);
+
+	return 0;
+
+err:
+	if (common.apr_q6_cvs != NULL) {
+		apr_deregister(common.apr_q6_cvs);
+		common.apr_q6_cvs = NULL;
+		rtac_set_voice_handle(RTAC_CVS, NULL);
+	}
+	if (common.apr_q6_mvm != NULL) {
+		apr_deregister(common.apr_q6_mvm);
+		common.apr_q6_mvm = NULL;
+	}
+
+	mutex_unlock(&common.common_lock);
+
+	return -ENODEV;
+}
+
+static int voice_send_dual_control_cmd(struct voice_data *v)
+{
+	int ret = 0;
+	struct mvm_modem_dual_control_session_cmd mvm_voice_ctl_cmd;
+	void *apr_mvm;
+	u16 mvm_handle;
+
+	if (v == NULL) {
+		pr_err("%s: v is NULL\n", __func__);
+		return -EINVAL;
+	}
+	apr_mvm = common.apr_q6_mvm;
+	if (!apr_mvm) {
+		pr_err("%s: apr_mvm is NULL.\n", __func__);
+		return -EINVAL;
+	}
+	pr_debug("%s: VoLTE command to MVM\n", __func__);
+	if (is_volte_session(v->session_id)) {
+		mvm_handle = voice_get_mvm_handle(v);
+		mvm_voice_ctl_cmd.hdr.hdr_field = APR_HDR_FIELD(
+						APR_MSG_TYPE_SEQ_CMD,
+						APR_HDR_LEN(APR_HDR_SIZE),
+						APR_PKT_VER);
+		mvm_voice_ctl_cmd.hdr.pkt_size = APR_PKT_SIZE(
+						APR_HDR_SIZE,
+						sizeof(mvm_voice_ctl_cmd) -
+						APR_HDR_SIZE);
+		pr_debug("%s: send mvm Voice Ctl pkt size = %d\n",
+			__func__, mvm_voice_ctl_cmd.hdr.pkt_size);
+		mvm_voice_ctl_cmd.hdr.src_port = v->session_id;
+		mvm_voice_ctl_cmd.hdr.dest_port = mvm_handle;
+		mvm_voice_ctl_cmd.hdr.token = 0;
+		mvm_voice_ctl_cmd.hdr.opcode =
+					VSS_IMVM_CMD_SET_POLICY_DUAL_CONTROL;
+		mvm_voice_ctl_cmd.voice_ctl.enable_flag = true;
+		v->mvm_state = CMD_STATUS_FAIL;
+
+		ret = apr_send_pkt(apr_mvm, (uint32_t *) &mvm_voice_ctl_cmd);
+		if (ret < 0) {
+			pr_err("%s: Error sending MVM Voice CTL CMD\n",
+							__func__);
+			ret = -EINVAL;
+			goto fail;
+		}
+		ret = wait_event_timeout(v->mvm_wait,
+				(v->mvm_state == CMD_STATUS_SUCCESS),
+				msecs_to_jiffies(TIMEOUT_MS));
+		if (!ret) {
+			pr_err("%s: wait_event timeout\n", __func__);
+			ret = -EINVAL;
+			goto fail;
+		}
+	}
+	ret = 0;
+fail:
+	return ret;
+}
+
+
+static int voice_create_mvm_cvs_session(struct voice_data *v)
+{
+	int ret = 0;
+	struct mvm_create_ctl_session_cmd mvm_session_cmd;
+	struct cvs_create_passive_ctl_session_cmd cvs_session_cmd;
+	struct cvs_create_full_ctl_session_cmd cvs_full_ctl_cmd;
+	struct mvm_attach_stream_cmd attach_stream_cmd;
+	void *apr_mvm, *apr_cvs, *apr_cvp;
+	u16 mvm_handle, cvs_handle, cvp_handle;
+
+	if (v == NULL) {
+		pr_err("%s: v is NULL\n", __func__);
+		return -EINVAL;
+	}
+	apr_mvm = common.apr_q6_mvm;
+	apr_cvs = common.apr_q6_cvs;
+	apr_cvp = common.apr_q6_cvp;
+
+	if (!apr_mvm || !apr_cvs || !apr_cvp) {
+		pr_err("%s: apr_mvm or apr_cvs or apr_cvp is NULL\n", __func__);
+		return -EINVAL;
+	}
+	mvm_handle = voice_get_mvm_handle(v);
+	cvs_handle = voice_get_cvs_handle(v);
+	cvp_handle = voice_get_cvp_handle(v);
+
+	pr_debug("%s: mvm_hdl=%d, cvs_hdl=%d\n", __func__,
+		mvm_handle, cvs_handle);
+	/* send cmd to create mvm session and wait for response */
+
+	if (!mvm_handle) {
+		if (is_voice_session(v->session_id) ||
+				is_volte_session(v->session_id)) {
+			mvm_session_cmd.hdr.hdr_field = APR_HDR_FIELD(
+						APR_MSG_TYPE_SEQ_CMD,
+						APR_HDR_LEN(APR_HDR_SIZE),
+						APR_PKT_VER);
+			mvm_session_cmd.hdr.pkt_size = APR_PKT_SIZE(
+						APR_HDR_SIZE,
+						sizeof(mvm_session_cmd) -
+						APR_HDR_SIZE);
+			pr_debug("%s: send mvm create session pkt size = %d\n",
+				 __func__, mvm_session_cmd.hdr.pkt_size);
+			mvm_session_cmd.hdr.src_port = v->session_id;
+			mvm_session_cmd.hdr.dest_port = 0;
+			mvm_session_cmd.hdr.token = 0;
+			mvm_session_cmd.hdr.opcode =
+				VSS_IMVM_CMD_CREATE_PASSIVE_CONTROL_SESSION;
+			if (is_volte_session(v->session_id)) {
+				strlcpy(mvm_session_cmd.mvm_session.name,
+				"default volte voice",
+				sizeof(mvm_session_cmd.mvm_session.name));
+			} else {
+				strlcpy(mvm_session_cmd.mvm_session.name,
+				"default modem voice",
+				sizeof(mvm_session_cmd.mvm_session.name));
+			}
+
+			v->mvm_state = CMD_STATUS_FAIL;
+
+			ret = apr_send_pkt(apr_mvm,
+					(uint32_t *) &mvm_session_cmd);
+			if (ret < 0) {
+				pr_err("%s: Error sending MVM_CONTROL_SESSION\n",
+				       __func__);
+				goto fail;
+			}
+			ret = wait_event_timeout(v->mvm_wait,
+					(v->mvm_state == CMD_STATUS_SUCCESS),
+					msecs_to_jiffies(TIMEOUT_MS));
+			if (!ret) {
+				pr_err("%s: wait_event timeout\n", __func__);
+				goto fail;
+			}
+		} else {
+			pr_debug("%s: creating MVM full ctrl\n", __func__);
+			mvm_session_cmd.hdr.hdr_field =
+					APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+					APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+			mvm_session_cmd.hdr.pkt_size =
+					APR_PKT_SIZE(APR_HDR_SIZE,
+					sizeof(mvm_session_cmd) -
+					APR_HDR_SIZE);
+			mvm_session_cmd.hdr.src_port = v->session_id;
+			mvm_session_cmd.hdr.dest_port = 0;
+			mvm_session_cmd.hdr.token = 0;
+			mvm_session_cmd.hdr.opcode =
+				VSS_IMVM_CMD_CREATE_FULL_CONTROL_SESSION;
+			strlcpy(mvm_session_cmd.mvm_session.name,
+				"default voip",
+				sizeof(mvm_session_cmd.mvm_session.name));
+
+			v->mvm_state = CMD_STATUS_FAIL;
+
+			ret = apr_send_pkt(apr_mvm,
+					(uint32_t *) &mvm_session_cmd);
+			if (ret < 0) {
+				pr_err("Fail in sending MVM_CONTROL_SESSION\n");
+				goto fail;
+			}
+			ret = wait_event_timeout(v->mvm_wait,
+					 (v->mvm_state == CMD_STATUS_SUCCESS),
+					 msecs_to_jiffies(TIMEOUT_MS));
+			if (!ret) {
+				pr_err("%s: wait_event timeout\n", __func__);
+				goto fail;
+			}
+		}
+		/* Get the created MVM handle. */
+		mvm_handle = voice_get_mvm_handle(v);
+	}
+	/* send cmd to create cvs session */
+	if (!cvs_handle) {
+		if (is_voice_session(v->session_id) ||
+			is_volte_session(v->session_id)) {
+			pr_debug("%s: creating CVS passive session\n",
+				 __func__);
+
+			cvs_session_cmd.hdr.hdr_field = APR_HDR_FIELD(
+						APR_MSG_TYPE_SEQ_CMD,
+						APR_HDR_LEN(APR_HDR_SIZE),
+						APR_PKT_VER);
+			cvs_session_cmd.hdr.pkt_size =
+						APR_PKT_SIZE(APR_HDR_SIZE,
+						sizeof(cvs_session_cmd) -
+						APR_HDR_SIZE);
+			cvs_session_cmd.hdr.src_port = v->session_id;
+			cvs_session_cmd.hdr.dest_port = 0;
+			cvs_session_cmd.hdr.token = 0;
+			cvs_session_cmd.hdr.opcode =
+				VSS_ISTREAM_CMD_CREATE_PASSIVE_CONTROL_SESSION;
+			if (is_volte_session(v->session_id)) {
+				strlcpy(cvs_session_cmd.cvs_session.name,
+				"default volte voice",
+				sizeof(cvs_session_cmd.cvs_session.name));
+			} else {
+				strlcpy(cvs_session_cmd.cvs_session.name,
+				"default modem voice",
+				sizeof(cvs_session_cmd.cvs_session.name));
+			}
+			v->cvs_state = CMD_STATUS_FAIL;
+
+			ret = apr_send_pkt(apr_cvs,
+					(uint32_t *) &cvs_session_cmd);
+			if (ret < 0) {
+				pr_err("Fail in sending STREAM_CONTROL_SESSION\n");
+				goto fail;
+			}
+			ret = wait_event_timeout(v->cvs_wait,
+					 (v->cvs_state == CMD_STATUS_SUCCESS),
+					 msecs_to_jiffies(TIMEOUT_MS));
+			if (!ret) {
+				pr_err("%s: wait_event timeout\n", __func__);
+				goto fail;
+			}
+			/* Get the created CVS handle. */
+			cvs_handle = voice_get_cvs_handle(v);
+
+		} else {
+			pr_debug("%s: creating CVS full session\n", __func__);
+
+			cvs_full_ctl_cmd.hdr.hdr_field =
+					APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+					APR_HDR_LEN(APR_HDR_SIZE),
+					APR_PKT_VER);
+
+			cvs_full_ctl_cmd.hdr.pkt_size =
+					APR_PKT_SIZE(APR_HDR_SIZE,
+					sizeof(cvs_full_ctl_cmd) -
+					APR_HDR_SIZE);
+
+			cvs_full_ctl_cmd.hdr.src_port = v->session_id;
+			cvs_full_ctl_cmd.hdr.dest_port = 0;
+			cvs_full_ctl_cmd.hdr.token = 0;
+			cvs_full_ctl_cmd.hdr.opcode =
+				VSS_ISTREAM_CMD_CREATE_FULL_CONTROL_SESSION;
+			cvs_full_ctl_cmd.cvs_session.direction = 2;
+			cvs_full_ctl_cmd.cvs_session.enc_media_type =
+						common.mvs_info.media_type;
+			cvs_full_ctl_cmd.cvs_session.dec_media_type =
+						common.mvs_info.media_type;
+			cvs_full_ctl_cmd.cvs_session.network_id =
+					       common.mvs_info.network_type;
+			strlcpy(cvs_full_ctl_cmd.cvs_session.name,
+				"default q6 voice",
+				sizeof(cvs_full_ctl_cmd.cvs_session.name));
+
+			v->cvs_state = CMD_STATUS_FAIL;
+
+			ret = apr_send_pkt(apr_cvs,
+					   (uint32_t *) &cvs_full_ctl_cmd);
+
+			if (ret < 0) {
+				pr_err("%s: Err %d sending CREATE_FULL_CTRL\n",
+					__func__, ret);
+				goto fail;
+			}
+			ret = wait_event_timeout(v->cvs_wait,
+					(v->cvs_state == CMD_STATUS_SUCCESS),
+					msecs_to_jiffies(TIMEOUT_MS));
+			if (!ret) {
+				pr_err("%s: wait_event timeout\n", __func__);
+				goto fail;
+			}
+			/* Get the created CVS handle. */
+			cvs_handle = voice_get_cvs_handle(v);
+
+			/* Attach MVM to CVS. */
+			pr_debug("%s: Attach MVM to stream\n", __func__);
+
+			attach_stream_cmd.hdr.hdr_field =
+					APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+					APR_HDR_LEN(APR_HDR_SIZE),
+					APR_PKT_VER);
+			attach_stream_cmd.hdr.pkt_size =
+					APR_PKT_SIZE(APR_HDR_SIZE,
+					sizeof(attach_stream_cmd) -
+					APR_HDR_SIZE);
+			attach_stream_cmd.hdr.src_port = v->session_id;
+			attach_stream_cmd.hdr.dest_port = mvm_handle;
+			attach_stream_cmd.hdr.token = 0;
+			attach_stream_cmd.hdr.opcode =
+						VSS_IMVM_CMD_ATTACH_STREAM;
+			attach_stream_cmd.attach_stream.handle = cvs_handle;
+
+			v->mvm_state = CMD_STATUS_FAIL;
+			ret = apr_send_pkt(apr_mvm,
+					   (uint32_t *) &attach_stream_cmd);
+			if (ret < 0) {
+				pr_err("%s: Error %d sending ATTACH_STREAM\n",
+				       __func__, ret);
+				goto fail;
+			}
+			ret = wait_event_timeout(v->mvm_wait,
+					 (v->mvm_state == CMD_STATUS_SUCCESS),
+					 msecs_to_jiffies(TIMEOUT_MS));
+			if (!ret) {
+				pr_err("%s: wait_event timeout\n", __func__);
+				goto fail;
+			}
+		}
+	}
+	return 0;
+
+fail:
+	return -EINVAL;
+}
+
+static int voice_destroy_mvm_cvs_session(struct voice_data *v)
+{
+	int ret = 0;
+	struct mvm_detach_stream_cmd detach_stream;
+	struct apr_hdr mvm_destroy;
+	struct apr_hdr cvs_destroy;
+	void *apr_mvm, *apr_cvs;
+	u16 mvm_handle, cvs_handle;
+
+	if (v == NULL) {
+		pr_err("%s: v is NULL\n", __func__);
+		return -EINVAL;
+	}
+	apr_mvm = common.apr_q6_mvm;
+	apr_cvs = common.apr_q6_cvs;
+
+	if (!apr_mvm || !apr_cvs) {
+		pr_err("%s: apr_mvm or apr_cvs is NULL\n", __func__);
+		return -EINVAL;
+	}
+	mvm_handle = voice_get_mvm_handle(v);
+	cvs_handle = voice_get_cvs_handle(v);
+
+	/* MVM, CVS sessions are destroyed only for Full control sessions. */
+	if (is_voip_session(v->session_id)) {
+		pr_debug("%s: MVM detach stream\n", __func__);
+
+		/* Detach voice stream. */
+		detach_stream.hdr.hdr_field =
+					APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+					APR_HDR_LEN(APR_HDR_SIZE),
+					APR_PKT_VER);
+		detach_stream.hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE,
+					sizeof(detach_stream) - APR_HDR_SIZE);
+		detach_stream.hdr.src_port = v->session_id;
+		detach_stream.hdr.dest_port = mvm_handle;
+		detach_stream.hdr.token = 0;
+		detach_stream.hdr.opcode = VSS_IMVM_CMD_DETACH_STREAM;
+		detach_stream.detach_stream.handle = cvs_handle;
+
+		v->mvm_state = CMD_STATUS_FAIL;
+
+		ret = apr_send_pkt(apr_mvm, (uint32_t *) &detach_stream);
+		if (ret < 0) {
+			pr_err("%s: Error %d sending DETACH_STREAM\n",
+			       __func__, ret);
+			goto fail;
+		}
+		ret = wait_event_timeout(v->mvm_wait,
+					 (v->mvm_state == CMD_STATUS_SUCCESS),
+					 msecs_to_jiffies(TIMEOUT_MS));
+		if (!ret) {
+			pr_err("%s: wait event timeout\n", __func__);
+			goto fail;
+		}
+		/* Destroy CVS. */
+		pr_debug("%s: CVS destroy session\n", __func__);
+
+		cvs_destroy.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+						      APR_HDR_LEN(APR_HDR_SIZE),
+						      APR_PKT_VER);
+		cvs_destroy.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE,
+					sizeof(cvs_destroy) - APR_HDR_SIZE);
+		cvs_destroy.src_port = v->session_id;
+		cvs_destroy.dest_port = cvs_handle;
+		cvs_destroy.token = 0;
+		cvs_destroy.opcode = APRV2_IBASIC_CMD_DESTROY_SESSION;
+
+		v->cvs_state = CMD_STATUS_FAIL;
+
+		ret = apr_send_pkt(apr_cvs, (uint32_t *) &cvs_destroy);
+		if (ret < 0) {
+			pr_err("%s: Error %d sending CVS DESTROY\n",
+			       __func__, ret);
+			goto fail;
+		}
+		ret = wait_event_timeout(v->cvs_wait,
+					 (v->cvs_state == CMD_STATUS_SUCCESS),
+					 msecs_to_jiffies(TIMEOUT_MS));
+		if (!ret) {
+			pr_err("%s: wait event timeout\n", __func__);
+
+			goto fail;
+		}
+		cvs_handle = 0;
+		voice_set_cvs_handle(v, cvs_handle);
+
+		/* Destroy MVM. */
+		pr_debug("MVM destroy session\n");
+
+		mvm_destroy.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+						      APR_HDR_LEN(APR_HDR_SIZE),
+						      APR_PKT_VER);
+		mvm_destroy.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE,
+					    sizeof(mvm_destroy) - APR_HDR_SIZE);
+		mvm_destroy.src_port = v->session_id;
+		mvm_destroy.dest_port = mvm_handle;
+		mvm_destroy.token = 0;
+		mvm_destroy.opcode = APRV2_IBASIC_CMD_DESTROY_SESSION;
+
+		v->mvm_state = CMD_STATUS_FAIL;
+
+		ret = apr_send_pkt(apr_mvm, (uint32_t *) &mvm_destroy);
+		if (ret < 0) {
+			pr_err("%s: Error %d sending MVM DESTROY\n",
+			       __func__, ret);
+
+			goto fail;
+		}
+		ret = wait_event_timeout(v->mvm_wait,
+					 (v->mvm_state == CMD_STATUS_SUCCESS),
+					 msecs_to_jiffies(TIMEOUT_MS));
+		if (!ret) {
+			pr_err("%s: wait event timeout\n", __func__);
+
+			goto fail;
+		}
+		mvm_handle = 0;
+		voice_set_mvm_handle(v, mvm_handle);
+	}
+	return 0;
+fail:
+	return -EINVAL;
+}
+
+static int voice_send_tty_mode_cmd(struct voice_data *v)
+{
+	int ret = 0;
+	struct mvm_set_tty_mode_cmd mvm_tty_mode_cmd;
+	void *apr_mvm;
+	u16 mvm_handle;
+
+	if (v == NULL) {
+		pr_err("%s: v is NULL\n", __func__);
+		return -EINVAL;
+	}
+	apr_mvm = common.apr_q6_mvm;
+
+	if (!apr_mvm) {
+		pr_err("%s: apr_mvm is NULL.\n", __func__);
+		return -EINVAL;
+	}
+	mvm_handle = voice_get_mvm_handle(v);
+
+	if (v->tty_mode) {
+		/* send tty mode cmd to mvm */
+		mvm_tty_mode_cmd.hdr.hdr_field = APR_HDR_FIELD(
+						APR_MSG_TYPE_SEQ_CMD,
+						APR_HDR_LEN(APR_HDR_SIZE),
+						APR_PKT_VER);
+		mvm_tty_mode_cmd.hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE,
+						sizeof(mvm_tty_mode_cmd) -
+						APR_HDR_SIZE);
+		pr_debug("%s: pkt size = %d\n",
+			 __func__, mvm_tty_mode_cmd.hdr.pkt_size);
+		mvm_tty_mode_cmd.hdr.src_port = v->session_id;
+		mvm_tty_mode_cmd.hdr.dest_port = mvm_handle;
+		mvm_tty_mode_cmd.hdr.token = 0;
+		mvm_tty_mode_cmd.hdr.opcode = VSS_ISTREAM_CMD_SET_TTY_MODE;
+		mvm_tty_mode_cmd.tty_mode.mode = v->tty_mode;
+		pr_debug("tty mode =%d\n", mvm_tty_mode_cmd.tty_mode.mode);
+
+		v->mvm_state = CMD_STATUS_FAIL;
+		ret = apr_send_pkt(apr_mvm, (uint32_t *) &mvm_tty_mode_cmd);
+		if (ret < 0) {
+			pr_err("%s: Error %d sending SET_TTY_MODE\n",
+			       __func__, ret);
+			goto fail;
+		}
+		ret = wait_event_timeout(v->mvm_wait,
+					 (v->mvm_state == CMD_STATUS_SUCCESS),
+					 msecs_to_jiffies(TIMEOUT_MS));
+		if (!ret) {
+			pr_err("%s: wait_event timeout\n", __func__);
+			goto fail;
+		}
+	}
+	return 0;
+fail:
+	return -EINVAL;
+}
+
+static int voice_set_dtx(struct voice_data *v)
+{
+	int ret = 0;
+	void *apr_cvs;
+	u16 cvs_handle;
+	struct cvs_set_enc_dtx_mode_cmd cvs_set_dtx;
+
+	if (v == NULL) {
+		pr_err("%s: v is NULL\n", __func__);
+		return -EINVAL;
+	}
+	apr_cvs = common.apr_q6_cvs;
+
+	if (!apr_cvs) {
+		pr_err("%s: apr_cvs is NULL.\n", __func__);
+		return -EINVAL;
+	}
+
+	cvs_handle = voice_get_cvs_handle(v);
+
+	/* Set DTX */
+	cvs_set_dtx.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+					      APR_HDR_LEN(APR_HDR_SIZE),
+					      APR_PKT_VER);
+	cvs_set_dtx.hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE,
+					sizeof(cvs_set_dtx) - APR_HDR_SIZE);
+	cvs_set_dtx.hdr.src_port = v->session_id;
+	cvs_set_dtx.hdr.dest_port = cvs_handle;
+	cvs_set_dtx.hdr.token = 0;
+	cvs_set_dtx.hdr.opcode = VSS_ISTREAM_CMD_SET_ENC_DTX_MODE;
+	cvs_set_dtx.dtx_mode.enable = common.mvs_info.dtx_mode;
+
+	pr_debug("%s: Setting DTX %d\n", __func__, common.mvs_info.dtx_mode);
+
+	v->cvs_state = CMD_STATUS_FAIL;
+
+	ret = apr_send_pkt(apr_cvs, (uint32_t *) &cvs_set_dtx);
+	if (ret < 0) {
+		pr_err("%s: Error %d sending SET_DTX\n", __func__, ret);
+		return -EINVAL;
+	}
+
+	ret = wait_event_timeout(v->cvs_wait,
+				 (v->cvs_state == CMD_STATUS_SUCCESS),
+				 msecs_to_jiffies(TIMEOUT_MS));
+	if (!ret) {
+		pr_err("%s: wait_event timeout\n", __func__);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int voice_config_cvs_vocoder(struct voice_data *v)
+{
+	int ret = 0;
+	void *apr_cvs;
+	u16 cvs_handle;
+	/* Set media type. */
+	struct cvs_set_media_type_cmd cvs_set_media_cmd;
+
+	if (v == NULL) {
+		pr_err("%s: v is NULL\n", __func__);
+		return -EINVAL;
+	}
+	apr_cvs = common.apr_q6_cvs;
+
+	if (!apr_cvs) {
+		pr_err("%s: apr_cvs is NULL.\n", __func__);
+		return -EINVAL;
+	}
+
+	cvs_handle = voice_get_cvs_handle(v);
+
+	cvs_set_media_cmd.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+						APR_HDR_LEN(APR_HDR_SIZE),
+						APR_PKT_VER);
+	cvs_set_media_cmd.hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE,
+				      sizeof(cvs_set_media_cmd) - APR_HDR_SIZE);
+	cvs_set_media_cmd.hdr.src_port = v->session_id;
+	cvs_set_media_cmd.hdr.dest_port = cvs_handle;
+	cvs_set_media_cmd.hdr.token = 0;
+	cvs_set_media_cmd.hdr.opcode = VSS_ISTREAM_CMD_SET_MEDIA_TYPE;
+	cvs_set_media_cmd.media_type.tx_media_id = common.mvs_info.media_type;
+	cvs_set_media_cmd.media_type.rx_media_id = common.mvs_info.media_type;
+
+	v->cvs_state = CMD_STATUS_FAIL;
+
+	ret = apr_send_pkt(apr_cvs, (uint32_t *) &cvs_set_media_cmd);
+	if (ret < 0) {
+		pr_err("%s: Error %d sending SET_MEDIA_TYPE\n",
+		       __func__, ret);
+
+		goto fail;
+	}
+	ret = wait_event_timeout(v->cvs_wait,
+				 (v->cvs_state == CMD_STATUS_SUCCESS),
+				 msecs_to_jiffies(TIMEOUT_MS));
+	if (!ret) {
+		pr_err("%s: wait_event timeout\n", __func__);
+
+		goto fail;
+	}
+	/* Set encoder properties. */
+	switch (common.mvs_info.media_type) {
+	case VSS_MEDIA_ID_EVRC_MODEM: {
+		struct cvs_set_cdma_enc_minmax_rate_cmd cvs_set_cdma_rate;
+
+		pr_debug("Setting EVRC min-max rate\n");
+
+		cvs_set_cdma_rate.hdr.hdr_field =
+					APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+					APR_HDR_LEN(APR_HDR_SIZE),
+					APR_PKT_VER);
+		cvs_set_cdma_rate.hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE,
+				      sizeof(cvs_set_cdma_rate) - APR_HDR_SIZE);
+		cvs_set_cdma_rate.hdr.src_port = v->session_id;
+		cvs_set_cdma_rate.hdr.dest_port = cvs_handle;
+		cvs_set_cdma_rate.hdr.token = 0;
+		cvs_set_cdma_rate.hdr.opcode =
+				VSS_ISTREAM_CMD_CDMA_SET_ENC_MINMAX_RATE;
+		cvs_set_cdma_rate.cdma_rate.min_rate = common.mvs_info.rate;
+		cvs_set_cdma_rate.cdma_rate.max_rate = common.mvs_info.rate;
+
+		v->cvs_state = CMD_STATUS_FAIL;
+
+		ret = apr_send_pkt(apr_cvs, (uint32_t *) &cvs_set_cdma_rate);
+		if (ret < 0) {
+			pr_err("%s: Error %d sending SET_EVRC_MINMAX_RATE\n",
+			       __func__, ret);
+			goto fail;
+		}
+		ret = wait_event_timeout(v->cvs_wait,
+					 (v->cvs_state == CMD_STATUS_SUCCESS),
+					 msecs_to_jiffies(TIMEOUT_MS));
+		if (!ret) {
+			pr_err("%s: wait_event timeout\n", __func__);
+
+			goto fail;
+		}
+		break;
+	}
+	case VSS_MEDIA_ID_AMR_NB_MODEM: {
+		struct cvs_set_amr_enc_rate_cmd cvs_set_amr_rate;
+
+		pr_debug("Setting AMR rate\n");
+
+		cvs_set_amr_rate.hdr.hdr_field =
+				APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+				APR_HDR_LEN(APR_HDR_SIZE),
+				APR_PKT_VER);
+		cvs_set_amr_rate.hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE,
+				       sizeof(cvs_set_amr_rate) - APR_HDR_SIZE);
+		cvs_set_amr_rate.hdr.src_port = v->session_id;
+		cvs_set_amr_rate.hdr.dest_port = cvs_handle;
+		cvs_set_amr_rate.hdr.token = 0;
+		cvs_set_amr_rate.hdr.opcode =
+					VSS_ISTREAM_CMD_VOC_AMR_SET_ENC_RATE;
+		cvs_set_amr_rate.amr_rate.mode = common.mvs_info.rate;
+
+		v->cvs_state = CMD_STATUS_FAIL;
+
+		ret = apr_send_pkt(apr_cvs, (uint32_t *) &cvs_set_amr_rate);
+		if (ret < 0) {
+			pr_err("%s: Error %d sending SET_AMR_RATE\n",
+			       __func__, ret);
+			goto fail;
+		}
+		ret = wait_event_timeout(v->cvs_wait,
+					 (v->cvs_state == CMD_STATUS_SUCCESS),
+					 msecs_to_jiffies(TIMEOUT_MS));
+		if (!ret) {
+			pr_err("%s: wait_event timeout\n", __func__);
+			goto fail;
+		}
+
+		ret = voice_set_dtx(v);
+		if (ret < 0)
+			goto fail;
+
+		break;
+	}
+	case VSS_MEDIA_ID_AMR_WB_MODEM: {
+		struct cvs_set_amrwb_enc_rate_cmd cvs_set_amrwb_rate;
+
+		pr_debug("Setting AMR WB rate\n");
+
+		cvs_set_amrwb_rate.hdr.hdr_field =
+				APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+				APR_HDR_LEN(APR_HDR_SIZE),
+				APR_PKT_VER);
+		cvs_set_amrwb_rate.hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE,
+						sizeof(cvs_set_amrwb_rate) -
+						APR_HDR_SIZE);
+		cvs_set_amrwb_rate.hdr.src_port = v->session_id;
+		cvs_set_amrwb_rate.hdr.dest_port = cvs_handle;
+		cvs_set_amrwb_rate.hdr.token = 0;
+		cvs_set_amrwb_rate.hdr.opcode =
+					VSS_ISTREAM_CMD_VOC_AMRWB_SET_ENC_RATE;
+		cvs_set_amrwb_rate.amrwb_rate.mode = common.mvs_info.rate;
+
+		v->cvs_state = CMD_STATUS_FAIL;
+
+		ret = apr_send_pkt(apr_cvs, (uint32_t *) &cvs_set_amrwb_rate);
+		if (ret < 0) {
+			pr_err("%s: Error %d sending SET_AMRWB_RATE\n",
+			       __func__, ret);
+			goto fail;
+		}
+		ret = wait_event_timeout(v->cvs_wait,
+					 (v->cvs_state == CMD_STATUS_SUCCESS),
+					 msecs_to_jiffies(TIMEOUT_MS));
+		if (!ret) {
+			pr_err("%s: wait_event timeout\n", __func__);
+			goto fail;
+		}
+
+		ret = voice_set_dtx(v);
+		if (ret < 0)
+			goto fail;
+
+		break;
+	}
+	case VSS_MEDIA_ID_G729:
+	case VSS_MEDIA_ID_G711_ALAW:
+	case VSS_MEDIA_ID_G711_MULAW: {
+		ret = voice_set_dtx(v);
+
+		break;
+	}
+	default:
+		/* Do nothing. */
+		break;
+	}
+	return 0;
+
+fail:
+	return -EINVAL;
+}
+
+static int voice_send_start_voice_cmd(struct voice_data *v)
+{
+	struct apr_hdr mvm_start_voice_cmd;
+	int ret = 0;
+	void *apr_mvm;
+	u16 mvm_handle;
+
+	if (v == NULL) {
+		pr_err("%s: v is NULL\n", __func__);
+		return -EINVAL;
+	}
+	apr_mvm = common.apr_q6_mvm;
+
+	if (!apr_mvm) {
+		pr_err("%s: apr_mvm is NULL.\n", __func__);
+		return -EINVAL;
+	}
+	mvm_handle = voice_get_mvm_handle(v);
+
+	mvm_start_voice_cmd.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+						APR_HDR_LEN(APR_HDR_SIZE),
+						APR_PKT_VER);
+	mvm_start_voice_cmd.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE,
+				sizeof(mvm_start_voice_cmd) - APR_HDR_SIZE);
+	pr_debug("send mvm_start_voice_cmd pkt size = %d\n",
+				mvm_start_voice_cmd.pkt_size);
+	mvm_start_voice_cmd.src_port = v->session_id;
+	mvm_start_voice_cmd.dest_port = mvm_handle;
+	mvm_start_voice_cmd.token = 0;
+	mvm_start_voice_cmd.opcode = VSS_IMVM_CMD_START_VOICE;
+
+	v->mvm_state = CMD_STATUS_FAIL;
+	ret = apr_send_pkt(apr_mvm, (uint32_t *) &mvm_start_voice_cmd);
+	if (ret < 0) {
+		pr_err("Fail in sending VSS_IMVM_CMD_START_VOICE\n");
+		goto fail;
+	}
+	ret = wait_event_timeout(v->mvm_wait,
+				 (v->mvm_state == CMD_STATUS_SUCCESS),
+				 msecs_to_jiffies(TIMEOUT_MS));
+	if (!ret) {
+		pr_err("%s: wait_event timeout\n", __func__);
+		goto fail;
+	}
+	return 0;
+fail:
+	return -EINVAL;
+}
+
+static int voice_send_disable_vocproc_cmd(struct voice_data *v)
+{
+	struct apr_hdr cvp_disable_cmd;
+	int ret = 0;
+	void *apr_cvp;
+	u16 cvp_handle;
+
+	if (v == NULL) {
+		pr_err("%s: v is NULL\n", __func__);
+		return -EINVAL;
+	}
+	apr_cvp = common.apr_q6_cvp;
+
+	if (!apr_cvp) {
+		pr_err("%s: apr regist failed\n", __func__);
+		return -EINVAL;
+	}
+	cvp_handle = voice_get_cvp_handle(v);
+
+	/* disable vocproc and wait for response */
+	cvp_disable_cmd.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+						APR_HDR_LEN(APR_HDR_SIZE),
+						APR_PKT_VER);
+	cvp_disable_cmd.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE,
+				sizeof(cvp_disable_cmd) - APR_HDR_SIZE);
+	pr_debug("cvp_disable_cmd pkt size = %d, cvp_handle=%d\n",
+		cvp_disable_cmd.pkt_size, cvp_handle);
+	cvp_disable_cmd.src_port = v->session_id;
+	cvp_disable_cmd.dest_port = cvp_handle;
+	cvp_disable_cmd.token = 0;
+	cvp_disable_cmd.opcode = VSS_IVOCPROC_CMD_DISABLE;
+
+	v->cvp_state = CMD_STATUS_FAIL;
+	ret = apr_send_pkt(apr_cvp, (uint32_t *) &cvp_disable_cmd);
+	if (ret < 0) {
+		pr_err("Fail in sending VSS_IVOCPROC_CMD_DISABLE\n");
+		goto fail;
+	}
+	ret = wait_event_timeout(v->cvp_wait,
+			(v->cvp_state == CMD_STATUS_SUCCESS),
+			msecs_to_jiffies(TIMEOUT_MS));
+	if (!ret) {
+		pr_err("%s: wait_event timeout\n", __func__);
+		goto fail;
+	}
+
+	return 0;
+fail:
+	return -EINVAL;
+}
+
+static int voice_send_set_device_cmd(struct voice_data *v)
+{
+	struct cvp_set_device_cmd  cvp_setdev_cmd;
+	int ret = 0;
+	void *apr_cvp;
+	u16 cvp_handle;
+
+	if (v == NULL) {
+		pr_err("%s: v is NULL\n", __func__);
+		return -EINVAL;
+	}
+	apr_cvp = common.apr_q6_cvp;
+
+	if (!apr_cvp) {
+		pr_err("%s: apr_cvp is NULL.\n", __func__);
+		return -EINVAL;
+	}
+	cvp_handle = voice_get_cvp_handle(v);
+
+	/* set device and wait for response */
+	cvp_setdev_cmd.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+						APR_HDR_LEN(APR_HDR_SIZE),
+						APR_PKT_VER);
+	cvp_setdev_cmd.hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE,
+				sizeof(cvp_setdev_cmd) - APR_HDR_SIZE);
+	pr_debug(" send create cvp setdev, pkt size = %d\n",
+			cvp_setdev_cmd.hdr.pkt_size);
+	cvp_setdev_cmd.hdr.src_port = v->session_id;
+	cvp_setdev_cmd.hdr.dest_port = cvp_handle;
+	cvp_setdev_cmd.hdr.token = 0;
+	cvp_setdev_cmd.hdr.opcode = VSS_IVOCPROC_CMD_SET_DEVICE;
+
+	/* Use default topology if invalid value in ACDB */
+	cvp_setdev_cmd.cvp_set_device.tx_topology_id =
+				get_voice_tx_topology();
+	if (cvp_setdev_cmd.cvp_set_device.tx_topology_id == 0)
+		cvp_setdev_cmd.cvp_set_device.tx_topology_id =
+				VSS_IVOCPROC_TOPOLOGY_ID_TX_SM_ECNS;
+
+	cvp_setdev_cmd.cvp_set_device.rx_topology_id =
+				get_voice_rx_topology();
+	if (cvp_setdev_cmd.cvp_set_device.rx_topology_id == 0)
+		cvp_setdev_cmd.cvp_set_device.rx_topology_id =
+				VSS_IVOCPROC_TOPOLOGY_ID_RX_DEFAULT;
+	cvp_setdev_cmd.cvp_set_device.tx_port_id = v->dev_tx.port_id;
+	cvp_setdev_cmd.cvp_set_device.rx_port_id = v->dev_rx.port_id;
+	pr_debug("topology=%d , tx_port_id=%d, rx_port_id=%d\n",
+		cvp_setdev_cmd.cvp_set_device.tx_topology_id,
+		cvp_setdev_cmd.cvp_set_device.tx_port_id,
+		cvp_setdev_cmd.cvp_set_device.rx_port_id);
+
+	v->cvp_state = CMD_STATUS_FAIL;
+	ret = apr_send_pkt(apr_cvp, (uint32_t *) &cvp_setdev_cmd);
+	if (ret < 0) {
+		pr_err("Fail in sending VOCPROC_FULL_CONTROL_SESSION\n");
+		goto fail;
+	}
+	pr_debug("wait for cvp create session event\n");
+	ret = wait_event_timeout(v->cvp_wait,
+			(v->cvp_state == CMD_STATUS_SUCCESS),
+			msecs_to_jiffies(TIMEOUT_MS));
+	if (!ret) {
+		pr_err("%s: wait_event timeout\n", __func__);
+		goto fail;
+	}
+
+	return 0;
+fail:
+	return -EINVAL;
+}
+
+static int voice_send_stop_voice_cmd(struct voice_data *v)
+{
+	struct apr_hdr mvm_stop_voice_cmd;
+	int ret = 0;
+	void *apr_mvm;
+	u16 mvm_handle;
+
+	if (v == NULL) {
+		pr_err("%s: v is NULL\n", __func__);
+		return -EINVAL;
+	}
+	apr_mvm = common.apr_q6_mvm;
+
+	if (!apr_mvm) {
+		pr_err("%s: apr_mvm is NULL.\n", __func__);
+		return -EINVAL;
+	}
+	mvm_handle = voice_get_mvm_handle(v);
+
+	mvm_stop_voice_cmd.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+						APR_HDR_LEN(APR_HDR_SIZE),
+						APR_PKT_VER);
+	mvm_stop_voice_cmd.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE,
+				sizeof(mvm_stop_voice_cmd) - APR_HDR_SIZE);
+	pr_debug("send mvm_stop_voice_cmd pkt size = %d\n",
+				mvm_stop_voice_cmd.pkt_size);
+	mvm_stop_voice_cmd.src_port = v->session_id;
+	mvm_stop_voice_cmd.dest_port = mvm_handle;
+	mvm_stop_voice_cmd.token = 0;
+	mvm_stop_voice_cmd.opcode = VSS_IMVM_CMD_STOP_VOICE;
+
+	v->mvm_state = CMD_STATUS_FAIL;
+	ret = apr_send_pkt(apr_mvm, (uint32_t *) &mvm_stop_voice_cmd);
+	if (ret < 0) {
+		pr_err("Fail in sending VSS_IMVM_CMD_STOP_VOICE\n");
+		goto fail;
+	}
+	ret = wait_event_timeout(v->mvm_wait,
+				 (v->mvm_state == CMD_STATUS_SUCCESS),
+				 msecs_to_jiffies(TIMEOUT_MS));
+	if (!ret) {
+		pr_err("%s: wait_event timeout\n", __func__);
+		goto fail;
+	}
+
+	return 0;
+fail:
+	return -EINVAL;
+}
+
+static int voice_send_cvs_register_cal_cmd(struct voice_data *v)
+{
+	struct cvs_register_cal_data_cmd cvs_reg_cal_cmd;
+	struct acdb_cal_block cal_block;
+	int ret = 0;
+	void *apr_cvs;
+	u16 cvs_handle;
+	uint32_t cal_paddr;
+
+	/* get the cvs cal data */
+	get_all_vocstrm_cal(&cal_block);
+	if (cal_block.cal_size == 0)
+		goto fail;
+
+	if (v == NULL) {
+		pr_err("%s: v is NULL\n", __func__);
+		return -EINVAL;
+	}
+	apr_cvs = common.apr_q6_cvs;
+
+	if (!apr_cvs) {
+		pr_err("%s: apr_cvs is NULL.\n", __func__);
+		return -EINVAL;
+	}
+
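+	/*
+	 * VoIP (full control) sessions copy the ACDB stream calibration into
+	 * the driver-owned cvs_cal buffer and register that buffer's physical
+	 * address; passive voice sessions register the ACDB block directly.
+	 */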
+	if (is_voip_session(v->session_id)) {
+		if (common.cvs_cal.buf) {
+			cal_paddr = common.cvs_cal.phy;
+
+			memcpy(common.cvs_cal.buf,
+				(void *) cal_block.cal_kvaddr,
+				cal_block.cal_size);
+		} else {
+			return -EINVAL;
+		}
+	} else {
+		cal_paddr = cal_block.cal_paddr;
+	}
+
+	cvs_handle = voice_get_cvs_handle(v);
+
+	/* fill in the header */
+	cvs_reg_cal_cmd.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+				APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+	cvs_reg_cal_cmd.hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE,
+				sizeof(cvs_reg_cal_cmd) - APR_HDR_SIZE);
+	cvs_reg_cal_cmd.hdr.src_port = v->session_id;
+	cvs_reg_cal_cmd.hdr.dest_port = cvs_handle;
+	cvs_reg_cal_cmd.hdr.token = 0;
+	cvs_reg_cal_cmd.hdr.opcode = VSS_ISTREAM_CMD_REGISTER_CALIBRATION_DATA;
+
+	cvs_reg_cal_cmd.cvs_cal_data.phys_addr = cal_paddr;
+	cvs_reg_cal_cmd.cvs_cal_data.mem_size = cal_block.cal_size;
+
+	v->cvs_state = CMD_STATUS_FAIL;
+	ret = apr_send_pkt(apr_cvs, (uint32_t *) &cvs_reg_cal_cmd);
+	if (ret < 0) {
+		pr_err("Fail: sending cvs cal,\n");
+		goto fail;
+	}
+	ret = wait_event_timeout(v->cvs_wait,
+			(v->cvs_state == CMD_STATUS_SUCCESS),
+			msecs_to_jiffies(TIMEOUT_MS));
+	if (!ret) {
+		pr_err("%s: wait_event timeout\n", __func__);
+		goto fail;
+	}
+	return 0;
+fail:
+	return -EINVAL;
+
+}
+
+static int voice_send_cvs_deregister_cal_cmd(struct voice_data *v)
+{
+	struct cvs_deregister_cal_data_cmd cvs_dereg_cal_cmd;
+	struct acdb_cal_block cal_block;
+	int ret = 0;
+	void *apr_cvs;
+	u16 cvs_handle;
+
+	get_all_vocstrm_cal(&cal_block);
+	if (cal_block.cal_size == 0)
+		return 0;
+
+	if (v == NULL) {
+		pr_err("%s: v is NULL\n", __func__);
+		return -EINVAL;
+	}
+	apr_cvs = common.apr_q6_cvs;
+
+	if (!apr_cvs) {
+		pr_err("%s: apr_cvs is NULL.\n", __func__);
+		return -EINVAL;
+	}
+	cvs_handle = voice_get_cvs_handle(v);
+
+	/* fill in the header */
+	cvs_dereg_cal_cmd.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+				APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+	cvs_dereg_cal_cmd.hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE,
+				sizeof(cvs_dereg_cal_cmd) - APR_HDR_SIZE);
+	cvs_dereg_cal_cmd.hdr.src_port = v->session_id;
+	cvs_dereg_cal_cmd.hdr.dest_port = cvs_handle;
+	cvs_dereg_cal_cmd.hdr.token = 0;
+	cvs_dereg_cal_cmd.hdr.opcode =
+			VSS_ISTREAM_CMD_DEREGISTER_CALIBRATION_DATA;
+
+	v->cvs_state = CMD_STATUS_FAIL;
+	ret = apr_send_pkt(apr_cvs, (uint32_t *) &cvs_dereg_cal_cmd);
+	if (ret < 0) {
+		pr_err("Fail: sending cvs cal,\n");
+		goto fail;
+	}
+	ret = wait_event_timeout(v->cvs_wait,
+			(v->cvs_state == CMD_STATUS_SUCCESS),
+			msecs_to_jiffies(TIMEOUT_MS));
+	if (!ret) {
+		pr_err("%s: wait_event timeout\n", __func__);
+		goto fail;
+	}
+	return 0;
+fail:
+	return -EINVAL;
+
+}
+
+static int voice_send_cvp_map_memory_cmd(struct voice_data *v)
+{
+	struct vss_map_memory_cmd cvp_map_mem_cmd;
+	struct acdb_cal_block cal_block;
+	int ret = 0;
+	void *apr_cvp;
+	u16 cvp_handle;
+	uint32_t cal_paddr;
+
+	/* get all cvp cal data */
+	get_all_cvp_cal(&cal_block);
+	if (cal_block.cal_size == 0)
+		goto fail;
+
+	if (v == NULL) {
+		pr_err("%s: v is NULL\n", __func__);
+		return -EINVAL;
+	}
+	apr_cvp = common.apr_q6_cvp;
+
+	if (!apr_cvp) {
+		pr_err("%s: apr_cvp is NULL.\n", __func__);
+		return -EINVAL;
+	}
+
+	if (is_voip_session(v->session_id)) {
+		if (common.cvp_cal.buf)
+			cal_paddr = common.cvp_cal.phy;
+		else
+			return -EINVAL;
+	} else {
+		cal_paddr = cal_block.cal_paddr;
+	}
+	cvp_handle = voice_get_cvp_handle(v);
+
+	/* fill in the header */
+	cvp_map_mem_cmd.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+				APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+	cvp_map_mem_cmd.hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE,
+				sizeof(cvp_map_mem_cmd) - APR_HDR_SIZE);
+	cvp_map_mem_cmd.hdr.src_port = v->session_id;
+	cvp_map_mem_cmd.hdr.dest_port = cvp_handle;
+	cvp_map_mem_cmd.hdr.token = 0;
+	cvp_map_mem_cmd.hdr.opcode = VSS_ICOMMON_CMD_MAP_MEMORY;
+
+	pr_debug("%s, phy_addr:0x%x, mem_size:%d\n", __func__,
+		cal_paddr, cal_block.cal_size);
+	cvp_map_mem_cmd.vss_map_mem.phys_addr = cal_paddr;
+	cvp_map_mem_cmd.vss_map_mem.mem_size = cal_block.cal_size;
+	cvp_map_mem_cmd.vss_map_mem.mem_pool_id =
+				VSS_ICOMMON_MAP_MEMORY_SHMEM8_4K_POOL;
+
+	v->cvp_state = CMD_STATUS_FAIL;
+	ret = apr_send_pkt(apr_cvp, (uint32_t *) &cvp_map_mem_cmd);
+	if (ret < 0) {
+		pr_err("Fail: sending cvp cal,\n");
+		goto fail;
+	}
+	ret = wait_event_timeout(v->cvp_wait,
+			(v->cvp_state == CMD_STATUS_SUCCESS),
+			msecs_to_jiffies(TIMEOUT_MS));
+	if (!ret) {
+		pr_err("%s: wait_event timeout\n", __func__);
+		goto fail;
+	}
+	return 0;
+fail:
+	return -EINVAL;
+
+}
+
+static int voice_send_cvp_unmap_memory_cmd(struct voice_data *v)
+{
+	struct vss_unmap_memory_cmd cvp_unmap_mem_cmd;
+	struct acdb_cal_block cal_block;
+	int ret = 0;
+	void *apr_cvp;
+	u16 cvp_handle;
+	uint32_t cal_paddr;
+
+	get_all_cvp_cal(&cal_block);
+	if (cal_block.cal_size == 0)
+		return 0;
+
+	if (v == NULL) {
+		pr_err("%s: v is NULL\n", __func__);
+		return -EINVAL;
+	}
+	apr_cvp = common.apr_q6_cvp;
+
+	if (!apr_cvp) {
+		pr_err("%s: apr_cvp is NULL.\n", __func__);
+		return -EINVAL;
+	}
+
+	if (is_voip_session(v->session_id))
+		cal_paddr = common.cvp_cal.phy;
+	else
+		cal_paddr = cal_block.cal_paddr;
+
+	cvp_handle = voice_get_cvp_handle(v);
+
+	/* fill in the header */
+	cvp_unmap_mem_cmd.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+				APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+	cvp_unmap_mem_cmd.hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE,
+				sizeof(cvp_unmap_mem_cmd) - APR_HDR_SIZE);
+	cvp_unmap_mem_cmd.hdr.src_port = v->session_id;
+	cvp_unmap_mem_cmd.hdr.dest_port = cvp_handle;
+	cvp_unmap_mem_cmd.hdr.token = 0;
+	cvp_unmap_mem_cmd.hdr.opcode = VSS_ICOMMON_CMD_UNMAP_MEMORY;
+
+	cvp_unmap_mem_cmd.vss_unmap_mem.phys_addr = cal_paddr;
+
+	v->cvp_state = CMD_STATUS_FAIL;
+	ret = apr_send_pkt(apr_cvp, (uint32_t *) &cvp_unmap_mem_cmd);
+	if (ret < 0) {
+		pr_err("Fail: sending cvp cal,\n");
+		goto fail;
+	}
+	ret = wait_event_timeout(v->cvp_wait,
+			(v->cvp_state == CMD_STATUS_SUCCESS),
+			msecs_to_jiffies(TIMEOUT_MS));
+	if (!ret) {
+		pr_err("%s: wait_event timeout\n", __func__);
+		goto fail;
+	}
+	return 0;
+fail:
+	return -EINVAL;
+
+}
+
+static int voice_send_cvs_map_memory_cmd(struct voice_data *v)
+{
+	struct vss_map_memory_cmd cvs_map_mem_cmd;
+	struct acdb_cal_block cal_block;
+	int ret = 0;
+	void *apr_cvs;
+	u16 cvs_handle;
+	uint32_t cal_paddr;
+
+	/* get all cvs cal data */
+	get_all_vocstrm_cal(&cal_block);
+	if (cal_block.cal_size == 0)
+		goto fail;
+
+	if (v == NULL) {
+		pr_err("%s: v is NULL\n", __func__);
+		return -EINVAL;
+	}
+	apr_cvs = common.apr_q6_cvs;
+
+	if (!apr_cvs) {
+		pr_err("%s: apr_cvs is NULL.\n", __func__);
+		return -EINVAL;
+	}
+
+	if (is_voip_session(v->session_id)) {
+		if (common.cvs_cal.buf)
+			cal_paddr = common.cvs_cal.phy;
+		else
+			return -EINVAL;
+	} else {
+		cal_paddr = cal_block.cal_paddr;
+	}
+
+	cvs_handle = voice_get_cvs_handle(v);
+
+	/* fill in the header */
+	cvs_map_mem_cmd.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+				APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+	cvs_map_mem_cmd.hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE,
+				sizeof(cvs_map_mem_cmd) - APR_HDR_SIZE);
+	cvs_map_mem_cmd.hdr.src_port = v->session_id;
+	cvs_map_mem_cmd.hdr.dest_port = cvs_handle;
+	cvs_map_mem_cmd.hdr.token = 0;
+	cvs_map_mem_cmd.hdr.opcode = VSS_ICOMMON_CMD_MAP_MEMORY;
+
+	pr_debug("%s, phys_addr: 0x%x, mem_size: %d\n", __func__,
+		cal_paddr, cal_block.cal_size);
+	cvs_map_mem_cmd.vss_map_mem.phys_addr = cal_paddr;
+	cvs_map_mem_cmd.vss_map_mem.mem_size = cal_block.cal_size;
+	cvs_map_mem_cmd.vss_map_mem.mem_pool_id =
+				VSS_ICOMMON_MAP_MEMORY_SHMEM8_4K_POOL;
+
+	v->cvs_state = CMD_STATUS_FAIL;
+	ret = apr_send_pkt(apr_cvs, (uint32_t *) &cvs_map_mem_cmd);
+	if (ret < 0) {
+		pr_err("Fail: sending cvs cal,\n");
+		goto fail;
+	}
+	ret = wait_event_timeout(v->cvs_wait,
+			(v->cvs_state == CMD_STATUS_SUCCESS),
+			msecs_to_jiffies(TIMEOUT_MS));
+	if (!ret) {
+		pr_err("%s: wait_event timeout\n", __func__);
+		goto fail;
+	}
+	return 0;
+fail:
+	return -EINVAL;
+
+}
+
+static int voice_send_cvs_unmap_memory_cmd(struct voice_data *v)
+{
+	struct vss_unmap_memory_cmd cvs_unmap_mem_cmd;
+	struct acdb_cal_block cal_block;
+	int ret = 0;
+	void *apr_cvs;
+	u16 cvs_handle;
+	uint32_t cal_paddr;
+
+	get_all_vocstrm_cal(&cal_block);
+	if (cal_block.cal_size == 0)
+		return 0;
+
+	if (v == NULL) {
+		pr_err("%s: v is NULL\n", __func__);
+		return -EINVAL;
+	}
+	apr_cvs = common.apr_q6_cvs;
+
+	if (!apr_cvs) {
+		pr_err("%s: apr_cvs is NULL.\n", __func__);
+		return -EINVAL;
+	}
+
+	if (is_voip_session(v->session_id))
+		cal_paddr = common.cvs_cal.phy;
+	else
+		cal_paddr = cal_block.cal_paddr;
+
+	cvs_handle = voice_get_cvs_handle(v);
+
+	/* fill in the header */
+	cvs_unmap_mem_cmd.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+				APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+	cvs_unmap_mem_cmd.hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE,
+				sizeof(cvs_unmap_mem_cmd) - APR_HDR_SIZE);
+	cvs_unmap_mem_cmd.hdr.src_port = v->session_id;
+	cvs_unmap_mem_cmd.hdr.dest_port = cvs_handle;
+	cvs_unmap_mem_cmd.hdr.token = 0;
+	cvs_unmap_mem_cmd.hdr.opcode = VSS_ICOMMON_CMD_UNMAP_MEMORY;
+
+	cvs_unmap_mem_cmd.vss_unmap_mem.phys_addr = cal_paddr;
+
+	v->cvs_state = CMD_STATUS_FAIL;
+	ret = apr_send_pkt(apr_cvs, (uint32_t *) &cvs_unmap_mem_cmd);
+	if (ret < 0) {
+		pr_err("Fail: sending cvs cal,\n");
+		goto fail;
+	}
+	ret = wait_event_timeout(v->cvs_wait,
+			(v->cvs_state == CMD_STATUS_SUCCESS),
+			msecs_to_jiffies(TIMEOUT_MS));
+	if (!ret) {
+		pr_err("%s: wait_event timeout\n", __func__);
+		goto fail;
+	}
+	return 0;
+fail:
+	return -EINVAL;
+
+}
+
+static int voice_send_cvp_register_cal_cmd(struct voice_data *v)
+{
+	struct cvp_register_cal_data_cmd cvp_reg_cal_cmd;
+	struct acdb_cal_block cal_block;
+	int ret = 0;
+	void *apr_cvp;
+	u16 cvp_handle;
+	uint32_t cal_paddr;
+
+	/* get the cvp cal data */
+	get_all_vocproc_cal(&cal_block);
+	if (cal_block.cal_size == 0)
+		goto fail;
+
+	if (v == NULL) {
+		pr_err("%s: v is NULL\n", __func__);
+		return -EINVAL;
+	}
+	apr_cvp = common.apr_q6_cvp;
+
+	if (!apr_cvp) {
+		pr_err("%s: apr_cvp is NULL.\n", __func__);
+		return -EINVAL;
+	}
+
+	if (is_voip_session(v->session_id)) {
+		if (common.cvp_cal.buf) {
+			cal_paddr = common.cvp_cal.phy;
+
+			memcpy(common.cvp_cal.buf,
+				(void *)cal_block.cal_kvaddr,
+				cal_block.cal_size);
+		} else {
+			return -EINVAL;
+		}
+	} else {
+		cal_paddr = cal_block.cal_paddr;
+	}
+
+	cvp_handle = voice_get_cvp_handle(v);
+
+	/* fill in the header */
+	cvp_reg_cal_cmd.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+				APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+	cvp_reg_cal_cmd.hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE,
+				sizeof(cvp_reg_cal_cmd) - APR_HDR_SIZE);
+	cvp_reg_cal_cmd.hdr.src_port = v->session_id;
+	cvp_reg_cal_cmd.hdr.dest_port = cvp_handle;
+	cvp_reg_cal_cmd.hdr.token = 0;
+	cvp_reg_cal_cmd.hdr.opcode = VSS_IVOCPROC_CMD_REGISTER_CALIBRATION_DATA;
+
+	cvp_reg_cal_cmd.cvp_cal_data.phys_addr = cal_paddr;
+	cvp_reg_cal_cmd.cvp_cal_data.mem_size = cal_block.cal_size;
+
+	v->cvp_state = CMD_STATUS_FAIL;
+	ret = apr_send_pkt(apr_cvp, (uint32_t *) &cvp_reg_cal_cmd);
+	if (ret < 0) {
+		pr_err("Fail: sending cvp cal,\n");
+		goto fail;
+	}
+	ret = wait_event_timeout(v->cvp_wait,
+			(v->cvp_state == CMD_STATUS_SUCCESS),
+			msecs_to_jiffies(TIMEOUT_MS));
+	if (!ret) {
+		pr_err("%s: wait_event timeout\n", __func__);
+		goto fail;
+	}
+	return 0;
+fail:
+	return -EINVAL;
+
+}
+
+static int voice_send_cvp_deregister_cal_cmd(struct voice_data *v)
+{
+	struct cvp_deregister_cal_data_cmd cvp_dereg_cal_cmd;
+	struct acdb_cal_block cal_block;
+	int ret = 0;
+	void *apr_cvp;
+	u16 cvp_handle;
+
+	get_all_vocproc_cal(&cal_block);
+	if (cal_block.cal_size == 0)
+		return 0;
+
+	if (v == NULL) {
+		pr_err("%s: v is NULL\n", __func__);
+		return -EINVAL;
+	}
+	apr_cvp = common.apr_q6_cvp;
+
+	if (!apr_cvp) {
+		pr_err("%s: apr_cvp is NULL.\n", __func__);
+		return -EINVAL;
+	}
+	cvp_handle = voice_get_cvp_handle(v);
+
+	/* fill in the header */
+	cvp_dereg_cal_cmd.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+				APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+	cvp_dereg_cal_cmd.hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE,
+				sizeof(cvp_dereg_cal_cmd) - APR_HDR_SIZE);
+	cvp_dereg_cal_cmd.hdr.src_port = v->session_id;
+	cvp_dereg_cal_cmd.hdr.dest_port = cvp_handle;
+	cvp_dereg_cal_cmd.hdr.token = 0;
+	cvp_dereg_cal_cmd.hdr.opcode =
+			VSS_IVOCPROC_CMD_DEREGISTER_CALIBRATION_DATA;
+
+	v->cvp_state = CMD_STATUS_FAIL;
+	ret = apr_send_pkt(apr_cvp, (uint32_t *) &cvp_dereg_cal_cmd);
+	if (ret < 0) {
+		pr_err("Fail: sending cvp cal,\n");
+		goto fail;
+	}
+	ret = wait_event_timeout(v->cvp_wait,
+			(v->cvp_state == CMD_STATUS_SUCCESS),
+			msecs_to_jiffies(TIMEOUT_MS));
+	if (!ret) {
+		pr_err("%s: wait_event timeout\n", __func__);
+		goto fail;
+	}
+	return 0;
+fail:
+	return -EINVAL;
+
+}
+
+static int voice_send_cvp_register_vol_cal_table_cmd(struct voice_data *v)
+{
+	struct cvp_register_vol_cal_table_cmd cvp_reg_cal_tbl_cmd;
+	struct acdb_cal_block vol_block;
+	struct acdb_cal_block voc_block;
+	int ret = 0;
+	void *apr_cvp;
+	u16 cvp_handle;
+	uint32_t cal_paddr;
+
+	/* get the cvp vol cal data */
+	get_all_vocvol_cal(&vol_block);
+	get_all_vocproc_cal(&voc_block);
+
+	if (vol_block.cal_size == 0)
+		goto fail;
+
+	if (v == NULL) {
+		pr_err("%s: v is NULL\n", __func__);
+		return -EINVAL;
+	}
+	apr_cvp = common.apr_q6_cvp;
+
+	if (!apr_cvp) {
+		pr_err("%s: apr_cvp is NULL.\n", __func__);
+		return -EINVAL;
+	}
+
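+	/*
+	 * For VoIP the volume table is appended to the shared cvp_cal buffer
+	 * right after the vocproc calibration copied earlier, hence the
+	 * voc_block.cal_size offset on the physical address.
+	 */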
+	if (is_voip_session(v->session_id)) {
+		if (common.cvp_cal.buf) {
+			cal_paddr = common.cvp_cal.phy + voc_block.cal_size;
+
+			memcpy(common.cvp_cal.buf + voc_block.cal_size,
+				(void *) vol_block.cal_kvaddr,
+				vol_block.cal_size);
+		} else {
+			return -EINVAL;
+		}
+	} else {
+		cal_paddr = vol_block.cal_paddr;
+	}
+
+	cvp_handle = voice_get_cvp_handle(v);
+
+	/* fill in the header */
+	cvp_reg_cal_tbl_cmd.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+				APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+	cvp_reg_cal_tbl_cmd.hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE,
+				sizeof(cvp_reg_cal_tbl_cmd) - APR_HDR_SIZE);
+	cvp_reg_cal_tbl_cmd.hdr.src_port = v->session_id;
+	cvp_reg_cal_tbl_cmd.hdr.dest_port = cvp_handle;
+	cvp_reg_cal_tbl_cmd.hdr.token = 0;
+	cvp_reg_cal_tbl_cmd.hdr.opcode =
+				VSS_IVOCPROC_CMD_REGISTER_VOLUME_CAL_TABLE;
+
+	cvp_reg_cal_tbl_cmd.cvp_vol_cal_tbl.phys_addr = cal_paddr;
+	cvp_reg_cal_tbl_cmd.cvp_vol_cal_tbl.mem_size = vol_block.cal_size;
+
+	v->cvp_state = CMD_STATUS_FAIL;
+	ret = apr_send_pkt(apr_cvp, (uint32_t *) &cvp_reg_cal_tbl_cmd);
+	if (ret < 0) {
+		pr_err("Fail: sending cvp cal table,\n");
+		goto fail;
+	}
+	ret = wait_event_timeout(v->cvp_wait,
+			(v->cvp_state == CMD_STATUS_SUCCESS),
+			msecs_to_jiffies(TIMEOUT_MS));
+	if (!ret) {
+		pr_err("%s: wait_event timeout\n", __func__);
+		goto fail;
+	}
+	return 0;
+fail:
+	return -EINVAL;
+
+}
+
+static int voice_send_cvp_deregister_vol_cal_table_cmd(struct voice_data *v)
+{
+	struct cvp_deregister_vol_cal_table_cmd cvp_dereg_cal_tbl_cmd;
+	struct acdb_cal_block cal_block;
+	int ret = 0;
+	void *apr_cvp;
+	u16 cvp_handle;
+
+	get_all_vocvol_cal(&cal_block);
+	if (cal_block.cal_size == 0)
+		return 0;
+
+	if (v == NULL) {
+		pr_err("%s: v is NULL\n", __func__);
+		return -EINVAL;
+	}
+	apr_cvp = common.apr_q6_cvp;
+
+	if (!apr_cvp) {
+		pr_err("%s: apr_cvp is NULL.\n", __func__);
+		return -EINVAL;
+	}
+	cvp_handle = voice_get_cvp_handle(v);
+
+	/* fill in the header */
+	cvp_dereg_cal_tbl_cmd.hdr.hdr_field = APR_HDR_FIELD(
+						APR_MSG_TYPE_SEQ_CMD,
+						APR_HDR_LEN(APR_HDR_SIZE),
+						APR_PKT_VER);
+	cvp_dereg_cal_tbl_cmd.hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE,
+				sizeof(cvp_dereg_cal_tbl_cmd) - APR_HDR_SIZE);
+	cvp_dereg_cal_tbl_cmd.hdr.src_port = v->session_id;
+	cvp_dereg_cal_tbl_cmd.hdr.dest_port = cvp_handle;
+	cvp_dereg_cal_tbl_cmd.hdr.token = 0;
+	cvp_dereg_cal_tbl_cmd.hdr.opcode =
+				VSS_IVOCPROC_CMD_DEREGISTER_VOLUME_CAL_TABLE;
+
+	v->cvp_state = CMD_STATUS_FAIL;
+	ret = apr_send_pkt(apr_cvp, (uint32_t *) &cvp_dereg_cal_tbl_cmd);
+	if (ret < 0) {
+		pr_err("Fail: sending cvp cal table,\n");
+		goto fail;
+	}
+	ret = wait_event_timeout(v->cvp_wait,
+			(v->cvp_state == CMD_STATUS_SUCCESS),
+			msecs_to_jiffies(TIMEOUT_MS));
+	if (!ret) {
+		pr_err("%s: wait_event timeout\n", __func__);
+		goto fail;
+	}
+	return 0;
+fail:
+	return -EINVAL;
+
+}
+
+static int voice_send_set_widevoice_enable_cmd(struct voice_data *v)
+{
+	struct mvm_set_widevoice_enable_cmd mvm_set_wv_cmd;
+	int ret = 0;
+	void *apr_mvm;
+	u16 mvm_handle;
+
+	if (v == NULL) {
+		pr_err("%s: v is NULL\n", __func__);
+		return -EINVAL;
+	}
+	apr_mvm = common.apr_q6_mvm;
+
+	if (!apr_mvm) {
+		pr_err("%s: apr_mvm is NULL.\n", __func__);
+		return -EINVAL;
+	}
+	mvm_handle = voice_get_mvm_handle(v);
+
+	/* fill in the header */
+	mvm_set_wv_cmd.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+				APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+	mvm_set_wv_cmd.hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE,
+				sizeof(mvm_set_wv_cmd) - APR_HDR_SIZE);
+	mvm_set_wv_cmd.hdr.src_port = v->session_id;
+	mvm_set_wv_cmd.hdr.dest_port = mvm_handle;
+	mvm_set_wv_cmd.hdr.token = 0;
+	mvm_set_wv_cmd.hdr.opcode = VSS_IWIDEVOICE_CMD_SET_WIDEVOICE;
+
+	mvm_set_wv_cmd.vss_set_wv.enable = v->wv_enable;
+
+	v->mvm_state = CMD_STATUS_FAIL;
+	ret = apr_send_pkt(apr_mvm, (uint32_t *) &mvm_set_wv_cmd);
+	if (ret < 0) {
+		pr_err("Fail: sending mvm set widevoice enable,\n");
+		goto fail;
+	}
+	ret = wait_event_timeout(v->mvm_wait,
+			(v->mvm_state == CMD_STATUS_SUCCESS),
+			msecs_to_jiffies(TIMEOUT_MS));
+	if (!ret) {
+		pr_err("%s: wait_event timeout\n", __func__);
+		goto fail;
+	}
+	return 0;
+fail:
+	return -EINVAL;
+}
+
+static int voice_send_set_pp_enable_cmd(struct voice_data *v,
+					uint32_t module_id, int enable)
+{
+	struct cvs_set_pp_enable_cmd cvs_set_pp_cmd;
+	int ret = 0;
+	void *apr_cvs;
+	u16 cvs_handle;
+
+	if (v == NULL) {
+		pr_err("%s: v is NULL\n", __func__);
+		return -EINVAL;
+	}
+	apr_cvs = common.apr_q6_cvs;
+
+	if (!apr_cvs) {
+		pr_err("%s: apr_cvs is NULL.\n", __func__);
+		return -EINVAL;
+	}
+	cvs_handle = voice_get_cvs_handle(v);
+
+	/* fill in the header */
+	cvs_set_pp_cmd.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+				APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+	cvs_set_pp_cmd.hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE,
+				sizeof(cvs_set_pp_cmd) - APR_HDR_SIZE);
+	cvs_set_pp_cmd.hdr.src_port = v->session_id;
+	cvs_set_pp_cmd.hdr.dest_port = cvs_handle;
+	cvs_set_pp_cmd.hdr.token = 0;
+	cvs_set_pp_cmd.hdr.opcode = VSS_ICOMMON_CMD_SET_UI_PROPERTY;
+
+	cvs_set_pp_cmd.vss_set_pp.module_id = module_id;
+	cvs_set_pp_cmd.vss_set_pp.param_id = VOICE_PARAM_MOD_ENABLE;
+	cvs_set_pp_cmd.vss_set_pp.param_size = MOD_ENABLE_PARAM_LEN;
+	cvs_set_pp_cmd.vss_set_pp.reserved = 0;
+	cvs_set_pp_cmd.vss_set_pp.enable = enable;
+	cvs_set_pp_cmd.vss_set_pp.reserved_field = 0;
+	pr_debug("voice_send_set_pp_enable_cmd, module_id=%d, enable=%d\n",
+		module_id, enable);
+
+	v->cvs_state = CMD_STATUS_FAIL;
+	ret = apr_send_pkt(apr_cvs, (uint32_t *) &cvs_set_pp_cmd);
+	if (ret < 0) {
+		pr_err("Fail: sending cvs set slowtalk enable,\n");
+		goto fail;
+	}
+	ret = wait_event_timeout(v->cvs_wait,
+		(v->cvs_state == CMD_STATUS_SUCCESS),
+			msecs_to_jiffies(TIMEOUT_MS));
+	if (!ret) {
+		pr_err("%s: wait_event timeout\n", __func__);
+		goto fail;
+	}
+	return 0;
+fail:
+	return -EINVAL;
+}
+
+static int voice_setup_vocproc(struct voice_data *v)
+{
+	struct cvp_create_full_ctl_session_cmd cvp_session_cmd;
+	int ret = 0;
+	void *apr_cvp;
+	if (v == NULL) {
+		pr_err("%s: v is NULL\n", __func__);
+		return -EINVAL;
+	}
+	apr_cvp = common.apr_q6_cvp;
+
+	if (!apr_cvp) {
+		pr_err("%s: apr_cvp is NULL.\n", __func__);
+		return -EINVAL;
+	}
+
+	/* create cvp session and wait for response */
+	cvp_session_cmd.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+						APR_HDR_LEN(APR_HDR_SIZE),
+						APR_PKT_VER);
+	cvp_session_cmd.hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE,
+				sizeof(cvp_session_cmd) - APR_HDR_SIZE);
+	pr_debug(" send create cvp session, pkt size = %d\n",
+		cvp_session_cmd.hdr.pkt_size);
+	cvp_session_cmd.hdr.src_port = v->session_id;
+	cvp_session_cmd.hdr.dest_port = 0;
+	cvp_session_cmd.hdr.token = 0;
+	cvp_session_cmd.hdr.opcode =
+			VSS_IVOCPROC_CMD_CREATE_FULL_CONTROL_SESSION;
+
+	/* Use default topology if invalid value in ACDB */
+	cvp_session_cmd.cvp_session.tx_topology_id =
+				get_voice_tx_topology();
+	if (cvp_session_cmd.cvp_session.tx_topology_id == 0)
+		cvp_session_cmd.cvp_session.tx_topology_id =
+			VSS_IVOCPROC_TOPOLOGY_ID_TX_SM_ECNS;
+
+	cvp_session_cmd.cvp_session.rx_topology_id =
+				get_voice_rx_topology();
+	if (cvp_session_cmd.cvp_session.rx_topology_id == 0)
+		cvp_session_cmd.cvp_session.rx_topology_id =
+			VSS_IVOCPROC_TOPOLOGY_ID_RX_DEFAULT;
+
+	cvp_session_cmd.cvp_session.direction = 2; /*tx and rx*/
+	cvp_session_cmd.cvp_session.network_id = VSS_NETWORK_ID_DEFAULT;
+	cvp_session_cmd.cvp_session.tx_port_id = v->dev_tx.port_id;
+	cvp_session_cmd.cvp_session.rx_port_id = v->dev_rx.port_id;
+
+	pr_debug("topology=%d net_id=%d, dir=%d tx_port_id=%d, rx_port_id=%d\n",
+		cvp_session_cmd.cvp_session.tx_topology_id,
+		cvp_session_cmd.cvp_session.network_id,
+		cvp_session_cmd.cvp_session.direction,
+		cvp_session_cmd.cvp_session.tx_port_id,
+		cvp_session_cmd.cvp_session.rx_port_id);
+
+	v->cvp_state = CMD_STATUS_FAIL;
+	ret = apr_send_pkt(apr_cvp, (uint32_t *) &cvp_session_cmd);
+	if (ret < 0) {
+		pr_err("Fail in sending VOCPROC_FULL_CONTROL_SESSION\n");
+		goto fail;
+	}
+	ret = wait_event_timeout(v->cvp_wait,
+				 (v->cvp_state == CMD_STATUS_SUCCESS),
+				 msecs_to_jiffies(TIMEOUT_MS));
+	if (!ret) {
+		pr_err("%s: wait_event timeout\n", __func__);
+		goto fail;
+	}
+
+	/* send cvs cal */
+	ret = voice_send_cvs_map_memory_cmd(v);
+	if (!ret)
+		voice_send_cvs_register_cal_cmd(v);
+
+	/* send cvp and vol cal */
+	ret = voice_send_cvp_map_memory_cmd(v);
+	if (!ret) {
+		voice_send_cvp_register_cal_cmd(v);
+		voice_send_cvp_register_vol_cal_table_cmd(v);
+	}
+
+	/* enable vocproc */
+	ret = voice_send_enable_vocproc_cmd(v);
+	if (ret < 0)
+		goto fail;
+
+	/* attach vocproc */
+	ret = voice_send_attach_vocproc_cmd(v);
+	if (ret < 0)
+		goto fail;
+
+	/* send tty mode if tty device is used */
+	voice_send_tty_mode_cmd(v);
+
+	/* enable widevoice if wv_enable is set */
+	if (v->wv_enable)
+		voice_send_set_widevoice_enable_cmd(v);
+
+	/* enable slowtalk if st_enable is set */
+	if (v->st_enable)
+		voice_send_set_pp_enable_cmd(v, MODULE_ID_VOICE_MODULE_ST,
+					v->st_enable);
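+
+	/* FENS state is always sent so it tracks fens_enable */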
+	voice_send_set_pp_enable_cmd(v, MODULE_ID_VOICE_MODULE_FENS,
+					v->fens_enable);
+
+	if (is_voip_session(v->session_id))
+		voice_send_netid_timing_cmd(v);
+
+	/* Start in-call music delivery if this feature is enabled */
+	if (v->music_info.play_enable)
+		voice_cvs_start_playback(v);
+
+	/* Start in-call recording if this feature is enabled */
+	if (v->rec_info.rec_enable)
+		voice_cvs_start_record(v, v->rec_info.rec_mode);
+
+	rtac_add_voice(voice_get_cvs_handle(v),
+		voice_get_cvp_handle(v),
+		v->dev_rx.port_id, v->dev_tx.port_id,
+		v->session_id);
+
+	return 0;
+
+fail:
+	return -EINVAL;
+}
+
+static int voice_send_enable_vocproc_cmd(struct voice_data *v)
+{
+	int ret = 0;
+	struct apr_hdr cvp_enable_cmd;
+	void *apr_cvp;
+	u16 cvp_handle;
+
+	if (v == NULL) {
+		pr_err("%s: v is NULL\n", __func__);
+		return -EINVAL;
+	}
+	apr_cvp = common.apr_q6_cvp;
+
+	if (!apr_cvp) {
+		pr_err("%s: apr_cvp is NULL.\n", __func__);
+		return -EINVAL;
+	}
+	cvp_handle = voice_get_cvp_handle(v);
+
+	/* enable vocproc and wait for response */
+	cvp_enable_cmd.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+						APR_HDR_LEN(APR_HDR_SIZE),
+						APR_PKT_VER);
+	cvp_enable_cmd.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE,
+				sizeof(cvp_enable_cmd) - APR_HDR_SIZE);
+	pr_debug("cvp_enable_cmd pkt size = %d, cvp_handle=%d\n",
+		cvp_enable_cmd.pkt_size, cvp_handle);
+	cvp_enable_cmd.src_port = v->session_id;
+	cvp_enable_cmd.dest_port = cvp_handle;
+	cvp_enable_cmd.token = 0;
+	cvp_enable_cmd.opcode = VSS_IVOCPROC_CMD_ENABLE;
+
+	v->cvp_state = CMD_STATUS_FAIL;
+	ret = apr_send_pkt(apr_cvp, (uint32_t *) &cvp_enable_cmd);
+	if (ret < 0) {
+		pr_err("Fail in sending VSS_IVOCPROC_CMD_ENABLE\n");
+		goto fail;
+	}
+	ret = wait_event_timeout(v->cvp_wait,
+				(v->cvp_state == CMD_STATUS_SUCCESS),
+				msecs_to_jiffies(TIMEOUT_MS));
+	if (!ret) {
+		pr_err("%s: wait_event timeout\n", __func__);
+		goto fail;
+	}
+
+	return 0;
+fail:
+	return -EINVAL;
+}
+
+static int voice_send_netid_timing_cmd(struct voice_data *v)
+{
+	int ret = 0;
+	void *apr_mvm;
+	u16 mvm_handle;
+	struct mvm_set_network_cmd mvm_set_network;
+	struct mvm_set_voice_timing_cmd mvm_set_voice_timing;
+
+	if (v == NULL) {
+		pr_err("%s: v is NULL\n", __func__);
+		return -EINVAL;
+	}
+	apr_mvm = common.apr_q6_mvm;
+
+	if (!apr_mvm) {
+		pr_err("%s: apr_mvm is NULL.\n", __func__);
+		return -EINVAL;
+	}
+	mvm_handle = voice_get_mvm_handle(v);
+
+	ret = voice_config_cvs_vocoder(v);
+	if (ret < 0) {
+		pr_err("%s: Error %d configuring CVS voc",
+					__func__, ret);
+		goto fail;
+	}
+	/* Set network ID. */
+	pr_debug("Setting network ID\n");
+
+	mvm_set_network.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+						APR_HDR_LEN(APR_HDR_SIZE),
+						APR_PKT_VER);
+	mvm_set_network.hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE,
+					sizeof(mvm_set_network) - APR_HDR_SIZE);
+	mvm_set_network.hdr.src_port = v->session_id;
+	mvm_set_network.hdr.dest_port = mvm_handle;
+	mvm_set_network.hdr.token = 0;
+	mvm_set_network.hdr.opcode = VSS_ICOMMON_CMD_SET_NETWORK;
+	mvm_set_network.network.network_id = common.mvs_info.network_type;
+
+	v->mvm_state = CMD_STATUS_FAIL;
+	ret = apr_send_pkt(apr_mvm, (uint32_t *) &mvm_set_network);
+	if (ret < 0) {
+		pr_err("%s: Error %d sending SET_NETWORK\n", __func__, ret);
+		goto fail;
+	}
+
+	ret = wait_event_timeout(v->mvm_wait,
+				(v->mvm_state == CMD_STATUS_SUCCESS),
+				 msecs_to_jiffies(TIMEOUT_MS));
+	if (!ret) {
+		pr_err("%s: wait_event timeout\n", __func__);
+		goto fail;
+	}
+
+	/* Set voice timing. */
+	pr_debug("Setting voice timing\n");
+
+	mvm_set_voice_timing.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+						APR_HDR_LEN(APR_HDR_SIZE),
+						APR_PKT_VER);
+	mvm_set_voice_timing.hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE,
+						sizeof(mvm_set_voice_timing) -
+						APR_HDR_SIZE);
+	mvm_set_voice_timing.hdr.src_port = v->session_id;
+	mvm_set_voice_timing.hdr.dest_port = mvm_handle;
+	mvm_set_voice_timing.hdr.token = 0;
+	mvm_set_voice_timing.hdr.opcode = VSS_ICOMMON_CMD_SET_VOICE_TIMING;
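+	/*
+	 * mode 0 = no frame sync; the offsets below are in microseconds from
+	 * the 20 ms VFR tick (see vss_icommon_cmd_set_voice_timing_t).
+	 */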
+	mvm_set_voice_timing.timing.mode = 0;
+	mvm_set_voice_timing.timing.enc_offset = 8000;
+	if ((machine_is_apq8064_sim()) || (machine_is_msm8974_sim())) {
+		pr_debug("%s: Machine is APQ8064/MSM8974 sim\n", __func__);
+		mvm_set_voice_timing.timing.dec_req_offset = 0;
+		mvm_set_voice_timing.timing.dec_offset = 18000;
+	} else {
+		mvm_set_voice_timing.timing.dec_req_offset = 3300;
+		mvm_set_voice_timing.timing.dec_offset = 8300;
+	}
+
+	v->mvm_state = CMD_STATUS_FAIL;
+
+	ret = apr_send_pkt(apr_mvm, (uint32_t *) &mvm_set_voice_timing);
+	if (ret < 0) {
+		pr_err("%s: Error %d sending SET_TIMING\n", __func__, ret);
+		goto fail;
+	}
+
+	ret = wait_event_timeout(v->mvm_wait,
+				(v->mvm_state == CMD_STATUS_SUCCESS),
+				msecs_to_jiffies(TIMEOUT_MS));
+	if (!ret) {
+		pr_err("%s: wait_event timeout\n", __func__);
+		goto fail;
+	}
+
+	return 0;
+fail:
+	return -EINVAL;
+}
+
+static int voice_send_attach_vocproc_cmd(struct voice_data *v)
+{
+	int ret = 0;
+	struct mvm_attach_vocproc_cmd mvm_a_vocproc_cmd;
+	void *apr_mvm;
+	u16 mvm_handle, cvp_handle;
+
+	if (v == NULL) {
+		pr_err("%s: v is NULL\n", __func__);
+		return -EINVAL;
+	}
+	apr_mvm = common.apr_q6_mvm;
+
+	if (!apr_mvm) {
+		pr_err("%s: apr_mvm is NULL.\n", __func__);
+		return -EINVAL;
+	}
+	mvm_handle = voice_get_mvm_handle(v);
+	cvp_handle = voice_get_cvp_handle(v);
+
+	/* attach vocproc and wait for response */
+	mvm_a_vocproc_cmd.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+						APR_HDR_LEN(APR_HDR_SIZE),
+						APR_PKT_VER);
+	mvm_a_vocproc_cmd.hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE,
+				sizeof(mvm_a_vocproc_cmd) - APR_HDR_SIZE);
+	pr_debug("send mvm_a_vocproc_cmd pkt size = %d\n",
+		mvm_a_vocproc_cmd.hdr.pkt_size);
+	mvm_a_vocproc_cmd.hdr.src_port = v->session_id;
+	mvm_a_vocproc_cmd.hdr.dest_port = mvm_handle;
+	mvm_a_vocproc_cmd.hdr.token = 0;
+	mvm_a_vocproc_cmd.hdr.opcode = VSS_IMVM_CMD_ATTACH_VOCPROC;
+	mvm_a_vocproc_cmd.mvm_attach_cvp_handle.handle = cvp_handle;
+
+	v->mvm_state = CMD_STATUS_FAIL;
+	ret = apr_send_pkt(apr_mvm, (uint32_t *) &mvm_a_vocproc_cmd);
+	if (ret < 0) {
+		pr_err("Fail in sending VSS_IMVM_CMD_ATTACH_VOCPROC\n");
+		goto fail;
+	}
+	ret = wait_event_timeout(v->mvm_wait,
+				 (v->mvm_state == CMD_STATUS_SUCCESS),
+				 msecs_to_jiffies(TIMEOUT_MS));
+	if (!ret) {
+		pr_err("%s: wait_event timeout\n", __func__);
+		goto fail;
+	}
+
+	return 0;
+fail:
+	return -EINVAL;
+}
+
+static int voice_destroy_vocproc(struct voice_data *v)
+{
+	struct mvm_detach_vocproc_cmd mvm_d_vocproc_cmd;
+	struct apr_hdr cvp_destroy_session_cmd;
+	int ret = 0;
+	void *apr_mvm, *apr_cvp;
+	u16 mvm_handle, cvp_handle;
+
+	if (v == NULL) {
+		pr_err("%s: v is NULL\n", __func__);
+		return -EINVAL;
+	}
+	apr_mvm = common.apr_q6_mvm;
+	apr_cvp = common.apr_q6_cvp;
+
+	if (!apr_mvm || !apr_cvp) {
+		pr_err("%s: apr_mvm or apr_cvp is NULL.\n", __func__);
+		return -EINVAL;
+	}
+	mvm_handle = voice_get_mvm_handle(v);
+	cvp_handle = voice_get_cvp_handle(v);
+
+	/* stop playback or recording */
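+	/* force = 1 so playback stops even while music_info.count is non-zero */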
+	v->music_info.force = 1;
+	voice_cvs_stop_playback(v);
+	voice_cvs_stop_record(v);
+	/* send stop voice cmd */
+	voice_send_stop_voice_cmd(v);
+
+	/* detach VOCPROC and wait for response from mvm */
+	mvm_d_vocproc_cmd.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+						APR_HDR_LEN(APR_HDR_SIZE),
+						APR_PKT_VER);
+	mvm_d_vocproc_cmd.hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE,
+				sizeof(mvm_d_vocproc_cmd) - APR_HDR_SIZE);
+	pr_debug("mvm_d_vocproc_cmd pkt size = %d\n",
+		mvm_d_vocproc_cmd.hdr.pkt_size);
+	mvm_d_vocproc_cmd.hdr.src_port = v->session_id;
+	mvm_d_vocproc_cmd.hdr.dest_port = mvm_handle;
+	mvm_d_vocproc_cmd.hdr.token = 0;
+	mvm_d_vocproc_cmd.hdr.opcode = VSS_IMVM_CMD_DETACH_VOCPROC;
+	mvm_d_vocproc_cmd.mvm_detach_cvp_handle.handle = cvp_handle;
+
+	v->mvm_state = CMD_STATUS_FAIL;
+	ret = apr_send_pkt(apr_mvm, (uint32_t *) &mvm_d_vocproc_cmd);
+	if (ret < 0) {
+		pr_err("Fail in sending VSS_IMVM_CMD_DETACH_VOCPROC\n");
+		goto fail;
+	}
+	ret = wait_event_timeout(v->mvm_wait,
+				 (v->mvm_state == CMD_STATUS_SUCCESS),
+				 msecs_to_jiffies(TIMEOUT_MS));
+	if (!ret) {
+		pr_err("%s: wait_event timeout\n", __func__);
+		goto fail;
+	}
+
+	/* deregister cvp and vol cal */
+	voice_send_cvp_deregister_vol_cal_table_cmd(v);
+	voice_send_cvp_deregister_cal_cmd(v);
+	voice_send_cvp_unmap_memory_cmd(v);
+
+	/* deregister cvs cal */
+	voice_send_cvs_deregister_cal_cmd(v);
+	voice_send_cvs_unmap_memory_cmd(v);
+
+	/* destroy cvp session */
+	cvp_destroy_session_cmd.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+						APR_HDR_LEN(APR_HDR_SIZE),
+						APR_PKT_VER);
+	cvp_destroy_session_cmd.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE,
+				sizeof(cvp_destroy_session_cmd) - APR_HDR_SIZE);
+	pr_debug("cvp_destroy_session_cmd pkt size = %d\n",
+		cvp_destroy_session_cmd.pkt_size);
+	cvp_destroy_session_cmd.src_port = v->session_id;
+	cvp_destroy_session_cmd.dest_port = cvp_handle;
+	cvp_destroy_session_cmd.token = 0;
+	cvp_destroy_session_cmd.opcode = APRV2_IBASIC_CMD_DESTROY_SESSION;
+
+	v->cvp_state = CMD_STATUS_FAIL;
+	ret = apr_send_pkt(apr_cvp, (uint32_t *) &cvp_destroy_session_cmd);
+	if (ret < 0) {
+		pr_err("Fail in sending APRV2_IBASIC_CMD_DESTROY_SESSION\n");
+		goto fail;
+	}
+	ret = wait_event_timeout(v->cvp_wait,
+				 (v->cvp_state == CMD_STATUS_SUCCESS),
+				 msecs_to_jiffies(TIMEOUT_MS));
+	if (!ret) {
+		pr_err("%s: wait_event timeout\n", __func__);
+		goto fail;
+	}
+
+	rtac_remove_voice(voice_get_cvs_handle(v));
+	cvp_handle = 0;
+	voice_set_cvp_handle(v, cvp_handle);
+
+	return 0;
+
+fail:
+	return -EINVAL;
+}
+
+static int voice_send_mute_cmd(struct voice_data *v)
+{
+	struct cvs_set_mute_cmd cvs_mute_cmd;
+	int ret = 0;
+	void *apr_cvs;
+	u16 cvs_handle;
+
+	if (v == NULL) {
+		pr_err("%s: v is NULL\n", __func__);
+		return -EINVAL;
+	}
+	apr_cvs = common.apr_q6_cvs;
+
+	if (!apr_cvs) {
+		pr_err("%s: apr_cvs is NULL.\n", __func__);
+		return -EINVAL;
+	}
+	cvs_handle = voice_get_cvs_handle(v);
+
+	/* send mute/unmute to cvs */
+	cvs_mute_cmd.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+						APR_HDR_LEN(APR_HDR_SIZE),
+						APR_PKT_VER);
+	cvs_mute_cmd.hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE,
+					sizeof(cvs_mute_cmd) - APR_HDR_SIZE);
+	cvs_mute_cmd.hdr.src_port = v->session_id;
+	cvs_mute_cmd.hdr.dest_port = cvs_handle;
+	cvs_mute_cmd.hdr.token = 0;
+	cvs_mute_cmd.hdr.opcode = VSS_ISTREAM_CMD_SET_MUTE;
+	cvs_mute_cmd.cvs_set_mute.direction = 0; /*tx*/
+	cvs_mute_cmd.cvs_set_mute.mute_flag = v->dev_tx.mute;
+
+	v->cvs_state = CMD_STATUS_FAIL;
+	ret = apr_send_pkt(apr_cvs, (uint32_t *) &cvs_mute_cmd);
+	if (ret < 0) {
+		pr_err("Fail: send STREAM SET MUTE\n");
+		goto fail;
+	}
+	ret = wait_event_timeout(v->cvs_wait,
+				 (v->cvs_state == CMD_STATUS_SUCCESS),
+				 msecs_to_jiffies(TIMEOUT_MS));
+	if (!ret)
+		pr_err("%s: wait_event timeout\n", __func__);
+
+	return 0;
+fail:
+	return -EINVAL;
+}
+
+static int voice_send_rx_device_mute_cmd(struct voice_data *v)
+{
+	struct cvp_set_mute_cmd cvp_mute_cmd;
+	int ret = 0;
+	void *apr_cvp;
+	u16 cvp_handle;
+	if (v == NULL) {
+		pr_err("%s: v is NULL\n", __func__);
+		return -EINVAL;
+	}
+	apr_cvp = common.apr_q6_cvp;
+
+	if (!apr_cvp) {
+		pr_err("%s: apr_cvp is NULL.\n", __func__);
+		return -EINVAL;
+	}
+	cvp_handle = voice_get_cvp_handle(v);
+
+	cvp_mute_cmd.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+						APR_HDR_LEN(APR_HDR_SIZE),
+						APR_PKT_VER);
+	cvp_mute_cmd.hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE,
+					sizeof(cvp_mute_cmd) - APR_HDR_SIZE);
+	cvp_mute_cmd.hdr.src_port = v->session_id;
+	cvp_mute_cmd.hdr.dest_port = cvp_handle;
+	cvp_mute_cmd.hdr.token = 0;
+	cvp_mute_cmd.hdr.opcode = VSS_IVOCPROC_CMD_SET_MUTE;
+	cvp_mute_cmd.cvp_set_mute.direction = 1;
+	cvp_mute_cmd.cvp_set_mute.mute_flag = v->dev_rx.mute;
+	v->cvp_state = CMD_STATUS_FAIL;
+	ret = apr_send_pkt(apr_cvp, (uint32_t *) &cvp_mute_cmd);
+	if (ret < 0) {
+		pr_err("Fail in sending RX device mute cmd\n");
+		return -EINVAL;
+	}
+	ret = wait_event_timeout(v->cvp_wait,
+				 (v->cvp_state == CMD_STATUS_SUCCESS),
+				 msecs_to_jiffies(TIMEOUT_MS));
+	if (!ret) {
+		pr_err("%s: wait_event timeout\n", __func__);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static int voice_send_vol_index_cmd(struct voice_data *v)
+{
+	struct cvp_set_rx_volume_index_cmd cvp_vol_cmd;
+	int ret = 0;
+	void *apr_cvp;
+	u16 cvp_handle;
+	if (v == NULL) {
+		pr_err("%s: v is NULL\n", __func__);
+		return -EINVAL;
+	}
+	apr_cvp = common.apr_q6_cvp;
+
+	if (!apr_cvp) {
+		pr_err("%s: apr_cvp is NULL.\n", __func__);
+		return -EINVAL;
+	}
+	cvp_handle = voice_get_cvp_handle(v);
+
+	/* send volume index to cvp */
+	cvp_vol_cmd.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+						APR_HDR_LEN(APR_HDR_SIZE),
+						APR_PKT_VER);
+	cvp_vol_cmd.hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE,
+					sizeof(cvp_vol_cmd) - APR_HDR_SIZE);
+	cvp_vol_cmd.hdr.src_port = v->session_id;
+	cvp_vol_cmd.hdr.dest_port = cvp_handle;
+	cvp_vol_cmd.hdr.token = 0;
+	cvp_vol_cmd.hdr.opcode = VSS_IVOCPROC_CMD_SET_RX_VOLUME_INDEX;
+	cvp_vol_cmd.cvp_set_vol_idx.vol_index = v->dev_rx.volume;
+	v->cvp_state = CMD_STATUS_FAIL;
+	ret = apr_send_pkt(apr_cvp, (uint32_t *) &cvp_vol_cmd);
+	if (ret < 0) {
+		pr_err("Fail in sending RX VOL INDEX\n");
+		return -EINVAL;
+	}
+	ret = wait_event_timeout(v->cvp_wait,
+				 (v->cvp_state == CMD_STATUS_SUCCESS),
+				 msecs_to_jiffies(TIMEOUT_MS));
+	if (!ret) {
+		pr_err("%s: wait_event timeout\n", __func__);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static int voice_cvs_start_record(struct voice_data *v, uint32_t rec_mode)
+{
+	int ret = 0;
+	void *apr_cvs;
+	u16 cvs_handle;
+
+	struct cvs_start_record_cmd cvs_start_record;
+
+	if (v == NULL) {
+		pr_err("%s: v is NULL\n", __func__);
+		return -EINVAL;
+	}
+	apr_cvs = common.apr_q6_cvs;
+
+	if (!apr_cvs) {
+		pr_err("%s: apr_cvs is NULL.\n", __func__);
+		return -EINVAL;
+	}
+
+	cvs_handle = voice_get_cvs_handle(v);
+
+	if (!v->rec_info.recording) {
+		cvs_start_record.hdr.hdr_field = APR_HDR_FIELD(
+					APR_MSG_TYPE_SEQ_CMD,
+					APR_HDR_LEN(APR_HDR_SIZE),
+					APR_PKT_VER);
+		cvs_start_record.hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE,
+				  sizeof(cvs_start_record) - APR_HDR_SIZE);
+		cvs_start_record.hdr.src_port = v->session_id;
+		cvs_start_record.hdr.dest_port = cvs_handle;
+		cvs_start_record.hdr.token = 0;
+		cvs_start_record.hdr.opcode = VSS_ISTREAM_CMD_START_RECORD;
+
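+		/*
+		 * Tap the recorded direction(s) at the stream end;
+		 * VSS_TAP_POINT_NONE skips recording on the other path.
+		 */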
+		if (rec_mode == VOC_REC_UPLINK) {
+			cvs_start_record.rec_mode.rx_tap_point =
+						VSS_TAP_POINT_NONE;
+			cvs_start_record.rec_mode.tx_tap_point =
+						VSS_TAP_POINT_STREAM_END;
+		} else if (rec_mode == VOC_REC_DOWNLINK) {
+			cvs_start_record.rec_mode.rx_tap_point =
+						VSS_TAP_POINT_STREAM_END;
+			cvs_start_record.rec_mode.tx_tap_point =
+						VSS_TAP_POINT_NONE;
+		} else if (rec_mode == VOC_REC_BOTH) {
+			cvs_start_record.rec_mode.rx_tap_point =
+						VSS_TAP_POINT_STREAM_END;
+			cvs_start_record.rec_mode.tx_tap_point =
+						VSS_TAP_POINT_STREAM_END;
+		} else {
+			pr_err("%s: Invalid in-call rec_mode %d\n", __func__,
+				rec_mode);
+
+			ret = -EINVAL;
+			goto fail;
+		}
+
+		v->cvs_state = CMD_STATUS_FAIL;
+
+		ret = apr_send_pkt(apr_cvs, (uint32_t *) &cvs_start_record);
+		if (ret < 0) {
+			pr_err("%s: Error %d sending START_RECORD\n", __func__,
+				ret);
+
+			goto fail;
+		}
+
+		ret = wait_event_timeout(v->cvs_wait,
+				 (v->cvs_state == CMD_STATUS_SUCCESS),
+				 msecs_to_jiffies(TIMEOUT_MS));
+
+		if (!ret) {
+			pr_err("%s: wait_event timeout\n", __func__);
+
+			goto fail;
+		}
+		v->rec_info.recording = 1;
+	} else {
+		pr_debug("%s: Start record already sent\n", __func__);
+	}
+
+	return 0;
+
+fail:
+	return ret;
+}
+
+static int voice_cvs_stop_record(struct voice_data *v)
+{
+	int ret = 0;
+	void *apr_cvs;
+	u16 cvs_handle;
+	struct apr_hdr cvs_stop_record;
+
+	if (v == NULL) {
+		pr_err("%s: v is NULL\n", __func__);
+		return -EINVAL;
+	}
+	apr_cvs = common.apr_q6_cvs;
+
+	if (!apr_cvs) {
+		pr_err("%s: apr_cvs is NULL.\n", __func__);
+		return -EINVAL;
+	}
+
+	cvs_handle = voice_get_cvs_handle(v);
+
+	if (v->rec_info.recording) {
+		cvs_stop_record.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+				  APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+		cvs_stop_record.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE,
+				  sizeof(cvs_stop_record) - APR_HDR_SIZE);
+		cvs_stop_record.src_port = v->session_id;
+		cvs_stop_record.dest_port = cvs_handle;
+		cvs_stop_record.token = 0;
+		cvs_stop_record.opcode = VSS_ISTREAM_CMD_STOP_RECORD;
+
+		v->cvs_state = CMD_STATUS_FAIL;
+
+		ret = apr_send_pkt(apr_cvs, (uint32_t *) &cvs_stop_record);
+		if (ret < 0) {
+			pr_err("%s: Error %d sending STOP_RECORD\n",
+				__func__, ret);
+
+			goto fail;
+		}
+
+		ret = wait_event_timeout(v->cvs_wait,
+				 (v->cvs_state == CMD_STATUS_SUCCESS),
+				 msecs_to_jiffies(TIMEOUT_MS));
+		if (!ret) {
+			pr_err("%s: wait_event timeout\n", __func__);
+
+			goto fail;
+		}
+		v->rec_info.recording = 0;
+	} else {
+		pr_debug("%s: Stop record already sent\n", __func__);
+	}
+
+	return 0;
+
+fail:
+	return ret;
+}
+
+int voc_start_record(uint32_t port_id, uint32_t set)
+{
+	int ret = 0;
+	int rec_mode = 0;
+	u16 cvs_handle;
+	int i, rec_set = 0;
+
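+	/*
+	 * Apply the routing change to every voice session; ul_flag/dl_flag
+	 * track which directions are currently tapped so an existing record
+	 * session is restarted with the combined mode when needed.
+	 */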
+	for (i = 0; i < MAX_VOC_SESSIONS; i++) {
+		struct voice_data *v = &common.voice[i];
+		pr_debug("%s: i:%d port_id: %d, set: %d\n",
+			__func__, i, port_id, set);
+
+		mutex_lock(&v->lock);
+		rec_mode = v->rec_info.rec_mode;
+		rec_set = set;
+		if (set) {
+			if ((v->rec_route_state.ul_flag != 0) &&
+				(v->rec_route_state.dl_flag != 0)) {
+				pr_debug("%s: i=%d, rec mode already set.\n",
+					__func__, i);
+				mutex_unlock(&v->lock);
+				if (i < MAX_VOC_SESSIONS)
+					continue;
+				else
+					return 0;
+			}
+
+			if (port_id == VOICE_RECORD_TX) {
+				if ((v->rec_route_state.ul_flag == 0)
+				&& (v->rec_route_state.dl_flag == 0)) {
+					rec_mode = VOC_REC_UPLINK;
+					v->rec_route_state.ul_flag = 1;
+				} else if ((v->rec_route_state.ul_flag == 0)
+					&& (v->rec_route_state.dl_flag != 0)) {
+					voice_cvs_stop_record(v);
+					rec_mode = VOC_REC_BOTH;
+					v->rec_route_state.ul_flag = 1;
+				}
+			} else if (port_id == VOICE_RECORD_RX) {
+				if ((v->rec_route_state.ul_flag == 0)
+					&& (v->rec_route_state.dl_flag == 0)) {
+					rec_mode = VOC_REC_DOWNLINK;
+					v->rec_route_state.dl_flag = 1;
+				} else if ((v->rec_route_state.ul_flag != 0)
+					&& (v->rec_route_state.dl_flag == 0)) {
+					voice_cvs_stop_record(v);
+					rec_mode = VOC_REC_BOTH;
+					v->rec_route_state.dl_flag = 1;
+				}
+			}
+			rec_set = 1;
+		} else {
+			if ((v->rec_route_state.ul_flag == 0) &&
+				(v->rec_route_state.dl_flag == 0)) {
+				pr_debug("%s: i=%d, rec already stopped.\n",
+					__func__, i);
+				mutex_unlock(&v->lock);
+				if (i < MAX_VOC_SESSIONS)
+					continue;
+				else
+					return 0;
+			}
+
+			if (port_id == VOICE_RECORD_TX) {
+				if ((v->rec_route_state.ul_flag != 0)
+					&& (v->rec_route_state.dl_flag == 0)) {
+					v->rec_route_state.ul_flag = 0;
+					rec_set = 0;
+				} else if ((v->rec_route_state.ul_flag != 0)
+					&& (v->rec_route_state.dl_flag != 0)) {
+					voice_cvs_stop_record(v);
+					v->rec_route_state.ul_flag = 0;
+					rec_mode = VOC_REC_DOWNLINK;
+					rec_set = 1;
+				}
+			} else if (port_id == VOICE_RECORD_RX) {
+				if ((v->rec_route_state.ul_flag == 0)
+					&& (v->rec_route_state.dl_flag != 0)) {
+					v->rec_route_state.dl_flag = 0;
+					rec_set = 0;
+				} else if ((v->rec_route_state.ul_flag != 0)
+					&& (v->rec_route_state.dl_flag != 0)) {
+					voice_cvs_stop_record(v);
+					v->rec_route_state.dl_flag = 0;
+					rec_mode = VOC_REC_UPLINK;
+					rec_set = 1;
+				}
+			}
+		}
+		pr_debug("%s: i=%d, mode =%d, set =%d\n", __func__,
+			i, rec_mode, rec_set);
+		cvs_handle = voice_get_cvs_handle(v);
+
+		if (cvs_handle != 0) {
+			if (rec_set)
+				ret = voice_cvs_start_record(v, rec_mode);
+			else
+				ret = voice_cvs_stop_record(v);
+		}
+
+		/* Cache the value */
+		v->rec_info.rec_enable = rec_set;
+		v->rec_info.rec_mode = rec_mode;
+
+		mutex_unlock(&v->lock);
+	}
+
+	return ret;
+}
+
+static int voice_cvs_start_playback(struct voice_data *v)
+{
+	int ret = 0;
+	struct apr_hdr cvs_start_playback;
+	void *apr_cvs;
+	u16 cvs_handle;
+
+	if (v == NULL) {
+		pr_err("%s: v is NULL\n", __func__);
+		return -EINVAL;
+	}
+	apr_cvs = common.apr_q6_cvs;
+
+	if (!apr_cvs) {
+		pr_err("%s: apr_cvs is NULL.\n", __func__);
+		return -EINVAL;
+	}
+
+	cvs_handle = voice_get_cvs_handle(v);
+
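+	/*
+	 * music_info.count is a start/stop refcount maintained by
+	 * voc_start_playback(); START_PLAYBACK is only sent when playback is
+	 * not already running and a start is pending.
+	 */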
+	if (!v->music_info.playing && v->music_info.count) {
+		cvs_start_playback.hdr_field = APR_HDR_FIELD(
+					APR_MSG_TYPE_SEQ_CMD,
+					APR_HDR_LEN(APR_HDR_SIZE),
+					APR_PKT_VER);
+		cvs_start_playback.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE,
+				sizeof(cvs_start_playback) - APR_HDR_SIZE);
+		cvs_start_playback.src_port = v->session_id;
+		cvs_start_playback.dest_port = cvs_handle;
+		cvs_start_playback.token = 0;
+		cvs_start_playback.opcode = VSS_ISTREAM_CMD_START_PLAYBACK;
+
+		v->cvs_state = CMD_STATUS_FAIL;
+
+		ret = apr_send_pkt(apr_cvs, (uint32_t *) &cvs_start_playback);
+
+		if (ret < 0) {
+			pr_err("%s: Error %d sending START_PLAYBACK\n",
+				__func__, ret);
+
+			goto fail;
+		}
+
+		ret = wait_event_timeout(v->cvs_wait,
+				 (v->cvs_state == CMD_STATUS_SUCCESS),
+				 msecs_to_jiffies(TIMEOUT_MS));
+		if (!ret) {
+			pr_err("%s: wait_event timeout\n", __func__);
+
+			goto fail;
+		}
+
+		v->music_info.playing = 1;
+	} else {
+		pr_debug("%s: Start playback already sent\n", __func__);
+	}
+
+	return 0;
+
+fail:
+	return ret;
+}
+
+static int voice_cvs_stop_playback(struct voice_data *v)
+{
+	int ret = 0;
+	struct apr_hdr cvs_stop_playback;
+	void *apr_cvs;
+	u16 cvs_handle;
+
+	if (v == NULL) {
+		pr_err("%s: v is NULL\n", __func__);
+		return -EINVAL;
+	}
+	apr_cvs = common.apr_q6_cvs;
+
+	if (!apr_cvs) {
+		pr_err("%s: apr_cvs is NULL.\n", __func__);
+		return -EINVAL;
+	}
+
+	cvs_handle = voice_get_cvs_handle(v);
+
+	if (v->music_info.playing && ((!v->music_info.count) ||
+						(v->music_info.force))) {
+		cvs_stop_playback.hdr_field =
+				APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+				APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+		cvs_stop_playback.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE,
+				sizeof(cvs_stop_playback) - APR_HDR_SIZE);
+		cvs_stop_playback.src_port = v->session_id;
+		cvs_stop_playback.dest_port = cvs_handle;
+		cvs_stop_playback.token = 0;
+
+		cvs_stop_playback.opcode = VSS_ISTREAM_CMD_STOP_PLAYBACK;
+
+		v->cvs_state = CMD_STATUS_FAIL;
+
+		ret = apr_send_pkt(apr_cvs, (uint32_t *) &cvs_stop_playback);
+		if (ret < 0) {
+			pr_err("%s: Error %d sending STOP_PLAYBACK\n",
+			       __func__, ret);
+
+			goto fail;
+		}
+
+		ret = wait_event_timeout(v->cvs_wait,
+					 (v->cvs_state == CMD_STATUS_SUCCESS),
+					 msecs_to_jiffies(TIMEOUT_MS));
+		if (!ret) {
+			pr_err("%s: wait_event timeout\n", __func__);
+
+			goto fail;
+		}
+
+		v->music_info.playing = 0;
+		v->music_info.force = 0;
+	} else {
+		pr_debug("%s: Stop playback already sent\n", __func__);
+	}
+
+	return 0;
+
+fail:
+	return ret;
+}
+
+int voc_start_playback(uint32_t set)
+{
+	int ret = 0;
+	u16 cvs_handle;
+	int i;
+
+	for (i = 0; i < MAX_VOC_SESSIONS; i++) {
+		struct voice_data *v = &common.voice[i];
+
+		mutex_lock(&v->lock);
+		v->music_info.play_enable = set;
+		if (set)
+			v->music_info.count++;
+		else
+			v->music_info.count--;
+		pr_debug("%s: music_info count =%d\n", __func__,
+			v->music_info.count);
+
+		cvs_handle = voice_get_cvs_handle(v);
+		if (cvs_handle != 0) {
+			if (set)
+				ret = voice_cvs_start_playback(v);
+			else
+				ret = voice_cvs_stop_playback(v);
+		}
+
+		mutex_unlock(&v->lock);
+	}
+
+	return ret;
+}
+
+int voc_disable_cvp(uint16_t session_id)
+{
+	struct voice_data *v = voice_get_session(session_id);
+	int ret = 0;
+
+	if (v == NULL) {
+		pr_err("%s: invalid session_id 0x%x\n", __func__, session_id);
+
+		return -EINVAL;
+	}
+
+	mutex_lock(&v->lock);
+
+	if (v->voc_state == VOC_RUN) {
+		if (v->dev_tx.port_id != RT_PROXY_PORT_001_TX &&
+			v->dev_rx.port_id != RT_PROXY_PORT_001_RX)
+			afe_sidetone(v->dev_tx.port_id, v->dev_rx.port_id,
+					0, 0);
+
+		rtac_remove_voice(voice_get_cvs_handle(v));
+		/* send cmd to dsp to disable vocproc */
+		ret = voice_send_disable_vocproc_cmd(v);
+		if (ret < 0) {
+			pr_err("%s:  disable vocproc failed\n", __func__);
+			goto fail;
+		}
+
+		/* deregister cvp and vol cal */
+		voice_send_cvp_deregister_vol_cal_table_cmd(v);
+		voice_send_cvp_deregister_cal_cmd(v);
+		voice_send_cvp_unmap_memory_cmd(v);
+
+		v->voc_state = VOC_CHANGE;
+	}
+
+fail:
+	mutex_unlock(&v->lock);
+
+	return ret;
+}
+
+int voc_enable_cvp(uint16_t session_id)
+{
+	struct voice_data *v = voice_get_session(session_id);
+	struct sidetone_cal sidetone_cal_data;
+	int ret = 0;
+
+	if (v == NULL) {
+		pr_err("%s: invalid session_id 0x%x\n", __func__, session_id);
+
+		return -EINVAL;
+	}
+
+	mutex_lock(&v->lock);
+
+	if (v->voc_state == VOC_CHANGE) {
+		ret = voice_send_set_device_cmd(v);
+		if (ret < 0) {
+			pr_err("%s:  set device failed\n", __func__);
+			goto fail;
+		}
+		/* send cvp and vol cal */
+		ret = voice_send_cvp_map_memory_cmd(v);
+		if (!ret) {
+			voice_send_cvp_register_cal_cmd(v);
+			voice_send_cvp_register_vol_cal_table_cmd(v);
+		}
+		ret = voice_send_enable_vocproc_cmd(v);
+		if (ret < 0) {
+			pr_err("%s: enable vocproc failed\n", __func__);
+			goto fail;
+		}
+		/* send tty mode if tty device is used */
+		voice_send_tty_mode_cmd(v);
+
+		/* enable widevoice if wv_enable is set */
+		if (v->wv_enable)
+			voice_send_set_widevoice_enable_cmd(v);
+
+		/* enable slowtalk */
+		if (v->st_enable)
+			voice_send_set_pp_enable_cmd(v,
+						MODULE_ID_VOICE_MODULE_ST,
+						v->st_enable);
+		/* enable FENS */
+		if (v->fens_enable)
+			voice_send_set_pp_enable_cmd(v,
+						MODULE_ID_VOICE_MODULE_FENS,
+						v->fens_enable);
+
+		get_sidetone_cal(&sidetone_cal_data);
+		if (v->dev_tx.port_id != RT_PROXY_PORT_001_TX &&
+			v->dev_rx.port_id != RT_PROXY_PORT_001_RX) {
+			ret = afe_sidetone(v->dev_tx.port_id,
+					v->dev_rx.port_id,
+					sidetone_cal_data.enable,
+					sidetone_cal_data.gain);
+
+			if (ret < 0)
+				pr_err("%s: AFE command sidetone failed\n",
+					__func__);
+		}
+
+		rtac_add_voice(voice_get_cvs_handle(v),
+			voice_get_cvp_handle(v),
+			v->dev_rx.port_id, v->dev_tx.port_id,
+			v->session_id);
+		v->voc_state = VOC_RUN;
+	}
+
+fail:
+	mutex_unlock(&v->lock);
+
+	return ret;
+}
+
+int voc_set_tx_mute(uint16_t session_id, uint32_t dir, uint32_t mute)
+{
+	struct voice_data *v = voice_get_session(session_id);
+	int ret = 0;
+
+	if (v == NULL) {
+		pr_err("%s: invalid session_id 0x%x\n", __func__, session_id);
+
+		return -EINVAL;
+	}
+
+	mutex_lock(&v->lock);
+
+	v->dev_tx.mute = mute;
+
+	if (v->voc_state == VOC_RUN)
+		ret = voice_send_mute_cmd(v);
+
+	mutex_unlock(&v->lock);
+
+	return ret;
+}
+
+int voc_set_rx_device_mute(uint16_t session_id, uint32_t mute)
+{
+	struct voice_data *v = voice_get_session(session_id);
+	int ret = 0;
+
+	if (v == NULL) {
+		pr_err("%s: invalid session_id 0x%x\n", __func__, session_id);
+
+		return -EINVAL;
+	}
+
+	mutex_lock(&v->lock);
+
+	v->dev_rx.mute = mute;
+
+	if (v->voc_state == VOC_RUN)
+		ret = voice_send_rx_device_mute_cmd(v);
+
+	mutex_unlock(&v->lock);
+
+	return ret;
+}
+
+int voc_get_rx_device_mute(uint16_t session_id)
+{
+	struct voice_data *v = voice_get_session(session_id);
+	int ret = 0;
+
+	if (v == NULL) {
+		pr_err("%s: invalid session_id 0x%x\n", __func__, session_id);
+
+		return -EINVAL;
+	}
+
+	mutex_lock(&v->lock);
+
+	ret = v->dev_rx.mute;
+
+	mutex_unlock(&v->lock);
+
+	return ret;
+}
+
+int voc_set_tty_mode(uint16_t session_id, uint8_t tty_mode)
+{
+	struct voice_data *v = voice_get_session(session_id);
+	int ret = 0;
+
+	if (v == NULL) {
+		pr_err("%s: invalid session_id 0x%x\n", __func__, session_id);
+
+		return -EINVAL;
+	}
+
+	mutex_lock(&v->lock);
+
+	v->tty_mode = tty_mode;
+
+	mutex_unlock(&v->lock);
+
+	return ret;
+}
+
+uint8_t voc_get_tty_mode(uint16_t session_id)
+{
+	struct voice_data *v = voice_get_session(session_id);
+	int ret = 0;
+
+	if (v == NULL) {
+		pr_err("%s: invalid session_id 0x%x\n", __func__, session_id);
+
+		return -EINVAL;
+	}
+
+	mutex_lock(&v->lock);
+
+	ret = v->tty_mode;
+
+	mutex_unlock(&v->lock);
+
+	return ret;
+}
+
+int voc_set_widevoice_enable(uint16_t session_id, uint32_t wv_enable)
+{
+	struct voice_data *v = voice_get_session(session_id);
+	u16 mvm_handle;
+	int ret = 0;
+
+	if (v == NULL) {
+		pr_err("%s: invalid session_id 0x%x\n", __func__, session_id);
+
+		return -EINVAL;
+	}
+
+	mutex_lock(&v->lock);
+
+	v->wv_enable = wv_enable;
+
+	mvm_handle = voice_get_mvm_handle(v);
+
+	if (mvm_handle != 0)
+		voice_send_set_widevoice_enable_cmd(v);
+
+	mutex_unlock(&v->lock);
+
+	return ret;
+}
+
+uint32_t voc_get_widevoice_enable(uint16_t session_id)
+{
+	struct voice_data *v = voice_get_session(session_id);
+	int ret = 0;
+
+	if (v == NULL) {
+		pr_err("%s: invalid session_id 0x%x\n", __func__, session_id);
+
+		return -EINVAL;
+	}
+
+	mutex_lock(&v->lock);
+
+	ret = v->wv_enable;
+
+	mutex_unlock(&v->lock);
+
+	return ret;
+}
+
+int voc_set_pp_enable(uint16_t session_id, uint32_t module_id, uint32_t enable)
+{
+	struct voice_data *v = voice_get_session(session_id);
+	int ret = 0;
+
+	if (v == NULL) {
+		pr_err("%s: invalid session_id 0x%x\n", __func__, session_id);
+
+		return -EINVAL;
+	}
+
+	mutex_lock(&v->lock);
+	if (module_id == MODULE_ID_VOICE_MODULE_ST)
+		v->st_enable = enable;
+	else if (module_id == MODULE_ID_VOICE_MODULE_FENS)
+		v->fens_enable = enable;
+
+	if (v->voc_state == VOC_RUN) {
+		if (module_id == MODULE_ID_VOICE_MODULE_ST)
+			ret = voice_send_set_pp_enable_cmd(v,
+						MODULE_ID_VOICE_MODULE_ST,
+						enable);
+		else if (module_id == MODULE_ID_VOICE_MODULE_FENS)
+			ret = voice_send_set_pp_enable_cmd(v,
+						MODULE_ID_VOICE_MODULE_FENS,
+						enable);
+	}
+	mutex_unlock(&v->lock);
+
+	return ret;
+}
+
+int voc_get_pp_enable(uint16_t session_id, uint32_t module_id)
+{
+	struct voice_data *v = voice_get_session(session_id);
+	int ret = 0;
+
+	if (v == NULL) {
+		pr_err("%s: invalid session_id 0x%x\n", __func__, session_id);
+
+		return -EINVAL;
+	}
+
+	mutex_lock(&v->lock);
+	if (module_id == MODULE_ID_VOICE_MODULE_ST)
+		ret = v->st_enable;
+	else if (module_id == MODULE_ID_VOICE_MODULE_FENS)
+		ret = v->fens_enable;
+
+	mutex_unlock(&v->lock);
+
+	return ret;
+}
+
+int voc_set_rx_vol_index(uint16_t session_id, uint32_t dir, uint32_t vol_idx)
+{
+	struct voice_data *v = voice_get_session(session_id);
+	int ret = 0;
+
+	if (v == NULL) {
+		pr_err("%s: invalid session_id 0x%x\n", __func__, session_id);
+
+		return -EINVAL;
+	}
+
+	mutex_lock(&v->lock);
+
+	v->dev_rx.volume = vol_idx;
+
+	if (v->voc_state == VOC_RUN)
+		ret = voice_send_vol_index_cmd(v);
+
+	mutex_unlock(&v->lock);
+
+	return ret;
+}
+
+int voc_set_rxtx_port(uint16_t session_id, uint32_t port_id, uint32_t dev_type)
+{
+	struct voice_data *v = voice_get_session(session_id);
+
+	if (v == NULL) {
+		pr_err("%s: invalid session_id 0x%x\n", __func__, session_id);
+
+		return -EINVAL;
+	}
+
+	pr_debug("%s: port_id=%d, type=%d\n", __func__, port_id, dev_type);
+
+	mutex_lock(&v->lock);
+
+	if (dev_type == DEV_RX)
+		v->dev_rx.port_id = q6audio_get_port_id(port_id);
+	else
+		v->dev_tx.port_id = q6audio_get_port_id(port_id);
+
+	mutex_unlock(&v->lock);
+
+	return 0;
+}
+
+int voc_set_route_flag(uint16_t session_id, uint8_t path_dir, uint8_t set)
+{
+	struct voice_data *v = voice_get_session(session_id);
+
+	if (v == NULL) {
+		pr_err("%s: invalid session_id 0x%x\n", __func__, session_id);
+
+		return -EINVAL;
+	}
+
+	pr_debug("%s: path_dir=%d, set=%d\n", __func__, path_dir, set);
+
+	mutex_lock(&v->lock);
+
+	if (path_dir == RX_PATH)
+		v->voc_route_state.rx_route_flag = set;
+	else
+		v->voc_route_state.tx_route_flag = set;
+
+	mutex_unlock(&v->lock);
+
+	return 0;
+}
+
+uint8_t voc_get_route_flag(uint16_t session_id, uint8_t path_dir)
+{
+	struct voice_data *v = voice_get_session(session_id);
+	int ret = 0;
+
+	if (v == NULL) {
+		pr_err("%s: invalid session_id 0x%x\n", __func__, session_id);
+
+		return 0;
+	}
+
+	mutex_lock(&v->lock);
+
+	if (path_dir == RX_PATH)
+		ret = v->voc_route_state.rx_route_flag;
+	else
+		ret = v->voc_route_state.tx_route_flag;
+
+	mutex_unlock(&v->lock);
+
+	return ret;
+}
+
+int voc_end_voice_call(uint16_t session_id)
+{
+	struct voice_data *v = voice_get_session(session_id);
+	int ret = 0;
+
+	if (v == NULL) {
+		pr_err("%s: invalid session_id 0x%x\n", __func__, session_id);
+
+		return -EINVAL;
+	}
+
+	mutex_lock(&v->lock);
+
+	if (v->voc_state == VOC_RUN) {
+		if (v->dev_tx.port_id != RT_PROXY_PORT_001_TX &&
+			v->dev_rx.port_id != RT_PROXY_PORT_001_RX)
+			afe_sidetone(v->dev_tx.port_id, v->dev_rx.port_id,
+					0, 0);
+		ret = voice_destroy_vocproc(v);
+		if (ret < 0)
+			pr_err("%s:  destroy voice failed\n", __func__);
+		voice_destroy_mvm_cvs_session(v);
+
+		v->voc_state = VOC_RELEASE;
+	}
+	mutex_unlock(&v->lock);
+	return ret;
+}
+
+int voc_start_voice_call(uint16_t session_id)
+{
+	struct voice_data *v = voice_get_session(session_id);
+	struct sidetone_cal sidetone_cal_data;
+	int ret = 0;
+
+	if (v == NULL) {
+		pr_err("%s: invalid session_id 0x%x\n", __func__, session_id);
+
+		return -EINVAL;
+	}
+
+	mutex_lock(&v->lock);
+
+	if ((v->voc_state == VOC_INIT) ||
+		(v->voc_state == VOC_RELEASE)) {
+		ret = voice_apr_register();
+		if (ret < 0) {
+			pr_err("%s:  apr register failed\n", __func__);
+			goto fail;
+		}
+		ret = voice_create_mvm_cvs_session(v);
+		if (ret < 0) {
+			pr_err("create mvm and cvs failed\n");
+			goto fail;
+		}
+		ret = voice_send_dual_control_cmd(v);
+		if (ret < 0) {
+			pr_err("Err Dual command failed\n");
+			goto fail;
+		}
+		ret = voice_setup_vocproc(v);
+		if (ret < 0) {
+			pr_err("setup voice failed\n");
+			goto fail;
+		}
+		ret = voice_send_start_voice_cmd(v);
+		if (ret < 0) {
+			pr_err("start voice failed\n");
+			goto fail;
+		}
+		get_sidetone_cal(&sidetone_cal_data);
+		if (v->dev_tx.port_id != RT_PROXY_PORT_001_TX &&
+			v->dev_rx.port_id != RT_PROXY_PORT_001_RX) {
+			ret = afe_sidetone(v->dev_tx.port_id,
+					v->dev_rx.port_id,
+					sidetone_cal_data.enable,
+					sidetone_cal_data.gain);
+			if (ret < 0)
+				pr_err("AFE command sidetone failed\n");
+		}
+
+		v->voc_state = VOC_RUN;
+	}
+fail:
+	mutex_unlock(&v->lock);
+	return ret;
+}
+
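+/*
+ * MVS (VoIP) client interface, sketched here for reference rather than as a
+ * guaranteed contract: register uplink/downlink packet callbacks with
+ * voc_register_mvs_cb(), set the vocoder with voc_config_vocoder(), then
+ * start a VoIP session via voc_start_voice_call() so the network/timing
+ * setup and encoder/decoder buffer exchange in the CVS callback take effect.
+ */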
+void voc_register_mvs_cb(ul_cb_fn ul_cb,
+			   dl_cb_fn dl_cb,
+			   void *private_data)
+{
+	common.mvs_info.ul_cb = ul_cb;
+	common.mvs_info.dl_cb = dl_cb;
+	common.mvs_info.private_data = private_data;
+}
+
+void voc_config_vocoder(uint32_t media_type,
+			  uint32_t rate,
+			  uint32_t network_type,
+			  uint32_t dtx_mode)
+{
+	common.mvs_info.media_type = media_type;
+	common.mvs_info.rate = rate;
+	common.mvs_info.network_type = network_type;
+	common.mvs_info.dtx_mode = dtx_mode;
+}
+
+static int32_t qdsp_mvm_callback(struct apr_client_data *data, void *priv)
+{
+	uint32_t *ptr = NULL;
+	struct common_data *c = NULL;
+	struct voice_data *v = NULL;
+	int i = 0;
+
+	if ((data == NULL) || (priv == NULL)) {
+		pr_err("%s: data or priv is NULL\n", __func__);
+		return -EINVAL;
+	}
+
+	c = priv;
+
+	pr_debug("%s: session_id 0x%x\n", __func__, data->dest_port);
+
+	v = voice_get_session(data->dest_port);
+	if (v == NULL) {
+		pr_err("%s: v is NULL\n", __func__);
+
+		return -EINVAL;
+	}
+
+	pr_debug("%s: Payload Length = %d, opcode=%x\n", __func__,
+		data->payload_size, data->opcode);
+
+	if (data->opcode == RESET_EVENTS) {
+		pr_debug("%s: Reset event received in Voice service\n",
+			 __func__);
+
+		apr_reset(c->apr_q6_mvm);
+		c->apr_q6_mvm = NULL;
+
+		/* Sub-system restart is applicable to all sessions. */
+		for (i = 0; i < MAX_VOC_SESSIONS; i++)
+			c->voice[i].mvm_handle = 0;
+
+		return 0;
+	}
+
+	if (data->opcode == APR_BASIC_RSP_RESULT) {
+		if (data->payload_size) {
+			ptr = data->payload;
+
+			/* ping mvm service ACK */
+			switch (ptr[0]) {
+			case VSS_IMVM_CMD_CREATE_PASSIVE_CONTROL_SESSION:
+			case VSS_IMVM_CMD_CREATE_FULL_CONTROL_SESSION:
+				/* Passive session is used for CS call
+				 * Full session is used for VoIP call. */
+				pr_debug("%s: cmd = 0x%x\n", __func__, ptr[0]);
+				if (!ptr[1]) {
+					pr_debug("%s: MVM handle is %d\n",
+						 __func__, data->src_port);
+					voice_set_mvm_handle(v, data->src_port);
+				} else
+					pr_err("got NACK for sending MVM create session\n");
+				v->mvm_state = CMD_STATUS_SUCCESS;
+				wake_up(&v->mvm_wait);
+				break;
+			case VSS_IMVM_CMD_START_VOICE:
+			case VSS_IMVM_CMD_ATTACH_VOCPROC:
+			case VSS_IMVM_CMD_STOP_VOICE:
+			case VSS_IMVM_CMD_DETACH_VOCPROC:
+			case VSS_ISTREAM_CMD_SET_TTY_MODE:
+			case APRV2_IBASIC_CMD_DESTROY_SESSION:
+			case VSS_IMVM_CMD_ATTACH_STREAM:
+			case VSS_IMVM_CMD_DETACH_STREAM:
+			case VSS_ICOMMON_CMD_SET_NETWORK:
+			case VSS_ICOMMON_CMD_SET_VOICE_TIMING:
+			case VSS_IWIDEVOICE_CMD_SET_WIDEVOICE:
+			case VSS_IMVM_CMD_SET_POLICY_DUAL_CONTROL:
+				pr_debug("%s: cmd = 0x%x\n", __func__, ptr[0]);
+				v->mvm_state = CMD_STATUS_SUCCESS;
+				wake_up(&v->mvm_wait);
+				break;
+			default:
+				pr_debug("%s: not match cmd = 0x%x\n",
+					__func__, ptr[0]);
+				break;
+			}
+		}
+	}
+
+	return 0;
+}
+
+static int32_t qdsp_cvs_callback(struct apr_client_data *data, void *priv)
+{
+	uint32_t *ptr = NULL;
+	struct common_data *c = NULL;
+	struct voice_data *v = NULL;
+	int i = 0;
+
+	if ((data == NULL) || (priv == NULL)) {
+		pr_err("%s: data or priv is NULL\n", __func__);
+		return -EINVAL;
+	}
+
+	c = priv;
+
+	pr_debug("%s: session_id 0x%x\n", __func__, data->dest_port);
+
+	v = voice_get_session(data->dest_port);
+	if (v == NULL) {
+		pr_err("%s: v is NULL\n", __func__);
+
+		return -EINVAL;
+	}
+
+	pr_debug("%s: Payload Length = %d, opcode=%x\n", __func__,
+		data->payload_size, data->opcode);
+
+	if (data->opcode == RESET_EVENTS) {
+		pr_debug("%s: Reset event received in Voice service\n",
+			 __func__);
+
+		apr_reset(c->apr_q6_cvs);
+		c->apr_q6_cvs = NULL;
+
+		/* Sub-system restart is applicable to all sessions. */
+		for (i = 0; i < MAX_VOC_SESSIONS; i++)
+			c->voice[i].cvs_handle = 0;
+
+		return 0;
+	}
+
+	if (data->opcode == APR_BASIC_RSP_RESULT) {
+		if (data->payload_size) {
+			ptr = data->payload;
+
+			/* Response from CVS */
+			switch (ptr[0]) {
+			case VSS_ISTREAM_CMD_CREATE_PASSIVE_CONTROL_SESSION:
+			case VSS_ISTREAM_CMD_CREATE_FULL_CONTROL_SESSION:
+				if (!ptr[1]) {
+					pr_debug("%s: CVS handle is %d\n",
+						 __func__, data->src_port);
+					voice_set_cvs_handle(v, data->src_port);
+				} else
+					pr_err("got NACK for sending CVS create session\n");
+				v->cvs_state = CMD_STATUS_SUCCESS;
+				wake_up(&v->cvs_wait);
+				break;
+			case VSS_ISTREAM_CMD_SET_MUTE:
+			case VSS_ISTREAM_CMD_SET_MEDIA_TYPE:
+			case VSS_ISTREAM_CMD_VOC_AMR_SET_ENC_RATE:
+			case VSS_ISTREAM_CMD_VOC_AMRWB_SET_ENC_RATE:
+			case VSS_ISTREAM_CMD_SET_ENC_DTX_MODE:
+			case VSS_ISTREAM_CMD_CDMA_SET_ENC_MINMAX_RATE:
+			case APRV2_IBASIC_CMD_DESTROY_SESSION:
+			case VSS_ISTREAM_CMD_REGISTER_CALIBRATION_DATA:
+			case VSS_ISTREAM_CMD_DEREGISTER_CALIBRATION_DATA:
+			case VSS_ICOMMON_CMD_MAP_MEMORY:
+			case VSS_ICOMMON_CMD_UNMAP_MEMORY:
+			case VSS_ICOMMON_CMD_SET_UI_PROPERTY:
+			case VSS_ISTREAM_CMD_START_PLAYBACK:
+			case VSS_ISTREAM_CMD_STOP_PLAYBACK:
+			case VSS_ISTREAM_CMD_START_RECORD:
+			case VSS_ISTREAM_CMD_STOP_RECORD:
+				pr_debug("%s: cmd = 0x%x\n", __func__, ptr[0]);
+				v->cvs_state = CMD_STATUS_SUCCESS;
+				wake_up(&v->cvs_wait);
+				break;
+			case VOICE_CMD_SET_PARAM:
+				rtac_make_voice_callback(RTAC_CVS, ptr,
+							data->payload_size);
+				break;
+			default:
+				pr_debug("%s: cmd = 0x%x\n", __func__, ptr[0]);
+				break;
+			}
+		}
+	} else if (data->opcode == VSS_ISTREAM_EVT_SEND_ENC_BUFFER) {
+		uint32_t *voc_pkt = data->payload;
+		uint32_t pkt_len = data->payload_size;
+
+		if (voc_pkt != NULL && c->mvs_info.ul_cb != NULL) {
+			pr_debug("%s: Media type is 0x%x\n",
+				 __func__, voc_pkt[0]);
+
+			/* Strip the leading 4-byte media ID word from the payload. */
+			voc_pkt++;
+			pkt_len = pkt_len - 4;
+
+			c->mvs_info.ul_cb((uint8_t *)voc_pkt,
+					  pkt_len,
+					  c->mvs_info.private_data);
+		} else
+			pr_err("%s: voc_pkt is 0x%x ul_cb is 0x%x\n",
+			       __func__, (unsigned int)voc_pkt,
+			       (unsigned int) c->mvs_info.ul_cb);
+	} else if (data->opcode == VSS_ISTREAM_EVT_REQUEST_DEC_BUFFER) {
+		struct cvs_send_dec_buf_cmd send_dec_buf;
+		int ret = 0;
+		uint32_t pkt_len = 0;
+
+		if (c->mvs_info.dl_cb != NULL) {
+			send_dec_buf.dec_buf.media_id = c->mvs_info.media_type;
+
+			c->mvs_info.dl_cb(
+				(uint8_t *)&send_dec_buf.dec_buf.packet_data,
+				&pkt_len,
+				c->mvs_info.private_data);
+
+			send_dec_buf.hdr.hdr_field =
+					APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+					APR_HDR_LEN(APR_HDR_SIZE),
+					APR_PKT_VER);
+			send_dec_buf.hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE,
+			       sizeof(send_dec_buf.dec_buf.media_id) + pkt_len);
+			send_dec_buf.hdr.src_port = v->session_id;
+			send_dec_buf.hdr.dest_port = voice_get_cvs_handle(v);
+			send_dec_buf.hdr.token = 0;
+			send_dec_buf.hdr.opcode =
+					VSS_ISTREAM_EVT_SEND_DEC_BUFFER;
+
+			ret = apr_send_pkt(c->apr_q6_cvs,
+					   (uint32_t *) &send_dec_buf);
+			if (ret < 0) {
+				pr_err("%s: Error %d sending DEC_BUF\n",
+				       __func__, ret);
+				goto fail;
+			}
+		} else
+			pr_debug("%s: dl_cb is NULL\n", __func__);
+	} else if (data->opcode == VSS_ISTREAM_EVT_SEND_DEC_BUFFER) {
+		pr_debug("Send dec buf resp\n");
+	} else if (data->opcode ==  VOICE_EVT_GET_PARAM_ACK) {
+		rtac_make_voice_callback(RTAC_CVS, data->payload,
+					data->payload_size);
+	} else
+		pr_debug("Unknown opcode 0x%x\n", data->opcode);
+
+fail:
+	return 0;
+}
+
+static int32_t qdsp_cvp_callback(struct apr_client_data *data, void *priv)
+{
+	uint32_t *ptr = NULL;
+	struct common_data *c = NULL;
+	struct voice_data *v = NULL;
+	int i = 0;
+
+	if ((data == NULL) || (priv == NULL)) {
+		pr_err("%s: data or priv is NULL\n", __func__);
+		return -EINVAL;
+	}
+
+	c = priv;
+
+	v = voice_get_session(data->dest_port);
+	if (v == NULL) {
+		pr_err("%s: v is NULL\n", __func__);
+
+		return -EINVAL;
+	}
+
+	pr_debug("%s: Payload Length = %d, opcode=%x\n", __func__,
+		data->payload_size, data->opcode);
+
+	if (data->opcode == RESET_EVENTS) {
+		pr_debug("%s: Reset event received in Voice service\n",
+			 __func__);
+
+		apr_reset(c->apr_q6_cvp);
+		c->apr_q6_cvp = NULL;
+
+		/* Sub-system restart is applicable to all sessions. */
+		for (i = 0; i < MAX_VOC_SESSIONS; i++)
+			c->voice[i].cvp_handle = 0;
+
+		return 0;
+	}
+
+	if (data->opcode == APR_BASIC_RSP_RESULT) {
+		if (data->payload_size) {
+			ptr = data->payload;
+
+			switch (ptr[0]) {
+			case VSS_IVOCPROC_CMD_CREATE_FULL_CONTROL_SESSION:
+			/* Response from CVP */
+				pr_debug("%s: cmd = 0x%x\n", __func__, ptr[0]);
+				if (!ptr[1]) {
+					voice_set_cvp_handle(v, data->src_port);
+					pr_debug("cvphdl=%d\n", data->src_port);
+				} else
+					pr_err("got NACK from CVP create session response\n");
+				v->cvp_state = CMD_STATUS_SUCCESS;
+				wake_up(&v->cvp_wait);
+				break;
+			case VSS_IVOCPROC_CMD_SET_DEVICE:
+			case VSS_IVOCPROC_CMD_SET_RX_VOLUME_INDEX:
+			case VSS_IVOCPROC_CMD_ENABLE:
+			case VSS_IVOCPROC_CMD_DISABLE:
+			case APRV2_IBASIC_CMD_DESTROY_SESSION:
+			case VSS_IVOCPROC_CMD_REGISTER_VOLUME_CAL_TABLE:
+			case VSS_IVOCPROC_CMD_DEREGISTER_VOLUME_CAL_TABLE:
+			case VSS_IVOCPROC_CMD_REGISTER_CALIBRATION_DATA:
+			case VSS_IVOCPROC_CMD_DEREGISTER_CALIBRATION_DATA:
+			case VSS_ICOMMON_CMD_MAP_MEMORY:
+			case VSS_ICOMMON_CMD_UNMAP_MEMORY:
+			case VSS_IVOCPROC_CMD_SET_MUTE:
+				v->cvp_state = CMD_STATUS_SUCCESS;
+				wake_up(&v->cvp_wait);
+				break;
+			case VOICE_CMD_SET_PARAM:
+				rtac_make_voice_callback(RTAC_CVP, ptr,
+							data->payload_size);
+				break;
+			default:
+				pr_debug("%s: not match cmd = 0x%x\n",
+					__func__, ptr[0]);
+				break;
+			}
+		}
+	} else if (data->opcode ==  VOICE_EVT_GET_PARAM_ACK) {
+		rtac_make_voice_callback(RTAC_CVP, data->payload,
+			data->payload_size);
+	}
+	return 0;
+}
+
+static int __init voice_init(void)
+{
+	int rc = 0, i = 0;
+	int len;
+
+	memset(&common, 0, sizeof(struct common_data));
+
+	/* Allocate ION memory for voice (CVP/CVS) calibration data */
+	common.client = msm_ion_client_create(UINT_MAX, "voip_client");
+	if (IS_ERR_OR_NULL((void *)common.client)) {
+		pr_err("%s: ION create client for Voip failed\n", __func__);
+		goto cont;
+	}
+	common.cvp_cal.handle = ion_alloc(common.client, CVP_CAL_SIZE, SZ_4K,
+					  ION_HEAP(ION_AUDIO_HEAP_ID));
+	if (IS_ERR_OR_NULL((void *) common.cvp_cal.handle)) {
+		pr_err("%s: ION memory allocation for CVP failed\n",
+			__func__);
+		ion_client_destroy(common.client);
+		goto cont;
+	}
+
+	rc = ion_phys(common.client, common.cvp_cal.handle,
+		  (ion_phys_addr_t *)&common.cvp_cal.phy, (size_t *)&len);
+	if (rc) {
+		pr_err("%s: ION Get Physical for cvp failed, rc = %d\n",
+			__func__, rc);
+		ion_free(common.client, common.cvp_cal.handle);
+		ion_client_destroy(common.client);
+		goto cont;
+	}
+
+	common.cvp_cal.buf = ion_map_kernel(common.client,
+					common.cvp_cal.handle, 0);
+	if (IS_ERR_OR_NULL((void *) common.cvp_cal.buf)) {
+		pr_err("%s: ION memory mapping for cvp failed\n", __func__);
+		common.cvp_cal.buf = NULL;
+		ion_free(common.client, common.cvp_cal.handle);
+		ion_client_destroy(common.client);
+		goto cont;
+	}
+	memset((void *)common.cvp_cal.buf, 0, CVP_CAL_SIZE);
+
+	common.cvs_cal.handle = ion_alloc(common.client, CVS_CAL_SIZE, SZ_4K,
+					 ION_HEAP(ION_AUDIO_HEAP_ID));
+	if (IS_ERR_OR_NULL((void *) common.cvs_cal.handle)) {
+		pr_err("%s: ION memory allocation for CVS failed\n",
+			__func__);
+		goto cont;
+	}
+
+	rc = ion_phys(common.client, common.cvs_cal.handle,
+		  (ion_phys_addr_t *)&common.cvs_cal.phy, (size_t *)&len);
+	if (rc) {
+		pr_err("%s: ION Get Physical for cvs failed, rc = %d\n",
+			__func__, rc);
+		ion_free(common.client, common.cvs_cal.handle);
+		goto cont;
+	}
+
+	common.cvs_cal.buf = ion_map_kernel(common.client,
+					common.cvs_cal.handle, 0);
+	if (IS_ERR_OR_NULL((void *) common.cvs_cal.buf)) {
+		pr_err("%s: ION memory mapping for cvs failed\n", __func__);
+		common.cvs_cal.buf = NULL;
+		ion_free(common.client, common.cvs_cal.handle);
+		goto cont;
+	}
+	memset((void *)common.cvs_cal.buf, 0, CVS_CAL_SIZE);
+cont:
+	/* set default value */
+	common.default_mute_val = 1;  /* default is mute */
+	common.default_vol_val = 0;
+	common.default_sample_val = 8000;
+
+	/* Initialize MVS info. */
+	common.mvs_info.network_type = VSS_NETWORK_ID_DEFAULT;
+
+	mutex_init(&common.common_lock);
+
+	for (i = 0; i < MAX_VOC_SESSIONS; i++) {
+		common.voice[i].session_id = SESSION_ID_BASE + i;
+
+		/* initialize dev_rx and dev_tx */
+		common.voice[i].dev_rx.volume = common.default_vol_val;
+		common.voice[i].dev_rx.mute =  0;
+		common.voice[i].dev_tx.mute = common.default_mute_val;
+
+		common.voice[i].dev_tx.port_id = 0x100B;
+		common.voice[i].dev_rx.port_id = 0x100A;
+		common.voice[i].sidetone_gain = 0x512;
+
+		common.voice[i].voc_state = VOC_INIT;
+
+		init_waitqueue_head(&common.voice[i].mvm_wait);
+		init_waitqueue_head(&common.voice[i].cvs_wait);
+		init_waitqueue_head(&common.voice[i].cvp_wait);
+
+		mutex_init(&common.voice[i].lock);
+	}
+
+	return rc;
+}
+
+device_initcall(voice_init);
diff --git a/sound/soc/msm/qdsp6v2/q6voice.h b/sound/soc/msm/qdsp6v2/q6voice.h
new file mode 100644
index 0000000..1bedb15
--- /dev/null
+++ b/sound/soc/msm/qdsp6v2/q6voice.h
@@ -0,0 +1,987 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#ifndef __QDSP6VOICE_H__
+#define __QDSP6VOICE_H__
+
+#include <mach/qdsp6v2/apr.h>
+#include <linux/ion.h>
+
+#define MAX_VOC_PKT_SIZE 642
+#define SESSION_NAME_LEN 20
+
+#define VOC_REC_UPLINK		0x00
+#define VOC_REC_DOWNLINK	0x01
+#define VOC_REC_BOTH		0x02
+
+struct voice_header {
+	uint32_t id;
+	uint32_t data_len;
+};
+
+struct voice_init {
+	struct voice_header hdr;
+	void *cb_handle;
+};
+
+/* Device information payload structure */
+
+struct device_data {
+	uint32_t volume; /* in index */
+	uint32_t mute;
+	uint32_t sample;
+	uint32_t enabled;
+	uint32_t dev_id;
+	uint32_t port_id;
+};
+
+struct voice_dev_route_state {
+	u16 rx_route_flag;
+	u16 tx_route_flag;
+};
+
+struct voice_rec_route_state {
+	u16 ul_flag;
+	u16 dl_flag;
+};
+
+enum {
+	VOC_INIT = 0,
+	VOC_RUN,
+	VOC_CHANGE,
+	VOC_RELEASE,
+};
+
+/* Common */
+#define VSS_ICOMMON_CMD_SET_UI_PROPERTY 0x00011103
+/* Set a UI property */
+#define VSS_ICOMMON_CMD_MAP_MEMORY   0x00011025
+#define VSS_ICOMMON_CMD_UNMAP_MEMORY 0x00011026
+/* General shared memory; byte-accessible, 4 kB-aligned. */
+#define VSS_ICOMMON_MAP_MEMORY_SHMEM8_4K_POOL  3
+
+struct vss_icommon_cmd_map_memory_t {
+	uint32_t phys_addr;
+	/* Physical address of a memory region; must be at least
+	 *  4 kB aligned.
+	 */
+
+	uint32_t mem_size;
+	/* Number of bytes in the region; should be a multiple of 32. */
+
+	uint16_t mem_pool_id;
+	/* Type of memory being provided. The memory ID implicitly defines
+	 *  the characteristics of the memory. The characteristics might include
+	 *  alignment type, permissions, etc.
+	 * Memory pool ID. Possible values:
+	 * 3 -- VSS_ICOMMON_MEM_TYPE_SHMEM8_4K_POOL.
+	 */
+} __packed;
+
+struct vss_icommon_cmd_unmap_memory_t {
+	uint32_t phys_addr;
+	/* Physical address of a memory region; must be at least
+	 *  4 kB aligned.
+	 */
+} __packed;
+
+struct vss_map_memory_cmd {
+	struct apr_hdr hdr;
+	struct vss_icommon_cmd_map_memory_t vss_map_mem;
+} __packed;
+
+struct vss_unmap_memory_cmd {
+	struct apr_hdr hdr;
+	struct vss_icommon_cmd_unmap_memory_t vss_unmap_mem;
+} __packed;
+
+/* TO MVM commands */
+#define VSS_IMVM_CMD_CREATE_PASSIVE_CONTROL_SESSION	0x000110FF
+/**< No payload. Wait for APRV2_IBASIC_RSP_RESULT response. */
+
+#define VSS_IMVM_CMD_SET_POLICY_DUAL_CONTROL	0x00011327
+/*
+ * VSS_IMVM_CMD_SET_POLICY_DUAL_CONTROL
+ * Description: This command is required to let MVM know
+ * who is in control of the session.
+ * Payload: Defined by vss_imvm_cmd_set_policy_dual_control_t.
+ * Result: Wait for APRV2_IBASIC_RSP_RESULT response.
+ */
+
+#define VSS_IMVM_CMD_CREATE_FULL_CONTROL_SESSION	0x000110FE
+/* Create a new full control MVM session. */
+
+#define APRV2_IBASIC_CMD_DESTROY_SESSION		0x0001003C
+/**< No payload. Wait for APRV2_IBASIC_RSP_RESULT response. */
+
+#define VSS_IMVM_CMD_ATTACH_STREAM			0x0001123C
+/* Attach a stream to the MVM. */
+
+#define VSS_IMVM_CMD_DETACH_STREAM			0x0001123D
+/* Detach a stream from the MVM. */
+
+#define VSS_IMVM_CMD_ATTACH_VOCPROC		       0x0001123E
+/* Attach a vocproc to the MVM. The MVM will symmetrically connect this vocproc
+ * to all the streams currently attached to it.
+ */
+
+#define VSS_IMVM_CMD_DETACH_VOCPROC			0x0001123F
+/* Detach a vocproc from the MVM. The MVM will symmetrically disconnect this
+ * vocproc from all the streams to which it is currently attached.
+ */
+
+#define VSS_IMVM_CMD_START_VOICE			0x00011190
+/**< No payload. Wait for APRV2_IBASIC_RSP_RESULT response. */
+
+#define VSS_IMVM_CMD_STOP_VOICE				0x00011192
+/**< No payload. Wait for APRV2_IBASIC_RSP_RESULT response. */
+
+#define VSS_ISTREAM_CMD_ATTACH_VOCPROC			0x000110F8
+/**< Wait for APRV2_IBASIC_RSP_RESULT response. */
+
+#define VSS_ISTREAM_CMD_DETACH_VOCPROC			0x000110F9
+/**< Wait for APRV2_IBASIC_RSP_RESULT response. */
+
+#define VSS_ISTREAM_CMD_SET_TTY_MODE			0x00011196
+/**< Wait for APRV2_IBASIC_RSP_RESULT response. */
+
+#define VSS_ICOMMON_CMD_SET_NETWORK			0x0001119C
+/* Set the network type. */
+
+#define VSS_ICOMMON_CMD_SET_VOICE_TIMING		0x000111E0
+/* Set the voice timing parameters. */
+
+#define VSS_IWIDEVOICE_CMD_SET_WIDEVOICE                0x00011243
+/* Enable/disable WideVoice */
+
+enum msm_audio_voc_rate {
+		VOC_0_RATE, /* Blank frame */
+		VOC_8_RATE, /* 1/8 rate    */
+		VOC_4_RATE, /* 1/4 rate    */
+		VOC_2_RATE, /* 1/2 rate    */
+		VOC_1_RATE  /* Full rate   */
+};
+
+struct vss_istream_cmd_set_tty_mode_t {
+	uint32_t mode;
+	/**<
+	* TTY mode.
+	*
+	* 0 : TTY disabled
+	* 1 : HCO
+	* 2 : VCO
+	* 3 : FULL
+	*/
+} __packed;
+
+struct vss_istream_cmd_attach_vocproc_t {
+	uint16_t handle;
+	/**< Handle of vocproc being attached. */
+} __packed;
+
+struct vss_istream_cmd_detach_vocproc_t {
+	uint16_t handle;
+	/**< Handle of vocproc being detached. */
+} __packed;
+
+struct vss_imvm_cmd_attach_stream_t {
+	uint16_t handle;
+	/* The stream handle to attach. */
+} __packed;
+
+struct vss_imvm_cmd_detach_stream_t {
+	uint16_t handle;
+	/* The stream handle to detach. */
+} __packed;
+
+struct vss_icommon_cmd_set_network_t {
+	uint32_t network_id;
+	/* Network ID. (Refer to VSS_NETWORK_ID_XXX). */
+} __packed;
+
+struct vss_icommon_cmd_set_voice_timing_t {
+	uint16_t mode;
+	/*
+	 * The vocoder frame synchronization mode.
+	 *
+	 * 0 : No frame sync.
+	 * 1 : Hard VFR (20ms Vocoder Frame Reference interrupt).
+	 */
+	uint16_t enc_offset;
+	/*
+	 * The offset in microseconds from the VFR to deliver a Tx vocoder
+	 * packet. The offset should be less than 20000us.
+	 */
+	uint16_t dec_req_offset;
+	/*
+	 * The offset in microseconds from the VFR to request for an Rx vocoder
+	 * packet. The offset should be less than 20000us.
+	 */
+	uint16_t dec_offset;
+	/*
+	 * The offset in microseconds from the VFR to indicate the deadline to
+	 * receive an Rx vocoder packet. The offset should be less than 20000us.
+	 * Rx vocoder packets received after this deadline are not guaranteed to
+	 * be processed.
+	 */
+} __packed;
+
+struct vss_imvm_cmd_create_control_session_t {
+	char name[SESSION_NAME_LEN];
+	/*
+	* A variable-sized stream name.
+	*
+	* The stream name size is the payload size minus the size of the other
+	* fields.
+	*/
+} __packed;
+
+struct vss_imvm_cmd_set_policy_dual_control_t {
+	bool enable_flag;
+	/* Set to TRUE to enable modem state machine control */
+} __packed;
+
+struct vss_iwidevoice_cmd_set_widevoice_t {
+	uint32_t enable;
+	/* WideVoice enable/disable; possible values:
+	* - 0 -- WideVoice disabled
+	* - 1 -- WideVoice enabled
+	*/
+} __packed;
+
+struct mvm_attach_vocproc_cmd {
+	struct apr_hdr hdr;
+	struct vss_istream_cmd_attach_vocproc_t mvm_attach_cvp_handle;
+} __packed;
+
+struct mvm_detach_vocproc_cmd {
+	struct apr_hdr hdr;
+	struct vss_istream_cmd_detach_vocproc_t mvm_detach_cvp_handle;
+} __packed;
+
+struct mvm_create_ctl_session_cmd {
+	struct apr_hdr hdr;
+	struct vss_imvm_cmd_create_control_session_t mvm_session;
+} __packed;
+
+struct mvm_modem_dual_control_session_cmd {
+	struct apr_hdr hdr;
+	struct vss_imvm_cmd_set_policy_dual_control_t voice_ctl;
+} __packed;
+
+struct mvm_set_tty_mode_cmd {
+	struct apr_hdr hdr;
+	struct vss_istream_cmd_set_tty_mode_t tty_mode;
+} __packed;
+
+struct mvm_attach_stream_cmd {
+	struct apr_hdr hdr;
+	struct vss_imvm_cmd_attach_stream_t attach_stream;
+} __packed;
+
+struct mvm_detach_stream_cmd {
+	struct apr_hdr hdr;
+	struct vss_imvm_cmd_detach_stream_t detach_stream;
+} __packed;
+
+struct mvm_set_network_cmd {
+	struct apr_hdr hdr;
+	struct vss_icommon_cmd_set_network_t network;
+} __packed;
+
+struct mvm_set_voice_timing_cmd {
+	struct apr_hdr hdr;
+	struct vss_icommon_cmd_set_voice_timing_t timing;
+} __packed;
+
+struct mvm_set_widevoice_enable_cmd {
+	struct apr_hdr hdr;
+	struct vss_iwidevoice_cmd_set_widevoice_t vss_set_wv;
+} __packed;
+
+/* TO CVS commands */
+#define VSS_ISTREAM_CMD_CREATE_PASSIVE_CONTROL_SESSION	0x00011140
+/**< Wait for APRV2_IBASIC_RSP_RESULT response. */
+
+#define VSS_ISTREAM_CMD_CREATE_FULL_CONTROL_SESSION	0x000110F7
+/* Create a new full control stream session. */
+
+#define APRV2_IBASIC_CMD_DESTROY_SESSION		0x0001003C
+
+#define VSS_ISTREAM_CMD_SET_MUTE			0x00011022
+
+#define VSS_ISTREAM_CMD_REGISTER_CALIBRATION_DATA	0x00011279
+
+#define VSS_ISTREAM_CMD_DEREGISTER_CALIBRATION_DATA     0x0001127A
+
+#define VSS_ISTREAM_CMD_SET_MEDIA_TYPE			0x00011186
+/* Set media type on the stream. */
+
+#define VSS_ISTREAM_EVT_SEND_ENC_BUFFER			0x00011015
+/* Event sent by the stream to its client to provide an encoded packet. */
+
+#define VSS_ISTREAM_EVT_REQUEST_DEC_BUFFER		0x00011017
+/* Event sent by the stream to its client requesting a decoder packet.
+ * The client should respond with a VSS_ISTREAM_EVT_SEND_DEC_BUFFER event.
+ */
+
+#define VSS_ISTREAM_EVT_SEND_DEC_BUFFER			0x00011016
+/* Event sent by the client to the stream in response to a
+ * VSS_ISTREAM_EVT_REQUEST_DEC_BUFFER event, providing a decoder packet.
+ */
+
+#define VSS_ISTREAM_CMD_VOC_AMR_SET_ENC_RATE		0x0001113E
+/* Set AMR encoder rate. */
+
+#define VSS_ISTREAM_CMD_VOC_AMRWB_SET_ENC_RATE		0x0001113F
+/* Set AMR-WB encoder rate. */
+
+#define VSS_ISTREAM_CMD_CDMA_SET_ENC_MINMAX_RATE	0x00011019
+/* Set encoder minimum and maximum rate. */
+
+#define VSS_ISTREAM_CMD_SET_ENC_DTX_MODE		0x0001101D
+/* Set encoder DTX mode. */
+
+#define MODULE_ID_VOICE_MODULE_FENS			0x00010EEB
+#define MODULE_ID_VOICE_MODULE_ST			0x00010EE3
+#define VOICE_PARAM_MOD_ENABLE				0x00010E00
+#define MOD_ENABLE_PARAM_LEN				4
+
+#define VSS_ISTREAM_CMD_START_PLAYBACK                  0x00011238
+/* Start in-call music delivery on the Tx voice path. */
+
+#define VSS_ISTREAM_CMD_STOP_PLAYBACK                   0x00011239
+/* Stop the in-call music delivery on the Tx voice path. */
+
+#define VSS_ISTREAM_CMD_START_RECORD                    0x00011236
+/* Start in-call conversation recording. */
+#define VSS_ISTREAM_CMD_STOP_RECORD                     0x00011237
+/* Stop in-call conversation recording. */
+
+#define VSS_TAP_POINT_NONE                              0x00010F78
+/* Indicates no tapping for specified path. */
+
+#define VSS_TAP_POINT_STREAM_END                        0x00010F79
+/* Indicates that specified path should be tapped at the end of the stream. */
+
+struct vss_istream_cmd_start_record_t {
+	uint32_t rx_tap_point;
+	/* Tap point to use on the Rx path. Supported values are:
+	 * VSS_TAP_POINT_NONE : Do not record Rx path.
+	 * VSS_TAP_POINT_STREAM_END : Rx tap point is at the end of the stream.
+	 */
+	uint32_t tx_tap_point;
+	/* Tap point to use on the Tx path. Supported values are:
+	 * VSS_TAP_POINT_NONE : Do not record Tx path.
+	 * VSS_TAP_POINT_STREAM_END : Tx tap point is at the end of the stream.
+	 */
+} __packed;
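+
+/*
+ * Illustrative example (not part of the interface): recording both voice
+ * paths at the end of the stream uses the tap points defined above.
+ *
+ *	struct vss_istream_cmd_start_record_t rec = {
+ *		.rx_tap_point = VSS_TAP_POINT_STREAM_END,
+ *		.tx_tap_point = VSS_TAP_POINT_STREAM_END,
+ *	};
+ */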
+
+struct vss_istream_cmd_create_passive_control_session_t {
+	char name[SESSION_NAME_LEN];
+	/**<
+	* A variable-sized stream name.
+	*
+	* The stream name size is the payload size minus the size of the other
+	* fields.
+	*/
+} __packed;
+
+struct vss_istream_cmd_set_mute_t {
+	uint16_t direction;
+	/**<
+	* 0 : TX only
+	* 1 : RX only
+	* 2 : TX and RX
+	*/
+	uint16_t mute_flag;
+	/**<
+	* Mute, un-mute.
+	*
+	* 0 : Silence disable
+	* 1 : Silence enable
+	* 2 : CNG enable. Applicable to TX only. If set on RX, the
+	*     behavior is the same as 1
+	*/
+} __packed;
+
+struct vss_istream_cmd_create_full_control_session_t {
+	uint16_t direction;
+	/*
+	 * Stream direction.
+	 *
+	 * 0 : TX only
+	 * 1 : RX only
+	 * 2 : TX and RX
+	 * 3 : TX and RX loopback
+	 */
+	uint32_t enc_media_type;
+	/* Tx vocoder type. (Refer to VSS_MEDIA_ID_XXX). */
+	uint32_t dec_media_type;
+	/* Rx vocoder type. (Refer to VSS_MEDIA_ID_XXX). */
+	uint32_t network_id;
+	/* Network ID. (Refer to VSS_NETWORK_ID_XXX). */
+	char name[SESSION_NAME_LEN];
+	/*
+	 * A variable-sized stream name.
+	 *
+	 * The stream name size is the payload size minus the size of the other
+	 * fields.
+	 */
+} __packed;
+
+struct vss_istream_cmd_set_media_type_t {
+	uint32_t rx_media_id;
+	/* Set the Rx vocoder type. (Refer to VSS_MEDIA_ID_XXX). */
+	uint32_t tx_media_id;
+	/* Set the Tx vocoder type. (Refer to VSS_MEDIA_ID_XXX). */
+} __packed;
+
+struct vss_istream_evt_send_enc_buffer_t {
+	uint32_t media_id;
+	/* Media ID of the packet. */
+	uint8_t packet_data[MAX_VOC_PKT_SIZE];
+	/* Packet data buffer. */
+} __packed;
+
+struct vss_istream_evt_send_dec_buffer_t {
+	uint32_t media_id;
+	/* Media ID of the packet. */
+	uint8_t packet_data[MAX_VOC_PKT_SIZE];
+	/* Packet data. */
+} __packed;
+
+struct vss_istream_cmd_voc_amr_set_enc_rate_t {
+	uint32_t mode;
+	/* Set the AMR encoder rate.
+	 *
+	 * 0x00000000 : 4.75 kbps
+	 * 0x00000001 : 5.15 kbps
+	 * 0x00000002 : 5.90 kbps
+	 * 0x00000003 : 6.70 kbps
+	 * 0x00000004 : 7.40 kbps
+	 * 0x00000005 : 7.95 kbps
+	 * 0x00000006 : 10.2 kbps
+	 * 0x00000007 : 12.2 kbps
+	 */
+} __packed;
+
+struct vss_istream_cmd_voc_amrwb_set_enc_rate_t {
+	uint32_t mode;
+	/* Set the AMR-WB encoder rate.
+	 *
+	 * 0x00000000 :  6.60 kbps
+	 * 0x00000001 :  8.85 kbps
+	 * 0x00000002 : 12.65 kbps
+	 * 0x00000003 : 14.25 kbps
+	 * 0x00000004 : 15.85 kbps
+	 * 0x00000005 : 18.25 kbps
+	 * 0x00000006 : 19.85 kbps
+	 * 0x00000007 : 23.05 kbps
+	 * 0x00000008 : 23.85 kbps
+	 */
+} __packed;
+
+struct vss_istream_cmd_cdma_set_enc_minmax_rate_t {
+	uint16_t min_rate;
+	/* Set the lower bound encoder rate.
+	 *
+	 * 0x0000 : Blank frame
+	 * 0x0001 : Eighth rate
+	 * 0x0002 : Quarter rate
+	 * 0x0003 : Half rate
+	 * 0x0004 : Full rate
+	 */
+	uint16_t max_rate;
+	/* Set the upper bound encoder rate.
+	 *
+	 * 0x0000 : Blank frame
+	 * 0x0001 : Eighth rate
+	 * 0x0002 : Quarter rate
+	 * 0x0003 : Half rate
+	 * 0x0004 : Full rate
+	 */
+} __packed;
+
+struct vss_istream_cmd_set_enc_dtx_mode_t {
+	uint32_t enable;
+	/* Toggle DTX on or off.
+	 *
+	 * 0 : Disables DTX
+	 * 1 : Enables DTX
+	 */
+} __packed;
+
+struct vss_istream_cmd_register_calibration_data_t {
+	uint32_t phys_addr;
+	/* Physical address to be registered with the stream. The calibration
+	 * data is stored at this address.
+	 */
+	uint32_t mem_size;
+	/* Size of the calibration data in bytes. */
+};
+
+struct vss_icommon_cmd_set_ui_property_enable_t {
+	uint32_t module_id;
+	/* Unique ID of the module. */
+	uint32_t param_id;
+	/* Unique ID of the parameter. */
+	uint16_t param_size;
+	/* Size of the parameter in bytes: MOD_ENABLE_PARAM_LEN */
+	uint16_t reserved;
+	/* Reserved; set to 0. */
+	uint16_t enable;
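+	/* Set to 1 to enable or 0 to disable the module. */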
+	uint16_t reserved_field;
+	/* Reserved; set to 0. */
+};
+
+struct cvs_create_passive_ctl_session_cmd {
+	struct apr_hdr hdr;
+	struct vss_istream_cmd_create_passive_control_session_t cvs_session;
+} __packed;
+
+struct cvs_create_full_ctl_session_cmd {
+	struct apr_hdr hdr;
+	struct vss_istream_cmd_create_full_control_session_t cvs_session;
+} __packed;
+
+struct cvs_destroy_session_cmd {
+	struct apr_hdr hdr;
+} __packed;
+
+struct cvs_set_mute_cmd {
+	struct apr_hdr hdr;
+	struct vss_istream_cmd_set_mute_t cvs_set_mute;
+} __packed;
+
+struct cvs_set_media_type_cmd {
+	struct apr_hdr hdr;
+	struct vss_istream_cmd_set_media_type_t media_type;
+} __packed;
+
+struct cvs_send_dec_buf_cmd {
+	struct apr_hdr hdr;
+	struct vss_istream_evt_send_dec_buffer_t dec_buf;
+} __packed;
+
+struct cvs_set_amr_enc_rate_cmd {
+	struct apr_hdr hdr;
+	struct vss_istream_cmd_voc_amr_set_enc_rate_t amr_rate;
+} __packed;
+
+struct cvs_set_amrwb_enc_rate_cmd {
+	struct apr_hdr hdr;
+	struct vss_istream_cmd_voc_amrwb_set_enc_rate_t amrwb_rate;
+} __packed;
+
+struct cvs_set_cdma_enc_minmax_rate_cmd {
+	struct apr_hdr hdr;
+	struct vss_istream_cmd_cdma_set_enc_minmax_rate_t cdma_rate;
+} __packed;
+
+struct cvs_set_enc_dtx_mode_cmd {
+	struct apr_hdr hdr;
+	struct vss_istream_cmd_set_enc_dtx_mode_t dtx_mode;
+} __packed;
+
+struct cvs_register_cal_data_cmd {
+	struct apr_hdr hdr;
+	struct vss_istream_cmd_register_calibration_data_t cvs_cal_data;
+} __packed;
+
+struct cvs_deregister_cal_data_cmd {
+	struct apr_hdr hdr;
+} __packed;
+
+struct cvs_set_pp_enable_cmd {
+	struct apr_hdr hdr;
+	struct vss_icommon_cmd_set_ui_property_enable_t vss_set_pp;
+} __packed;
+struct cvs_start_record_cmd {
+	struct apr_hdr hdr;
+	struct vss_istream_cmd_start_record_t rec_mode;
+} __packed;
+
+/* TO CVP commands */
+
+#define VSS_IVOCPROC_CMD_CREATE_FULL_CONTROL_SESSION	0x000100C3
+/**< Wait for APRV2_IBASIC_RSP_RESULT response. */
+
+#define APRV2_IBASIC_CMD_DESTROY_SESSION		0x0001003C
+
+#define VSS_IVOCPROC_CMD_SET_DEVICE			0x000100C4
+
+#define VSS_IVOCPROC_CMD_SET_VP3_DATA			0x000110EB
+
+#define VSS_IVOCPROC_CMD_SET_RX_VOLUME_INDEX		0x000110EE
+
+#define VSS_IVOCPROC_CMD_ENABLE				0x000100C6
+/**< No payload. Wait for APRV2_IBASIC_RSP_RESULT response. */
+
+#define VSS_IVOCPROC_CMD_DISABLE			0x000110E1
+/**< No payload. Wait for APRV2_IBASIC_RSP_RESULT response. */
+
+#define VSS_IVOCPROC_CMD_REGISTER_CALIBRATION_DATA	0x00011275
+#define VSS_IVOCPROC_CMD_DEREGISTER_CALIBRATION_DATA    0x00011276
+
+#define VSS_IVOCPROC_CMD_REGISTER_VOLUME_CAL_TABLE      0x00011277
+#define VSS_IVOCPROC_CMD_DEREGISTER_VOLUME_CAL_TABLE    0x00011278
+
+#define VSS_IVOCPROC_TOPOLOGY_ID_NONE			0x00010F70
+#define VSS_IVOCPROC_TOPOLOGY_ID_TX_SM_ECNS		0x00010F71
+#define VSS_IVOCPROC_TOPOLOGY_ID_TX_DM_FLUENCE		0x00010F72
+
+#define VSS_IVOCPROC_TOPOLOGY_ID_RX_DEFAULT		0x00010F77
+
+/* Network IDs */
+#define VSS_NETWORK_ID_DEFAULT				0x00010037
+#define VSS_NETWORK_ID_VOIP_NB				0x00011240
+#define VSS_NETWORK_ID_VOIP_WB				0x00011241
+#define VSS_NETWORK_ID_VOIP_WV				0x00011242
+
+/* Media types */
+#define VSS_MEDIA_ID_EVRC_MODEM		0x00010FC2
+/* 80-VF690-47 CDMA enhanced variable rate vocoder modem format. */
+#define VSS_MEDIA_ID_AMR_NB_MODEM	0x00010FC6
+/* 80-VF690-47 UMTS AMR-NB vocoder modem format. */
+#define VSS_MEDIA_ID_AMR_WB_MODEM	0x00010FC7
+/* 80-VF690-47 UMTS AMR-WB vocoder modem format. */
+#define VSS_MEDIA_ID_PCM_NB		0x00010FCB
+#define VSS_MEDIA_ID_PCM_WB		0x00010FCC
+/* Linear PCM (16-bit, little-endian). */
+#define VSS_MEDIA_ID_G711_ALAW		0x00010FCD
+/* G.711 a-law (contains two 10ms vocoder frames). */
+#define VSS_MEDIA_ID_G711_MULAW		0x00010FCE
+/* G.711 mu-law (contains two 10ms vocoder frames). */
+#define VSS_MEDIA_ID_G729		0x00010FD0
+/* G.729AB (contains two 10ms vocoder frames). */
+#define VSS_MEDIA_ID_4GV_NB_MODEM	0x00010FC3
+/* CDMA EVRC-B vocoder modem format. */
+#define VSS_MEDIA_ID_4GV_WB_MODEM	0x00010FC4
+/* CDMA EVRC-WB vocoder modem format. */
+
+#define VSS_IVOCPROC_CMD_SET_MUTE			0x000110EF
+
+#define VOICE_CMD_SET_PARAM				0x00011006
+#define VOICE_CMD_GET_PARAM				0x00011007
+#define VOICE_EVT_GET_PARAM_ACK				0x00011008
+
+struct vss_ivocproc_cmd_create_full_control_session_t {
+	uint16_t direction;
+	/*
+	 * Stream direction.
+	 * 0 : TX only
+	 * 1 : RX only
+	 * 2 : TX and RX
+	 */
+	uint32_t tx_port_id;
+	/*
+	 * TX device port ID which vocproc will connect to. If not supplying
+	 * a port ID, set to VSS_IVOCPROC_PORT_ID_NONE.
+	 */
+	uint32_t tx_topology_id;
+	/*
+	 * Tx leg topology ID. If not supplying a topology ID, set to
+	 * VSS_IVOCPROC_TOPOLOGY_ID_NONE.
+	 */
+	uint32_t rx_port_id;
+	/*
+	 * RX device port ID which vocproc will connect to. If not supplying
+	 * a port ID, set to VSS_IVOCPROC_PORT_ID_NONE.
+	 */
+	uint32_t rx_topology_id;
+	/*
+	 * Rx leg topology ID. If not supplying a topology ID, set to
+	 * VSS_IVOCPROC_TOPOLOGY_ID_NONE.
+	 */
+	int32_t network_id;
+	/*
+	 * Network ID. (Refer to VSS_NETWORK_ID_XXX). If not supplying a
+	 * network ID, set to VSS_NETWORK_ID_DEFAULT.
+	 */
+} __packed;
+
+struct vss_ivocproc_cmd_set_volume_index_t {
+	uint16_t vol_index;
+	/**<
+	* Volume index utilized by the vocproc to index into the volume table
+	* provided in VSS_IVOCPROC_CMD_CACHE_VOLUME_CALIBRATION_TABLE and set
+	* volume on the VDSP.
+	*/
+} __packed;
+
+struct vss_ivocproc_cmd_set_device_t {
+	uint32_t tx_port_id;
+	/**<
+	* TX device port ID which vocproc will connect to.
+	* VSS_IVOCPROC_PORT_ID_NONE means vocproc will not connect to any port.
+	*/
+	uint32_t tx_topology_id;
+	/**<
+	* TX leg topology ID.
+	* VSS_IVOCPROC_TOPOLOGY_ID_NONE means vocproc does not contain any
+	* pre/post-processing blocks and is pass-through.
+	*/
+	int32_t rx_port_id;
+	/**<
+	* RX device port ID which vocproc will connect to.
+	* VSS_IVOCPROC_PORT_ID_NONE means vocproc will not connect to any port.
+	*/
+	uint32_t rx_topology_id;
+	/**<
+	* RX leg topology ID.
+	* VSS_IVOCPROC_TOPOLOGY_ID_NONE means vocproc does not contain any
+	* pre/post-processing blocks and is pass-through.
+	*/
+} __packed;
+
+struct vss_ivocproc_cmd_register_calibration_data_t {
+	uint32_t phys_addr;
+	/* Physical address to be registered with the vocproc. Calibration
+	 * data is stored at this address.
+	 */
+	uint32_t mem_size;
+	/* Size of the calibration data in bytes. */
+} __packed;
+
+struct vss_ivocproc_cmd_register_volume_cal_table_t {
+	uint32_t phys_addr;
+	/* Physical address to be registered with the vocproc. The volume
+	 * calibration table is stored at this location.
+	 */
+
+	uint32_t mem_size;
+	/* Size of the volume calibration table in bytes. */
+} __packed;
+
+struct vss_ivocproc_cmd_set_mute_t {
+	uint16_t direction;
+	/*
+	* 0 : TX only.
+	* 1 : RX only.
+	* 2 : TX and RX.
+	*/
+	uint16_t mute_flag;
+	/*
+	* Mute, un-mute.
+	*
+	* 0 : Disable.
+	* 1 : Enable.
+	*/
+} __packed;
+
+struct cvp_create_full_ctl_session_cmd {
+	struct apr_hdr hdr;
+	struct vss_ivocproc_cmd_create_full_control_session_t cvp_session;
+} __packed;
+
+struct cvp_command {
+	struct apr_hdr hdr;
+} __packed;
+
+struct cvp_set_device_cmd {
+	struct apr_hdr hdr;
+	struct vss_ivocproc_cmd_set_device_t cvp_set_device;
+} __packed;
+
+struct cvp_set_vp3_data_cmd {
+	struct apr_hdr hdr;
+} __packed;
+
+struct cvp_set_rx_volume_index_cmd {
+	struct apr_hdr hdr;
+	struct vss_ivocproc_cmd_set_volume_index_t cvp_set_vol_idx;
+} __packed;
+
+struct cvp_register_cal_data_cmd {
+	struct apr_hdr hdr;
+	struct vss_ivocproc_cmd_register_calibration_data_t cvp_cal_data;
+} __packed;
+
+struct cvp_deregister_cal_data_cmd {
+	struct apr_hdr hdr;
+} __packed;
+
+struct cvp_register_vol_cal_table_cmd {
+	struct apr_hdr hdr;
+	struct vss_ivocproc_cmd_register_volume_cal_table_t cvp_vol_cal_tbl;
+} __packed;
+
+struct cvp_deregister_vol_cal_table_cmd {
+	struct apr_hdr hdr;
+} __packed;
+
+struct cvp_set_mute_cmd {
+	struct apr_hdr hdr;
+	struct vss_ivocproc_cmd_set_mute_t cvp_set_mute;
+} __packed;
+
+/* CB for up-link packets. */
+typedef void (*ul_cb_fn)(uint8_t *voc_pkt,
+			 uint32_t pkt_len,
+			 void *private_data);
+
+/* CB for down-link packets. */
+typedef void (*dl_cb_fn)(uint8_t *voc_pkt,
+			 uint32_t *pkt_len,
+			 void *private_data);
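+
+/*
+ * Sketch of what an MVS client might plug into these callbacks (purely
+ * illustrative; the function names are hypothetical). The uplink callback
+ * consumes an encoded packet from the DSP, while the downlink callback
+ * fills a packet for the DSP and reports its length through *pkt_len.
+ *
+ *	static void example_ul_cb(uint8_t *voc_pkt, uint32_t pkt_len,
+ *				  void *private_data)
+ *	{
+ *		<copy pkt_len bytes out of voc_pkt>
+ *	}
+ *
+ *	static void example_dl_cb(uint8_t *voc_pkt, uint32_t *pkt_len,
+ *				  void *private_data)
+ *	{
+ *		<fill voc_pkt and set *pkt_len to the bytes written>
+ *	}
+ */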
+
+
+struct mvs_driver_info {
+	uint32_t media_type;
+	uint32_t rate;
+	uint32_t network_type;
+	uint32_t dtx_mode;
+	ul_cb_fn ul_cb;
+	dl_cb_fn dl_cb;
+	void *private_data;
+};
+
+struct incall_rec_info {
+	uint32_t rec_enable;
+	uint32_t rec_mode;
+	uint32_t recording;
+};
+
+struct incall_music_info {
+	uint32_t play_enable;
+	uint32_t playing;
+	int count;
+	int force;
+};
+
+struct voice_data {
+	int voc_state; /* INIT, CHANGE, RELEASE, RUN */
+
+	wait_queue_head_t mvm_wait;
+	wait_queue_head_t cvs_wait;
+	wait_queue_head_t cvp_wait;
+
+	/* cache the values related to Rx and Tx */
+	struct device_data dev_rx;
+	struct device_data dev_tx;
+
+	u32 mvm_state;
+	u32 cvs_state;
+	u32 cvp_state;
+
+	/* Handle to MVM in the Q6 */
+	u16 mvm_handle;
+	/* Handle to CVS in the Q6 */
+	u16 cvs_handle;
+	/* Handle to CVP in the Q6 */
+	u16 cvp_handle;
+
+	struct mutex lock;
+
+	uint16_t sidetone_gain;
+	uint8_t tty_mode;
+	/* widevoice enable value */
+	uint8_t wv_enable;
+	/* slowtalk enable value */
+	uint32_t st_enable;
+	/* FENC enable value */
+	uint32_t fens_enable;
+
+	struct voice_dev_route_state voc_route_state;
+
+	u16 session_id;
+
+	struct incall_rec_info rec_info;
+
+	struct incall_music_info music_info;
+
+	struct voice_rec_route_state rec_route_state;
+};
+
+struct cal_mem {
+	struct ion_handle *handle;
+	uint32_t phy;
+	void *buf;
+};
+
+#define MAX_VOC_SESSIONS 3
+#define SESSION_ID_BASE 0xFFF0
+
+struct common_data {
+	/* these default values are for all devices */
+	uint32_t default_mute_val;
+	uint32_t default_vol_val;
+	uint32_t default_sample_val;
+
+	/* APR to MVM in the Q6 */
+	void *apr_q6_mvm;
+	/* APR to CVS in the Q6 */
+	void *apr_q6_cvs;
+	/* APR to CVP in the Q6 */
+	void *apr_q6_cvp;
+
+	struct ion_client *client;
+	struct cal_mem cvp_cal;
+	struct cal_mem cvs_cal;
+
+	struct mutex common_lock;
+
+	struct mvs_driver_info mvs_info;
+
+	struct voice_data voice[MAX_VOC_SESSIONS];
+};
+
+void voc_register_mvs_cb(ul_cb_fn ul_cb,
+			dl_cb_fn dl_cb,
+			void *private_data);
+
+void voc_config_vocoder(uint32_t media_type,
+			uint32_t rate,
+			uint32_t network_type,
+			uint32_t dtx_mode);
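+
+/*
+ * Illustrative registration/configuration sequence (not a prescribed
+ * usage): the callbacks are the hypothetical examples sketched above,
+ * mvs_private_data is a placeholder, and the media type, rate, network
+ * and DTX values come from the tables earlier in this file (12.2 kbps
+ * AMR-NB, default network, DTX off).
+ *
+ *	voc_register_mvs_cb(example_ul_cb, example_dl_cb, mvs_private_data);
+ *	voc_config_vocoder(VSS_MEDIA_ID_AMR_NB_MODEM, 0x00000007,
+ *			   VSS_NETWORK_ID_DEFAULT, 0);
+ */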
+
+enum {
+	DEV_RX = 0,
+	DEV_TX,
+};
+
+enum {
+	RX_PATH = 0,
+	TX_PATH,
+};
+
+/* Called by the ALSA driver. */
+int voc_set_pp_enable(uint16_t session_id, uint32_t module_id, uint32_t enable);
+int voc_get_pp_enable(uint16_t session_id, uint32_t module_id);
+int voc_set_widevoice_enable(uint16_t session_id, uint32_t wv_enable);
+uint32_t voc_get_widevoice_enable(uint16_t session_id);
+uint8_t voc_get_tty_mode(uint16_t session_id);
+int voc_set_tty_mode(uint16_t session_id, uint8_t tty_mode);
+int voc_start_voice_call(uint16_t session_id);
+int voc_end_voice_call(uint16_t session_id);
+int voc_set_rxtx_port(uint16_t session_id,
+		      uint32_t dev_port_id,
+		      uint32_t dev_type);
+int voc_set_rx_vol_index(uint16_t session_id, uint32_t dir, uint32_t voc_idx);
+int voc_set_tx_mute(uint16_t session_id, uint32_t dir, uint32_t mute);
+int voc_set_rx_device_mute(uint16_t session_id, uint32_t mute);
+int voc_get_rx_device_mute(uint16_t session_id);
+int voc_disable_cvp(uint16_t session_id);
+int voc_enable_cvp(uint16_t session_id);
+int voc_set_route_flag(uint16_t session_id, uint8_t path_dir, uint8_t set);
+uint8_t voc_get_route_flag(uint16_t session_id, uint8_t path_dir);
+
+#define VOICE_SESSION_NAME "Voice session"
+#define VOIP_SESSION_NAME "VoIP session"
+#define VOLTE_SESSION_NAME "VoLTE session"
+uint16_t voc_get_session_id(char *name);
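+
+/*
+ * Typical call-flow sketch for an ALSA front end (illustrative only;
+ * error handling is omitted and the port IDs are placeholders):
+ *
+ *	u16 session_id = voc_get_session_id(VOICE_SESSION_NAME);
+ *
+ *	voc_set_rxtx_port(session_id, rx_port_id, DEV_RX);
+ *	voc_set_rxtx_port(session_id, tx_port_id, DEV_TX);
+ *	voc_start_voice_call(session_id);
+ *	...
+ *	voc_end_voice_call(session_id);
+ */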
+
+int voc_start_playback(uint32_t set);
+int voc_start_record(uint32_t port_id, uint32_t set);
+#endif
diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c
index fd3fe6a..65cee5d 100644
--- a/sound/soc/soc-pcm.c
+++ b/sound/soc/soc-pcm.c
@@ -469,7 +469,10 @@
 	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
 		snd_soc_dai_digital_mute(codec_dai, 1);
 
-	if (cpu_dai->driver->ops->shutdown) {
+	if (cpu_dai->driver->ops->shutdown)
+		cpu_dai->driver->ops->shutdown(substream, cpu_dai);
+
+	if (codec_dai->driver->ops->shutdown) {
 		if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
 			codec_dai->driver->ops->shutdown(substream, codec_dai);
 		} else {
@@ -479,9 +482,6 @@
 		}
 	}
 
-	if (codec_dai->driver->ops->shutdown)
-		codec_dai->driver->ops->shutdown(substream, codec_dai);
-
 	if (rtd->dai_link->ops && rtd->dai_link->ops->shutdown)
 		rtd->dai_link->ops->shutdown(substream);
 
diff --git a/tools/perf/builtin-periodic.c b/tools/perf/builtin-periodic.c
index 060e909..a0785c2 100644
--- a/tools/perf/builtin-periodic.c
+++ b/tools/perf/builtin-periodic.c
@@ -276,7 +276,7 @@
 static const struct option options[] = {
 	OPT_CALLBACK('e', "event", &evsel_list, "event",
 	"event selector. use 'perf list' to list available events",
-	 parse_events),
+	 parse_events_option),
 	OPT_STRING('m', "math-operations", &operations, "nnnnnn",
 	"math operation to perform on values collected asmd in order"),
 	OPT_STRING('r', "readpipe", &rfifo_name, "xxbadFiFo",