Merge "power_supply: add change the scope property" into msm-3.4
diff --git a/Documentation/block/test-iosched.txt b/Documentation/block/test-iosched.txt
new file mode 100644
index 0000000..75d8134
--- /dev/null
+++ b/Documentation/block/test-iosched.txt
@@ -0,0 +1,39 @@
+Test IO scheduler
+==================
+
+The test scheduler allows testing a block device by dispatching
+specific requests according to the test case and declaring PASS/FAIL
+according to the request completion error codes.
+
+The test IO scheduler implements the no-op scheduler operations and uses
+them to dispatch non-test requests when no test is running.
+This allows normal FS operation to continue in parallel with the test
+capability.
+The test IO scheduler keeps two different queues, one for real-world requests
+(inserted by the FS) and the other for test requests.
+The test IO scheduler chooses which queue to dispatch from according to the
+test state (IDLE/RUNNING).
+
+The test IO scheduler is compiled by default as a dynamic module and is
+enabled only if CONFIG_DEBUG_FS is defined.
+
+Each block device test utility that would like to use the test-iosched test
+services should register as a blk_dev_test_type and supply init and exit
+callbacks. These callbacks are called upon selection (or removal) of
+test-iosched as the active scheduler. From that point the block device test
+can start a test and supply its own callbacks for preparing, running, result
+checking and cleanup of the test.
+
+Each test is exposed via debugfs and can be triggered by writing to
+the corresponding debugfs file. To add a new test, expose a new debugfs
+file for the new test.
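+
+A block device test utility might register itself roughly as in the sketch
+below. This is only an illustrative sketch; the exact helper and field names
+(test_iosched_register(), init_fn, exit_fn) and the header location are
+assumptions and should be checked against the test-iosched sources.
+
+	#include <linux/test-iosched.h>	/* header location assumed */
+
+	/* Illustrative only -- verify the names against the test-iosched API */
+	static void my_block_test_init(void)
+	{
+		/* create the debugfs trigger file and set up test data */
+	}
+
+	static void my_block_test_exit(void)
+	{
+		/* remove the debugfs entries and free test data */
+	}
+
+	static struct blk_dev_test_type my_block_test = {
+		.init_fn = my_block_test_init,
+		.exit_fn = my_block_test_exit,
+	};
+
+	static int __init my_block_test_module_init(void)
+	{
+		/* a matching unregistration would normally go in module_exit() */
+		test_iosched_register(&my_block_test);
+		return 0;
+	}
+	module_init(my_block_test_module_init);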
+
+Selecting IO schedulers
+-----------------------
+Refer to Documentation/block/switching-sched.txt for information on
+selecting an io scheduler on a per-device basis.
+
+
+May 10 2012, Maya Erez <merez@codeaurora.org>
+
+
diff --git a/Documentation/devicetree/bindings/arm/msm/lpm-resources.txt b/Documentation/devicetree/bindings/arm/msm/lpm-resources.txt
new file mode 100644
index 0000000..9ff43a1
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/msm/lpm-resources.txt
@@ -0,0 +1,31 @@
+* Low Power Management Resources
+
+The application processor in the MSM can enter several different low power
+states depending on the sleep time and on the required system resources. The
+MSM cannot enter a given low power state if that state involves turning off
+shared resources that are required by other components of the system.
+The lpm-resources device tree node represents the shared resources whose
+usage requirements need to be monitored to check whether a given low power
+state can be entered. Each resource is identified by a combination of name,
+id, type and key, which is also used by the RPM to identify a shared resource.
+
+The required properties for lpm-resources are:
+
+- compatible: "qcom,lpm-resources"
+- reg: The numeric id of the resource.
+- qcom,name: The name of the low power resource.
+- qcom,type: The string representing the type of the resource,
+             e.g. smps or pxo.
+- qcom,id: The id representing a device within a resource type.
+- qcom,key: The key is the specific attribute of the resource being
+            monitored.
+
+Example:
+            qcom,lpm-resources@0 {
+                        reg = <0x0>;
+                        qcom,name = "vdd-dig";
+                        qcom,type = "smpb\0";
+                        qcom,id = <0x02>;
+                        qcom,key = "uv\0\0";
+                };
+
diff --git a/Documentation/devicetree/bindings/arm/msm/msm_watchdog.txt b/Documentation/devicetree/bindings/arm/msm/msm_watchdog.txt
new file mode 100644
index 0000000..9f0c922
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/msm/msm_watchdog.txt
@@ -0,0 +1,33 @@
+* Qualcomm MSM Watchdog
+
+The watchdog timer is configured with a bark and a bite time.
+If the watchdog is not "pet" at regular intervals, the system
+is assumed to have become non-responsive and needs to be reset.
+A warning in the form of a bark timeout leads to a bark interrupt
+and a kernel panic. If the watchdog timer is still not reset,
+a bite timeout occurs, which is an interrupt in secure mode
+that leads to a reset of the SoC via the secure watchdog. The
+driver needs the petting time and the bark timeout to be programmed
+into the watchdog, as well as the bark and bite IRQs.
+
+The device tree parameters for the watchdog are:
+
+Required parameters:
+
+- compatible : "qcom,msm-watchdog"
+- reg : offset and length of the register set for the watchdog block.
+- interrupts : should contain the bark and bite IRQ numbers
+- qcom,pet-time : Non-zero interval, in ms, at which the watchdog should be pet.
+- qcom,bark-time : Non-zero timeout, in ms, for a watchdog bark.
+- qcom,ipi-ping : set to 1 to send a keep-alive ping to the other CPUs, 0 otherwise.
+
+Example:
+
+	qcom,wdt@f9017000 {
+		compatible = "qcom,msm-watchdog";
+		reg = <0xf9017000 0x1000>;
+		interrupts = <0 3 0 0 4 0>;
+		qcom,bark-time = <11000>;
+		qcom,pet-time = <10000>;
+		qcom,ipi-ping = <1>;
+	};
diff --git a/Documentation/devicetree/bindings/gpio/qpnp-gpio.txt b/Documentation/devicetree/bindings/gpio/qpnp-gpio.txt
deleted file mode 100644
index 7cab09b..0000000
--- a/Documentation/devicetree/bindings/gpio/qpnp-gpio.txt
+++ /dev/null
@@ -1,143 +0,0 @@
-* msm-qpnp-gpio
-
-msm-qpnp-gpio is a GPIO chip driver for the MSM SPMI implementation.
-It creates a spmi_device for every spmi-dev-container block of device_nodes.
-These device_nodes contained within specify the PMIC GPIO number associated
-with each GPIO chip. The driver will map these to Linux GPIO numbers.
-
-[PMIC GPIO Device Declarations]
-
--Root Node-
-
-Required properties :
- - spmi-dev-container : Used to specify the following child nodes as part of the
-   same SPMI device.
- - gpio-controller : Specify as gpio-contoller. All child nodes will belong to this
-   gpio_chip.
- - #gpio-cells: We encode a PMIC GPIO number and a 32-bit flag field to
-   specify the gpio configuration. This must be set to '2'.
- - #address-cells: Specify one address field. This must be set to '1'.
- - #size-cells: Specify one size-cell. This must be set to '1'.
- - compatible = "qcom,qpnp-gpio" : Specify driver matching for this driver.
-
--Child Nodes-
-
-Required properties :
- - reg : Specify the spmi offset and size for this gpio device.
- - qcom,gpio-num : Specify the PMIC GPIO number for this gpio device.
-
-Optional configuration properties :
- -  qcom,direction:	indicates whether the gpio should be input, output, or
-			both.
-			QPNP_GPIO_DIR_IN   = 0,
-			QPNP_GPIO_DIR_OUT  = 1,
-			QPNP_GPIO_DIR_BOTH = 2
-
- - qcom,output-type:	indicates gpio should be configured as CMOS or open
-			drain.
-			QPNP_GPIO_OUT_BUF_CMOS = 0
-			QPNP_GPIO_OUT_BUF_OPEN_DRAIN_NMOS = 1,
-			QPNP_GPIO_OUT_BUF_OPEN_DRAIN_PMOS = 2,
-
- - qcom,invert:		Invert the signal of the gpio line -
-			QPNP_GPIO_INVERT_DISABLE = 0
-			QPNP_GPIO_INVERT_ENABLE = 1
-
- - qcom,pull:		Indicates whether a pull up or pull down should be
-			applied. If a pullup is required the current strength
-			needs to be specified. Current values of 30uA, 1.5uA,
-			31.5uA, 1.5uA with 30uA boost are supported.
-			QPNP_GPIO_PULL_UP_30	 = 0,
-			QPNP_GPIO_PULL_UP_1P5	 = 1,
-			QPNP_GPIO_PULL_UP_31P5	 = 2,
-			QPNP_GPIO_PULL_UP_1P5_30 = 3,
-			QPNP_GPIO_PULL_DN	 = 4,
-			QPNP_GPIO_PULL_NO	 = 5
-
-  - qcom,vin-sel:	specifies the voltage level when the output is set to 1.
-			For an input gpio specifies the voltage level at which
-			the input is interpreted as a logical 1.
-			QPNP_GPIO_VIN0 = 0,
-			QPNP_GPIO_VIN1 = 1,
-			QPNP_GPIO_VIN2 = 2,
-			QPNP_GPIO_VIN3 = 3,
-			QPNP_GPIO_VIN4 = 4,
-			QPNP_GPIO_VIN5 = 5,
-			QPNP_GPIO_VIN6 = 6,
-			QPNP_GPIO_VIN7 = 7
-
-  - qcom,out-strength:	the amount of current supplied for an output gpio.
-			QPNP_GPIO_OUT_STRENGTH_LOW  = 1
-			QPNP_GPIO_OUT_STRENGTH_MED  = 2,
-			QPNP_GPIO_OUT_STRENGTH_HIGH = 3,
-
-  - qcom,source-sel:	choose alternate function for the gpio. Certain gpios
-			can be paired (shorted) with each other. Some gpio pin
-			can act as alternate functions.
-			QPNP_GPIO_FUNC_NORMAL   = 0,
-			QPNP_GPIO_FUNC_PAIRED   = 1
-			QPNP_GPIO_FUNC_1	= 2,
-			QPNP_GPIO_FUNC_2	= 3,
-			QPNP_GPIO_DTEST1	= 4,
-			QPNP_GPIO_DTEST2	= 5,
-			QPNP_GPIO_DTEST3	= 6,
-			QPNP_GPIO_DTEST4	= 7
-
- - qcom,master-en:	1 = Enable features within the
-			GPIO block based on configurations.
-			0 = Completely disable the GPIO
-			block and let the pin float with high impedance
-			regardless of other settings.
-
-*Note: If any of the configuration properties are not specified, then the
-       qpnp-gpio driver will not modify that respective configuration in
-       hardware.
-
-[PMIC GPIO clients]
-
-Required properties :
- - gpios : Contains 3 fields of the form <&gpio_controller pmic_gpio_num flags>
-
-[Example]
-
-qpnp: qcom,spmi@fc4c0000 {
-		#address-cells = <1>;
-		#size-cells = <0>;
-		interrupt-controller;
-		#interrupt-cells = <3>;
-
-		qcom,pm8941@0 {
-			spmi-slave-container;
-			reg = <0x0>;
-			#address-cells = <1>;
-			#size-cells = <1>;
-
-			pm8941_gpios: gpios {
-				spmi-dev-container;
-				compatible = "qcom,qpnp-gpio";
-				gpio-controller;
-				#gpio-cells = <2>;
-				#address-cells = <1>;
-				#size-cells = <1>;
-
-				gpio@c000 {
-					reg = <0xc000 0x100>;
-					qcom,gpio-num = <62>;
-				};
-
-				gpio@c100 {
-					reg = <0xc100 0x100>;
-					qcom,gpio-num = <20>;
-					qcom,source_sel = <2>;
-					qcom,pull = <5>;
-				};
-			};
-
-			qcom,testgpio@1000 {
-				compatible = "qcom,qpnp-testgpio";
-				reg = <0x1000 0x1000>;
-				gpios = <&pm8941_gpios 62 0x0 &pm8941_gpios 20 0x1>;
-			};
-		};
-	};
-};
diff --git a/Documentation/devicetree/bindings/gpio/qpnp-pin.txt b/Documentation/devicetree/bindings/gpio/qpnp-pin.txt
new file mode 100644
index 0000000..c58e073
--- /dev/null
+++ b/Documentation/devicetree/bindings/gpio/qpnp-pin.txt
@@ -0,0 +1,198 @@
+* msm-qpnp-pin
+
+msm-qpnp-pin is a GPIO chip driver for the MSM SPMI implementation.
+It creates a spmi_device for every spmi-dev-container block of device_nodes.
+These device_nodes contained within specify the PMIC pin number associated
+with each gpio chip. The driver will map these to Linux GPIO numbers.
+
+[PMIC GPIO Device Declarations]
+
+-Root Node-
+
+Required properties :
+ - spmi-dev-container : Used to specify the following child nodes as part of the
+   same SPMI device.
+ - gpio-controller : Specify as gpio-controller. All child nodes will belong to
+   this gpio_chip.
+ - #gpio-cells: We encode a PMIC pin number and a 32-bit flag field to
+   specify the gpio configuration. This must be set to '2'.
+ - #address-cells: Specify one address field. This must be set to '1'.
+ - #size-cells: Specify one size-cell. This must be set to '1'.
+ - compatible = "qcom,qpnp-pin" : Specify driver matching for this driver.
+ - label: String giving the name for the gpio_chip device. This name
+   should be unique on the system and portray the specifics of the device.
+
+-Child Nodes-
+
+Required properties :
+ - reg : Specify the spmi offset and size for this pin device.
+ - qcom,pin-num : Specify the PMIC pin number for this device.
+
+Optional configuration properties :
+ -  qcom,mode:		indicates whether the pin should be input, output, or
+			both for gpios. mpp pins also support bidirectional,
+			analog in, analog out and current sink.
+			QPNP_PIN_MODE_DIG_IN	 = 0, (GPIO/MPP)
+			QPNP_PIN_MODE_DIG_OUT	 = 1, (GPIO/MPP)
+			QPNP_PIN_MODE_DIG_IN_OUT = 2, (GPIO/MPP)
+			QPNP_PIN_MODE_BIDIR	 = 3, (MPP)
+			QPNP_PIN_MODE_AIN	 = 4, (MPP)
+			QPNP_PIN_MODE_AOUT	 = 5, (MPP)
+			QPNP_PIN_MODE_SINK	 = 6  (MPP)
+
+ - qcom,output-type:	indicates gpio should be configured as CMOS or open
+			drain.
+			QPNP_PIN_OUT_BUF_CMOS		 = 0, (GPIO)
+			QPNP_PIN_OUT_BUF_OPEN_DRAIN_NMOS = 1, (GPIO)
+			QPNP_PIN_OUT_BUF_OPEN_DRAIN_PMOS = 2  (GPIO)
+
+ - qcom,invert:		Invert the signal of the gpio line -
+			QPNP_PIN_INVERT_DISABLE = 0 (GPIO/MPP)
+			QPNP_PIN_INVERT_ENABLE	= 1 (GPIO/MPP)
+
+ - qcom,pull:		This parameter should be programmed to different values
+			depending on whether it's GPIO or MPP.
+			For GPIO, it indicates whether a pull up or pull down
+			should be applied. If a pullup is required the
+			current strength needs to be specified.
+			Current values of 30uA, 1.5uA, 31.5uA, 1.5uA with 30uA
+			boost are supported. This value should be one of
+			the QPNP_PIN_GPIO_PULL_*. Note that the hardware ignores
+			this configuration if the GPIO is not set to input or
+			output open-drain mode.
+			QPNP_PIN_PULL_UP_30	 = 0, (GPIO)
+			QPNP_PIN_PULL_UP_1P5	 = 1, (GPIO)
+			QPNP_PIN_PULL_UP_31P5	 = 2, (GPIO)
+			QPNP_PIN_PULL_UP_1P5_30  = 3, (GPIO)
+			QPNP_PIN_PULL_DN	 = 4, (GPIO)
+			QPNP_PIN_PULL_NO	 = 5  (GPIO)
+
+			For MPP, it indicates whether a pullup should be
+			applied for bidirectional mode only. The hardware
+			ignores the configuration when operating in other modes.
+			This value should be one of the QPNP_PIN_MPP_PULL_*.
+
+			QPNP_PIN_MPP_PULL_UP_0P6KOHM = 0, (MPP)
+			QPNP_PIN_MPP_PULL_UP_OPEN    = 1, (MPP)
+			QPNP_PIN_MPP_PULL_UP_10KOHM  = 2, (MPP)
+			QPNP_PIN_MPP_PULL_UP_30KOHM  = 3  (MPP)
+
+  - qcom,vin-sel:	specifies the voltage level when the output is set to 1.
+			For an input gpio specifies the voltage level at which
+			the input is interpreted as a logical 1.
+			QPNP_PIN_VIN0 = 0, (GPIO/MPP)
+			QPNP_PIN_VIN1 = 1, (GPIO/MPP)
+			QPNP_PIN_VIN2 = 2, (GPIO/MPP)
+			QPNP_PIN_VIN3 = 3, (GPIO/MPP)
+			QPNP_PIN_VIN4 = 4, (GPIO/MPP)
+			QPNP_PIN_VIN5 = 5, (GPIO/MPP)
+			QPNP_PIN_VIN6 = 6, (GPIO/MPP)
+			QPNP_PIN_VIN7 = 7  (GPIO/MPP)
+
+  - qcom,out-strength:	the amount of current supplied for an output gpio.
+			QPNP_PIN_OUT_STRENGTH_LOW  = 1, (GPIO)
+			QPNP_PIN_OUT_STRENGTH_MED  = 2, (GPIO)
+			QPNP_PIN_OUT_STRENGTH_HIGH = 3  (GPIO)
+
+  - qcom,select:	select a function for the pin. Certain pins
+			can be paired (shorted) with each other. Some gpio pins
+			can act as alternate functions.
+			In the context of gpio, this acts as a source select.
+			For mpps, this is an enable select.
+			QPNP_PIN_SEL_FUNC_CONSTANT = 0, (GPIO/MPP)
+			QPNP_PIN_SEL_FUNC_PAIRED   = 1, (GPIO/MPP)
+			QPNP_PIN_SEL_FUNC_1	   = 2, (GPIO/MPP)
+			QPNP_PIN_SEL_FUNC_2	   = 3, (GPIO/MPP)
+			QPNP_PIN_SEL_DTEST1	   = 4, (GPIO/MPP)
+			QPNP_PIN_SEL_DTEST2	   = 5, (GPIO/MPP)
+			QPNP_PIN_SEL_DTEST3	   = 6, (GPIO/MPP)
+			QPNP_PIN_SEL_DTEST4	   = 7  (GPIO/MPP)
+
+ - qcom,master-en:	1 = Enable features within the
+			pin block based on configurations. (GPIO/MPP)
+			0 = Completely disable the block and
+			let the pin float with high impedance
+			regardless of other settings. (GPIO/MPP)
+ - qcom,aout-ref:	set the analog output reference.
+
+			QPNP_PIN_AOUT_1V25    = 0, (MPP)
+			QPNP_PIN_AOUT_0V625   = 1, (MPP)
+			QPNP_PIN_AOUT_0V3125  = 2, (MPP)
+			QPNP_PIN_AOUT_MPP     = 3, (MPP)
+			QPNP_PIN_AOUT_ABUS1   = 4, (MPP)
+			QPNP_PIN_AOUT_ABUS2   = 5, (MPP)
+			QPNP_PIN_AOUT_ABUS3   = 6, (MPP)
+			QPNP_PIN_AOUT_ABUS4   = 7  (MPP)
+
+ - qcom,ain-route:	Set the destination for analog input.
+			QPNP_PIN_AIN_AMUX_CH5   = 0, (MPP)
+			QPNP_PIN_AIN_AMUX_CH6   = 1, (MPP)
+			QPNP_PIN_AIN_AMUX_CH7   = 2, (MPP)
+			QPNP_PIN_AIN_AMUX_CH8   = 3, (MPP)
+			QPNP_PIN_AIN_AMUX_ABUS1 = 4, (MPP)
+			QPNP_PIN_AIN_AMUX_ABUS2 = 5, (MPP)
+			QPNP_PIN_AIN_AMUX_ABUS3 = 6, (MPP)
+			QPNP_PIN_AIN_AMUX_ABUS4 = 7  (MPP)
+
+ - qcom,cs-out:		Set the amount of current to sink, in mA.
+			QPNP_PIN_CS_OUT_5MA  = 0, (MPP)
+			QPNP_PIN_CS_OUT_10MA = 1, (MPP)
+			QPNP_PIN_CS_OUT_15MA = 2, (MPP)
+			QPNP_PIN_CS_OUT_20MA = 3, (MPP)
+			QPNP_PIN_CS_OUT_25MA = 4, (MPP)
+			QPNP_PIN_CS_OUT_30MA = 5, (MPP)
+			QPNP_PIN_CS_OUT_35MA = 6, (MPP)
+			QPNP_PIN_CS_OUT_40MA = 7  (MPP)
+
+*Note: If any of the configuration properties are not specified, then the
+       qpnp-pin driver will not modify that respective configuration in
+       hardware.
+
+[PMIC GPIO clients]
+
+Required properties :
+ - gpios : Contains 3 fields of the form <&gpio_controller pmic_pin_num flags>
+
+[Example]
+
+qpnp: qcom,spmi@fc4c0000 {
+		#address-cells = <1>;
+		#size-cells = <0>;
+		interrupt-controller;
+		#interrupt-cells = <3>;
+
+		qcom,pm8941@0 {
+			spmi-slave-container;
+			reg = <0x0>;
+			#address-cells = <1>;
+			#size-cells = <1>;
+
+			pm8941_gpios: gpios {
+				spmi-dev-container;
+				compatible = "qcom,qpnp-pin";
+				gpio-controller;
+				#gpio-cells = <2>;
+				#address-cells = <1>;
+				#size-cells = <1>;
+
+				gpio@c000 {
+					reg = <0xc000 0x100>;
+					qcom,pin-num = <62>;
+				};
+
+				gpio@c100 {
+					reg = <0xc100 0x100>;
+					qcom,pin-num = <20>;
+					qcom,source_sel = <2>;
+					qcom,pull = <5>;
+				};
+			};
+
+			qcom,testgpio@1000 {
+				compatible = "qcom,qpnp-testgpio";
+				reg = <0x1000 0x1000>;
+				gpios = <&pm8941_gpios 62 0x0 &pm8941_gpios 20 0x1>;
+			};
+		};
+	};
+};
diff --git a/Documentation/devicetree/bindings/spmi/msm-spmi.txt b/Documentation/devicetree/bindings/spmi/msm-spmi.txt
index d50037f..5e43ea6 100644
--- a/Documentation/devicetree/bindings/spmi/msm-spmi.txt
+++ b/Documentation/devicetree/bindings/spmi/msm-spmi.txt
@@ -36,6 +36,10 @@
    number of interrupts.
  - interrupt-parent : the phandle for the interrupt controller that
    services interrupts for this device.
+ - reg-names : a list of strings that map in order to the list of addresses
+   specified above in the 'reg' property.
+ - interrupt-names : a list of strings that map in order to the list of
+   interrupts specified in the 'interrupts' property.
 
 [Second Level Nodes]
 
@@ -60,6 +64,10 @@
  - spmi-dev-container: This specifies that all the device nodes specified for
    this slave_id should have their resources coalesced into only one
    spmi_device.
+ - reg-names : a list of strings that map in order to the list of addresses
+   specified above in the 'reg' property.
+ - interrupt-names : a list of strings that map in order to the list of
+   interrupts specified in the 'interrupts' property.
 
 [Third Level Nodes]
 
@@ -79,7 +87,14 @@
    number of interrupts.
  - interrupt-parent : the phandle for the interrupt controller that
    services interrupts for this device.
-
+ - reg-names : a list of strings that map in order to the list of addresses
+   specified above in the 'reg' property.
+ - interrupt-names : a list of strings that map in order to the list of
+   interrupts specified in the 'interrupts' property.
+ - label: A name identifying the device. This name can be looked up
+   with spmi_get_node_byname(). This is mostly useful in spmi-dev-container
+   configurations where multiple device_nodes are associated with one spmi
+   device.
 Notes :
  - It is considered an error to include spmi-slave-dev at this level.
 
@@ -97,7 +112,7 @@
 			compatible = "qcom,qpnp-testint";
 			reg = <0xf>;
 			interrupts = <0x3 0x15 0x0 0x3 0x15 0x02 0x1 0x47 0x0>;
-
+			interrupt-names = "testint_0", "testint_1", "testint_err";
 		};
 
 		pm8941@0 {
@@ -108,22 +123,23 @@
 
 			pm8941_gpios: gpios {
 				spmi-dev-container;
-				compatible = "qcom,qpnp-gpio";
+				compatible = "qcom,qpnp-pin";
 				gpio-controller;
 				#gpio-cells = <1>;
 				#address-cells = <1>;
 				#size-cells = <1>;
 
 				pm8941_gpio1@0xc000 {
-					compatible = "qcom,qpnp-gpio";
+					compatible = "qcom,qpnp-pin";
 					reg = <0xc000 0x100>;
 					qcom,qpnp_gpio = <1>;
 					interrupt-parent = <&qpnp>;
 					interrupts = <0x3 0x15 0x02 0x1 0x47 0x0>;
+					label = "foo-dev";
 				};
 
 				pm8941_gpio2@0xc100 {
-					compatible = "qcom,qpnp-gpio";
+					compatible = "qcom,qpnp-pin";
 					reg = <0xc100 0x100>;
 					qcom,qpnp_gpio = <2>;
 					interrupt-parent = <&qpnp>;
@@ -133,7 +149,8 @@
 
 			testgpio@0x1000 {
 				compatible = "qcom,qpnp-testgpio";
-				reg = <0x1000 0x1000>;
+				reg = <0x1000 0x1000 0x2000 0x1000>;
+				reg-names = "foo", "bar";
 				qpnp-gpios = <&pm8941_gpios 0x0>;
 			};
 		};
@@ -143,7 +160,7 @@
 			#address-cells = <1>;
 			#size-cells = <1>;
 			spmi-dev-container;
-			compatible = "qcom,qpnp-gpio";
+			compatible = "qcom,qpnp-pin";
 
 			pm8841_gpio1@0xc000 {
 				reg = <0xc000 0x100>;
diff --git a/arch/arm/boot/dts/msm-pm8841.dtsi b/arch/arm/boot/dts/msm-pm8841.dtsi
index b157e95..a586a90 100644
--- a/arch/arm/boot/dts/msm-pm8841.dtsi
+++ b/arch/arm/boot/dts/msm-pm8841.dtsi
@@ -17,6 +17,47 @@
 		interrupt-controller;
 		#interrupt-cells = <3>;
 
+		qcom,pm8841@4 {
+			spmi-slave-container;
+			reg = <0x4>;
+			#address-cells = <1>;
+			#size-cells = <1>;
+
+			pm8841_mpps {
+				spmi-dev-container;
+				compatible = "qcom,qpnp-pin";
+				gpio-controller;
+				#gpio-cells = <2>;
+				#address-cells = <1>;
+				#size-cells = <1>;
+				label = "pm8841-mpp";
+
+				mpp@a000 {
+					reg = <0xa000 0x100>;
+					qcom,pin-num = <1>;
+					status = "disabled";
+				};
+
+				mpp@a100 {
+					reg = <0xa100 0x100>;
+					qcom,pin-num = <2>;
+					status = "disabled";
+				};
+
+				mpp@a200 {
+					reg = <0xa200 0x100>;
+					qcom,pin-num = <3>;
+					status = "disabled";
+				};
+
+				mpp@a300 {
+					reg = <0xa300 0x100>;
+					qcom,pin-num = <4>;
+					status = "disabled";
+				};
+			};
+		};
+
 		qcom,pm8841@5 {
 			spmi-slave-container;
 			reg = <0x5>;
diff --git a/arch/arm/boot/dts/msm-pm8941.dtsi b/arch/arm/boot/dts/msm-pm8941.dtsi
index e62dfbd..2714d9e 100644
--- a/arch/arm/boot/dts/msm-pm8941.dtsi
+++ b/arch/arm/boot/dts/msm-pm8941.dtsi
@@ -25,225 +25,284 @@
 
 			pm8941_gpios {
 				spmi-dev-container;
-				compatible = "qcom,qpnp-gpio";
+				compatible = "qcom,qpnp-pin";
 				gpio-controller;
 				#gpio-cells = <2>;
 				#address-cells = <1>;
 				#size-cells = <1>;
+				label = "pm8941-gpio";
 
 				gpio@c000 {
 					reg = <0xc000 0x100>;
-					qcom,gpio-num = <1>;
+					qcom,pin-num = <1>;
 					status = "disabled";
 				};
 
 				gpio@c100 {
 					reg = <0xc100 0x100>;
-					qcom,gpio-num = <2>;
+					qcom,pin-num = <2>;
 					status = "disabled";
 				};
 
 				gpio@c200 {
 					reg = <0xc200 0x100>;
-					qcom,gpio-num = <3>;
+					qcom,pin-num = <3>;
 					status = "disabled";
 				};
 
 				gpio@c300 {
 					reg = <0xc300 0x100>;
-					qcom,gpio-num = <4>;
+					qcom,pin-num = <4>;
 					status = "disabled";
 				};
 
 				gpio@c400 {
 					reg = <0xc400 0x100>;
-					qcom,gpio-num = <5>;
+					qcom,pin-num = <5>;
 					status = "disabled";
 				};
 
 				gpio@c500 {
 					reg = <0xc500 0x100>;
-					qcom,gpio-num = <6>;
+					qcom,pin-num = <6>;
 					status = "disabled";
 				};
 
 				gpio@c600 {
 					reg = <0xc600 0x100>;
-					qcom,gpio-num = <7>;
+					qcom,pin-num = <7>;
 					status = "disabled";
 				};
 
 				gpio@c700 {
 					reg = <0xc700 0x100>;
-					qcom,gpio-num = <8>;
+					qcom,pin-num = <8>;
 					status = "disabled";
 				};
 
 				gpio@c800 {
 					reg = <0xc800 0x100>;
-					qcom,gpio-num = <9>;
+					qcom,pin-num = <9>;
 					status = "disabled";
 				};
 
 				gpio@c900 {
 					reg = <0xc900 0x100>;
-					qcom,gpio-num = <10>;
+					qcom,pin-num = <10>;
 					status = "disabled";
 				};
 
 				gpio@ca00 {
 					reg = <0xca00 0x100>;
-					qcom,gpio-num = <11>;
+					qcom,pin-num = <11>;
 					status = "disabled";
 				};
 
 				gpio@cb00 {
 					reg = <0xcb00 0x100>;
-					qcom,gpio-num = <12>;
+					qcom,pin-num = <12>;
 					status = "disabled";
 				};
 
 				gpio@cc00 {
 					reg = <0xcc00 0x100>;
-					qcom,gpio-num = <13>;
+					qcom,pin-num = <13>;
 					status = "disabled";
 				};
 
 				gpio@cd00 {
 					reg = <0xcd00 0x100>;
-					qcom,gpio-num = <14>;
+					qcom,pin-num = <14>;
 					status = "disabled";
 				};
 
 				gpio@ce00 {
 					reg = <0xce00 0x100>;
-					qcom,gpio-num = <15>;
+					qcom,pin-num = <15>;
 					status = "disabled";
 				};
 
 				gpio@cf00 {
 					reg = <0xcf00 0x100>;
-					qcom,gpio-num = <16>;
+					qcom,pin-num = <16>;
 					status = "disabled";
 				};
 
 				gpio@d000 {
 					reg = <0xd000 0x100>;
-					qcom,gpio-num = <17>;
+					qcom,pin-num = <17>;
 					status = "disabled";
 				};
 
 				gpio@d100 {
 					reg = <0xd100 0x100>;
-					qcom,gpio-num = <18>;
+					qcom,pin-num = <18>;
 					status = "disabled";
 				};
 
 				gpio@d200 {
 					reg = <0xd200 0x100>;
-					qcom,gpio-num = <19>;
+					qcom,pin-num = <19>;
 					status = "disabled";
 				};
 
 				gpio@d300 {
 					reg = <0xd300 0x100>;
-					qcom,gpio-num = <20>;
+					qcom,pin-num = <20>;
 					status = "disabled";
 				};
 
 				gpio@d400 {
 					reg = <0xd400 0x100>;
-					qcom,gpio-num = <21>;
+					qcom,pin-num = <21>;
 					status = "disabled";
 				};
 
 				gpio@d500 {
 					reg = <0xd500 0x100>;
-					qcom,gpio-num = <22>;
+					qcom,pin-num = <22>;
 					status = "disabled";
 				};
 
 				gpio@d600 {
 					reg = <0xd600 0x100>;
-					qcom,gpio-num = <23>;
+					qcom,pin-num = <23>;
 					status = "disabled";
 				};
 
 				gpio@d700 {
 					reg = <0xd700 0x100>;
-					qcom,gpio-num = <24>;
+					qcom,pin-num = <24>;
 					status = "disabled";
 				};
 
 				gpio@d800 {
 					reg = <0xd800 0x100>;
-					qcom,gpio-num = <25>;
+					qcom,pin-num = <25>;
 					status = "disabled";
 				};
 
 				gpio@d900 {
 					reg = <0xd900 0x100>;
-					qcom,gpio-num = <26>;
+					qcom,pin-num = <26>;
 					status = "disabled";
 				};
 
 				gpio@da00 {
 					reg = <0xda00 0x100>;
-					qcom,gpio-num = <27>;
+					qcom,pin-num = <27>;
 					status = "disabled";
 				};
 
 				gpio@db00 {
 					reg = <0xdb00 0x100>;
-					qcom,gpio-num = <28>;
+					qcom,pin-num = <28>;
 					status = "disabled";
 				};
 
 				gpio@dc00 {
 					reg = <0xdc00 0x100>;
-					qcom,gpio-num = <29>;
+					qcom,pin-num = <29>;
 					status = "disabled";
 				};
 
 				gpio@dd00 {
 					reg = <0xdd00 0x100>;
-					qcom,gpio-num = <30>;
+					qcom,pin-num = <30>;
 					status = "disabled";
 				};
 
 				gpio@de00 {
 					reg = <0xde00 0x100>;
-					qcom,gpio-num = <31>;
+					qcom,pin-num = <31>;
 					status = "disabled";
 				};
 
 				gpio@df00 {
 					reg = <0xdf00 0x100>;
-					qcom,gpio-num = <32>;
+					qcom,pin-num = <32>;
 					status = "disabled";
 				};
 
 				gpio@e000 {
 					reg = <0xe000 0x100>;
-					qcom,gpio-num = <33>;
+					qcom,pin-num = <33>;
 					status = "disabled";
 				};
 
 				gpio@e100 {
 					reg = <0xe100 0x100>;
-					qcom,gpio-num = <34>;
+					qcom,pin-num = <34>;
 					status = "disabled";
 				};
 
 				gpio@e200 {
 					reg = <0xe200 0x100>;
-					qcom,gpio-num = <35>;
+					qcom,pin-num = <35>;
 					status = "disabled";
 				};
 
 				gpio@e300 {
 					reg = <0xe300 0x100>;
-					qcom,gpio-num = <36>;
+					qcom,pin-num = <36>;
+					status = "disabled";
+				};
+			};
+
+			pm8941_mpps {
+				spmi-dev-container;
+				compatible = "qcom,qpnp-pin";
+				gpio-controller;
+				#gpio-cells = <2>;
+				#address-cells = <1>;
+				#size-cells = <1>;
+				label = "pm8941-mpp";
+
+				mpp@a000 {
+					reg = <0xa000 0x100>;
+					qcom,pin-num = <1>;
+					status = "disabled";
+				};
+
+				mpp@a100 {
+					reg = <0xa100 0x100>;
+					qcom,pin-num = <2>;
+					status = "disabled";
+				};
+
+				mpp@a200 {
+					reg = <0xa200 0x100>;
+					qcom,pin-num = <3>;
+					status = "disabled";
+				};
+
+				mpp@a300 {
+					reg = <0xa300 0x100>;
+					qcom,pin-num = <4>;
+					status = "disabled";
+				};
+
+				mpp@a400 {
+					reg = <0xa400 0x100>;
+					qcom,pin-num = <5>;
+					status = "disabled";
+				};
+
+				mpp@a500 {
+					reg = <0xa500 0x100>;
+					qcom,pin-num = <6>;
+					status = "disabled";
+				};
+
+				mpp@a600 {
+					reg = <0xa600 0x100>;
+					qcom,pin-num = <7>;
+					status = "disabled";
+				};
+
+				mpp@a700 {
+					reg = <0xa700 0x100>;
+					qcom,pin-num = <8>;
 					status = "disabled";
 				};
 			};
diff --git a/arch/arm/boot/dts/msmcopper-gpio.dtsi b/arch/arm/boot/dts/msmcopper-gpio.dtsi
index 7c3f5ce..59ad8db 100644
--- a/arch/arm/boot/dts/msmcopper-gpio.dtsi
+++ b/arch/arm/boot/dts/msmcopper-gpio.dtsi
@@ -18,197 +18,218 @@
 			pm8941_gpios: pm8941_gpios {
 
 				gpio@c000 {
-					qcom,gpio-num = <1>;
 					status = "ok";
 				};
 
 				gpio@c100 {
-					qcom,gpio-num = <2>;
 					status = "ok";
 				};
 
 				gpio@c200 {
-					qcom,gpio-num = <3>;
 					status = "ok";
 				};
 
 				gpio@c300 {
-					qcom,gpio-num = <4>;
 					status = "ok";
 				};
 
 				gpio@c400 {
-					qcom,gpio-num = <5>;
 					status = "ok";
 				};
 
 				gpio@c500 {
-					qcom,gpio-num = <6>;
 					status = "ok";
 				};
 
 				gpio@c600 {
-					qcom,gpio-num = <7>;
 					status = "ok";
 				};
 
 				gpio@c700 {
-					qcom,gpio-num = <8>;
 					status = "ok";
 				};
 
 				gpio@c800 {
-					qcom,gpio-num = <9>;
 					status = "ok";
 				};
 
 				gpio@c900 {
-					qcom,gpio-num = <10>;
 					status = "ok";
 				};
 
 				gpio@ca00 {
-					qcom,gpio-num = <11>;
 					status = "ok";
 				};
 
 				gpio@cb00 {
-					qcom,gpio-num = <12>;
 					status = "ok";
 				};
 
 				gpio@cc00 {
-					qcom,gpio-num = <13>;
 					status = "ok";
 				};
 
 				gpio@cd00 {
-					qcom,gpio-num = <14>;
 					status = "ok";
 				};
 
 				gpio@ce00 {
-					qcom,gpio-num = <15>;
 					status = "ok";
 				};
 
 				gpio@cf00 {
-					qcom,gpio-num = <16>;
 					status = "ok";
 				};
 
 				gpio@d000 {
-					qcom,gpio-num = <17>;
 					status = "ok";
 				};
 
 				gpio@d100 {
-					qcom,gpio-num = <18>;
 					status = "ok";
 				};
 
 				gpio@d200 {
-					qcom,gpio-num = <19>;
 					status = "ok";
 				};
 
 				gpio@d300 {
-					qcom,gpio-num = <20>;
 					status = "ok";
 				};
 
 				gpio@d400 {
-					qcom,gpio-num = <21>;
 					status = "ok";
 				};
 
 				gpio@d500 {
-					qcom,gpio-num = <22>;
 					status = "ok";
 				};
 
 				gpio@d600 {
-					qcom,gpio-num = <23>;
 					status = "ok";
 				};
 
 				gpio@d700 {
-					qcom,gpio-num = <24>;
 					status = "ok";
 				};
 
 				gpio@d800 {
-					qcom,gpio-num = <25>;
 					qcom,out-strength = <1>;
 					status = "ok";
 				};
 
 				gpio@d900 {
-					qcom,gpio-num = <26>;
 					qcom,out-strength = <1>;
 					status = "ok";
 				};
 
 				gpio@da00 {
-					qcom,gpio-num = <27>;
 					qcom,out-strength = <1>;
 					status = "ok";
 				};
 
 				gpio@db00 {
-					qcom,gpio-num = <28>;
 					qcom,out-strength = <1>;
 					status = "ok";
 				};
 
 				gpio@dc00 {
-					qcom,gpio-num = <29>;
 					qcom,out-strength = <1>;
 					status = "ok";
 				};
 
 				gpio@dd00 {
-					qcom,gpio-num = <30>;
 					qcom,out-strength = <1>;
 					status = "ok";
 				};
 
 				gpio@de00 {
-					qcom,gpio-num = <31>;
 					qcom,out-strength = <1>;
 					status = "ok";
 				};
 
 				gpio@df00 {
-					qcom,gpio-num = <32>;
 					qcom,out-strength = <1>;
 					status = "ok";
 				};
 
 				gpio@e000 {
-					qcom,gpio-num = <33>;
 					qcom,out-strength = <1>;
 					status = "ok";
 				};
 
 				gpio@e100 {
-					qcom,gpio-num = <34>;
 					qcom,out-strength = <1>;
 					status = "ok";
 				};
 
 				gpio@e200 {
-					qcom,gpio-num = <35>;
 					qcom,out-strength = <1>;
 					status = "ok";
 				};
 
 				gpio@e300 {
-					qcom,gpio-num = <36>;
 					qcom,out-strength = <1>;
 					status = "ok";
 				};
 			};
+
+			pm8941_mpps: pm8941_mpps {
+
+				mpp@a000 {
+					status = "ok";
+				};
+
+				mpp@a100 {
+					status = "ok";
+				};
+
+				mpp@a200 {
+					status = "ok";
+				};
+
+				mpp@a300 {
+					status = "ok";
+				};
+
+				mpp@a400 {
+					status = "ok";
+				};
+
+				mpp@a500 {
+					status = "ok";
+				};
+
+				mpp@a600 {
+					status = "ok";
+				};
+
+				mpp@a700 {
+					status = "ok";
+				};
+			};
+		};
+
+		qcom,pm8841@4 {
+
+			pm8841_mpps: pm8841_mpps {
+
+				mpp@a000 {
+					status = "ok";
+				};
+
+				mpp@a100 {
+					status = "ok";
+				};
+
+				mpp@a200 {
+					status = "ok";
+				};
+
+				mpp@a300 {
+					status = "ok";
+				};
+			};
 		};
 	};
 };
diff --git a/arch/arm/boot/dts/msmcopper-iommu.dtsi b/arch/arm/boot/dts/msmcopper-iommu.dtsi
index e0ce8ac..697136a 100644
--- a/arch/arm/boot/dts/msmcopper-iommu.dtsi
+++ b/arch/arm/boot/dts/msmcopper-iommu.dtsi
@@ -17,6 +17,7 @@
 		#size-cells = <1>;
 		ranges;
 		reg = <0xfda64000 0x10000>;
+		vdd-supply = <&gdsc_jpeg>;
 
 		qcom,iommu-ctx@fda6c000 {
 			reg = <0xfda6c000 0x1000>;
@@ -44,6 +45,7 @@
 		#size-cells = <1>;
 		ranges;
 		reg = <0xfd928000 0x10000>;
+		vdd-supply = <&gdsc_mdss>;
 
 		qcom,iommu-ctx@fd930000 {
 			reg = <0xfd930000 0x1000>;
@@ -65,6 +67,7 @@
 		#size-cells = <1>;
 		ranges;
 		reg = <0xfdc84000 0x10000>;
+		vdd-supply = <&gdsc_venus>;
 
 		qcom,iommu-ctx@fdc8c000 {
 			reg = <0xfdc8c000 0x1000>;
diff --git a/arch/arm/boot/dts/msmcopper.dtsi b/arch/arm/boot/dts/msmcopper.dtsi
index 77cbbe0..5fe99df 100644
--- a/arch/arm/boot/dts/msmcopper.dtsi
+++ b/arch/arm/boot/dts/msmcopper.dtsi
@@ -270,6 +270,10 @@
 
 	qcom,acpuclk@f9000000 {
 		compatible = "qcom,acpuclk-copper";
+		krait0-supply = <&krait0_vreg>;
+		krait1-supply = <&krait1_vreg>;
+		krait2-supply = <&krait2_vreg>;
+		krait3-supply = <&krait3_vreg>;
 	};
 
 	qcom,ssusb@F9200000 {
@@ -392,4 +396,27 @@
 	qcom,qseecom@fe806000 {
 		compatible = "qcom,qseecom";
 	};
+
+	qcom,mdss_mdp@fd900000 {
+		cell-index = <0>;
+		compatible = "qcom,mdss_mdp";
+		reg = <0xfd900000 0x22100>;
+		interrupts = <0 72 0>;
+	};
+
+	qcom,mdss_wb_panel {
+		cell-index = <1>;
+		compatible = "qcom,mdss_wb";
+		qcom,mdss_pan_res = <640 480>;
+		qcom,mdss_pan_bpp = <24>;
+	};
+
+	qcom,wdt@f9017000 {
+		compatible = "qcom,msm-watchdog";
+		reg = <0xf9017000 0x1000>;
+		interrupts = <0 3 0 0 4 0>;
+		qcom,bark-time = <11000>;
+		qcom,pet-time = <10000>;
+		qcom,ipi-ping = <1>;
+	};
 };
diff --git a/arch/arm/boot/dts/msmcopper_pm.dtsi b/arch/arm/boot/dts/msmcopper_pm.dtsi
index 79cb95c..6f12e31c 100644
--- a/arch/arm/boot/dts/msmcopper_pm.dtsi
+++ b/arch/arm/boot/dts/msmcopper_pm.dtsi
@@ -132,6 +132,36 @@
 				3b 60 02 32 a0 50 0f];
 	};
 
+	qcom,lpm-resources {
+		compatible = "qcom,lpm-resources";
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		qcom,lpm-resources@0 {
+			reg = <0x0>;
+			qcom,name = "vdd-dig";
+			qcom,type = "smpb\0";
+			qcom,id = <0x02>;
+			qcom,key = "uv\0\0";
+		};
+
+		qcom,lpm-resources@1 {
+			reg = <0x1>;
+			qcom,name = "vdd-mem";
+			qcom,type = "smpb\0";
+			qcom,id = <0x01>;
+			qcom,key = "uv\0\0";
+		};
+
+		qcom,lpm-resources@2 {
+			reg = <0x2>;
+			qcom,name = "pxo";
+			qcom,type = "clk0\0";
+			qcom,id = <0x00>;
+			qcom,key = "Enab";
+		};
+	};
+
 	qcom,lpm-levels {
 		compatible = "qcom,lpm-levels";
 		#address-cells = <1>;
diff --git a/arch/arm/common/gic.c b/arch/arm/common/gic.c
index b14ecf8..29d01f3 100644
--- a/arch/arm/common/gic.c
+++ b/arch/arm/common/gic.c
@@ -64,9 +64,7 @@
 	u32 __percpu *saved_ppi_enable;
 	u32 __percpu *saved_ppi_conf;
 #endif
-#ifdef CONFIG_IRQ_DOMAIN
-	struct irq_domain domain;
-#endif
+	struct irq_domain *domain;
 	unsigned int gic_irqs;
 #ifdef CONFIG_GIC_NON_BANKED
 	void __iomem *(*get_base)(union gic_base *);
@@ -447,7 +445,7 @@
 		irqnr = irqstat & ~0x1c00;
 
 		if (likely(irqnr > 15 && irqnr < 1021)) {
-			irqnr = irq_domain_to_irq(&gic->domain, irqnr);
+			irqnr = irq_find_mapping(gic->domain, irqnr);
 			handle_IRQ(irqnr, regs);
 			continue;
 		}
@@ -485,8 +483,8 @@
 	if (gic_irq == 1023)
 		goto out;
 
-	cascade_irq = irq_domain_to_irq(&chip_data->domain, gic_irq);
-	if (unlikely(gic_irq < 32 || gic_irq > 1020 || cascade_irq >= NR_IRQS))
+	cascade_irq = irq_find_mapping(chip_data->domain, gic_irq);
+	if (unlikely(gic_irq < 32 || gic_irq > 1020))
 		do_bad_IRQ(cascade_irq, desc);
 	else
 		generic_handle_irq(cascade_irq);
@@ -520,10 +518,9 @@
 
 static void __init gic_dist_init(struct gic_chip_data *gic)
 {
-	unsigned int i, irq;
+	unsigned int i;
 	u32 cpumask;
 	unsigned int gic_irqs = gic->gic_irqs;
-	struct irq_domain *domain = &gic->domain;
 	void __iomem *base = gic_data_dist_base(gic);
 	u32 cpu = cpu_logical_map(smp_processor_id());
 
@@ -566,23 +563,6 @@
 	for (i = 32; i < gic_irqs; i += 32)
 		writel_relaxed(0xffffffff, base + GIC_DIST_ENABLE_CLEAR + i * 4 / 32);
 
-	/*
-	 * Setup the Linux IRQ subsystem.
-	 */
-	irq_domain_for_each_irq(domain, i, irq) {
-		if (i < 32) {
-			irq_set_percpu_devid(irq);
-			irq_set_chip_and_handler(irq, &gic_chip,
-						 handle_percpu_devid_irq);
-			set_irq_flags(irq, IRQF_VALID | IRQF_NOAUTOEN);
-		} else {
-			irq_set_chip_and_handler(irq, &gic_chip,
-						 handle_fasteoi_irq);
-			set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
-		}
-		irq_set_chip_data(irq, gic);
-	}
-
 	gic->max_irq = gic_irqs;
 
 	if (is_cpu_secure())
@@ -833,11 +813,27 @@
 }
 #endif
 
-#ifdef CONFIG_OF
-static int gic_irq_domain_dt_translate(struct irq_domain *d,
-				       struct device_node *controller,
-				       const u32 *intspec, unsigned int intsize,
-				       unsigned long *out_hwirq, unsigned int *out_type)
+static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq,
+				irq_hw_number_t hw)
+{
+	if (hw < 32) {
+		irq_set_percpu_devid(irq);
+		irq_set_chip_and_handler(irq, &gic_chip,
+					 handle_percpu_devid_irq);
+		set_irq_flags(irq, IRQF_VALID | IRQF_NOAUTOEN);
+	} else {
+		irq_set_chip_and_handler(irq, &gic_chip,
+					 handle_fasteoi_irq);
+		set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
+	}
+	irq_set_chip_data(irq, d->host_data);
+	return 0;
+}
+
+static int gic_irq_domain_xlate(struct irq_domain *d,
+				struct device_node *controller,
+				const u32 *intspec, unsigned int intsize,
+				unsigned long *out_hwirq, unsigned int *out_type)
 {
 	if (d->of_node != controller)
 		return -EINVAL;
@@ -854,26 +850,23 @@
 	*out_type = intspec[2] & IRQ_TYPE_SENSE_MASK;
 	return 0;
 }
-#endif
 
 const struct irq_domain_ops gic_irq_domain_ops = {
-#ifdef CONFIG_OF
-	.dt_translate = gic_irq_domain_dt_translate,
-#endif
+	.map = gic_irq_domain_map,
+	.xlate = gic_irq_domain_xlate,
 };
 
 void __init gic_init_bases(unsigned int gic_nr, int irq_start,
 			   void __iomem *dist_base, void __iomem *cpu_base,
-			   u32 percpu_offset)
+			   u32 percpu_offset, struct device_node *node)
 {
+	irq_hw_number_t hwirq_base;
 	struct gic_chip_data *gic;
-	struct irq_domain *domain;
-	int gic_irqs, rc;
+	int gic_irqs, irq_base;
 
 	BUG_ON(gic_nr >= MAX_GIC_NR);
 
 	gic = &gic_data[gic_nr];
-	domain = &gic->domain;
 #ifdef CONFIG_GIC_NON_BANKED
 	if (percpu_offset) { /* Frankein-GIC without banked registers... */
 		unsigned int cpu;
@@ -881,8 +874,11 @@
 		gic->dist_base.percpu_base = alloc_percpu(void __iomem *);
 		gic->cpu_base.percpu_base = alloc_percpu(void __iomem *);
 		if (WARN_ON(!gic->dist_base.percpu_base ||
-			    !gic->cpu_base.percpu_base))
-			goto init_bases_err;
+			     !gic->cpu_base.percpu_base)) {
+			free_percpu(gic->dist_base.percpu_base);
+			free_percpu(gic->cpu_base.percpu_base);
+			return;
+		}
 
 		for_each_possible_cpu(cpu) {
 			unsigned long offset = percpu_offset * cpu_logical_map(cpu);
@@ -906,13 +902,12 @@
 	 * For primary GICs, skip over SGIs.
 	 * For secondary GICs, skip over PPIs, too.
 	 */
-	domain->hwirq_base = 32;
-	if (gic_nr == 0) {
-		if ((irq_start & 31) > 0) {
-			domain->hwirq_base = 16;
-			if (irq_start != -1)
-				irq_start = (irq_start & ~31) + 16;
-		}
+	if (gic_nr == 0 && (irq_start & 31) > 0) {
+		hwirq_base = 16;
+		if (irq_start != -1)
+			irq_start = (irq_start & ~31) + 16;
+	} else {
+		hwirq_base = 32;
 	}
 
 	/*
@@ -925,33 +920,22 @@
 		gic_irqs = 1020;
 	gic->gic_irqs = gic_irqs;
 
-	domain->nr_irq = gic_irqs - domain->hwirq_base;
-	domain->irq_base = irq_alloc_descs(irq_start, 16, domain->nr_irq,
-					   numa_node_id());
-	if (IS_ERR_VALUE(domain->irq_base)) {
+	gic_irqs -= hwirq_base; /* calculate # of irqs to allocate */
+	irq_base = irq_alloc_descs(irq_start, 16, gic_irqs, numa_node_id());
+	if (IS_ERR_VALUE(irq_base)) {
 		WARN(1, "Cannot allocate irq_descs @ IRQ%d, assuming pre-allocated\n",
 		     irq_start);
-		domain->irq_base = irq_start;
+		irq_base = irq_start;
 	}
-	domain->priv = gic;
-	domain->ops = &gic_irq_domain_ops;
-	rc = irq_domain_add(domain);
-	if (rc) {
-		WARN(1, "Unable to create irq_domain\n");
-		goto init_bases_err;
-	}
-	irq_domain_register(domain);
+	gic->domain = irq_domain_add_legacy(node, gic_irqs, irq_base,
+				    hwirq_base, &gic_irq_domain_ops, gic);
+	if (WARN_ON(!gic->domain))
+		return;
 
 	gic_chip.flags |= gic_arch_extn.flags;
 	gic_dist_init(gic);
 	gic_cpu_init(gic);
 	gic_pm_init(gic);
-
-	return;
-
-init_bases_err:
-	free_percpu(gic->dist_base.percpu_base);
-	free_percpu(gic->cpu_base.percpu_base);
 }
 
 void __cpuinit gic_secondary_init(unsigned int gic_nr)
@@ -1036,7 +1020,6 @@
 	void __iomem *dist_base;
 	u32 percpu_offset;
 	int irq;
-	struct irq_domain *domain = &gic_data[gic_cnt].domain;
 
 	if (WARN_ON(!node))
 		return -ENODEV;
@@ -1050,9 +1033,7 @@
 	if (of_property_read_u32(node, "cpu-offset", &percpu_offset))
 		percpu_offset = 0;
 
-	domain->of_node = of_node_get(node);
-
-	gic_init_bases(gic_cnt, -1, dist_base, cpu_base, percpu_offset);
+	gic_init_bases(gic_cnt, -1, dist_base, cpu_base, percpu_offset, node);
 
 	if (parent) {
 		irq = irq_of_parse_and_map(node, 0);
diff --git a/arch/arm/configs/msm-copper_defconfig b/arch/arm/configs/msm-copper_defconfig
index a978c30..8b9fa0fe 100644
--- a/arch/arm/configs/msm-copper_defconfig
+++ b/arch/arm/configs/msm-copper_defconfig
@@ -136,10 +136,13 @@
 CONFIG_SPMI=y
 CONFIG_SPMI_MSM_PMIC_ARB=y
 CONFIG_MSM_QPNP=y
+CONFIG_MSM_QPNP_INT=y
 CONFIG_SLIMBUS=y
 CONFIG_SLIMBUS_MSM_CTRL=y
 CONFIG_DEBUG_GPIO=y
 CONFIG_GPIO_SYSFS=y
+CONFIG_GPIO_QPNP_PIN=y
+CONFIG_GPIO_QPNP_PIN_DEBUG=y
 CONFIG_POWER_SUPPLY=y
 # CONFIG_BATTERY_MSM is not set
 # CONFIG_HWMON is not set
@@ -155,7 +158,14 @@
 CONFIG_ION=y
 CONFIG_ION_MSM=y
 CONFIG_FB=y
-CONFIG_FB_VIRTUAL=y
+CONFIG_FB_MSM=y
+# CONFIG_FB_MSM_BACKLIGHT is not set
+CONFIG_FB_MSM_MDSS=y
+CONFIG_FB_MSM_MDSS_WRITEBACK=y
+CONFIG_BACKLIGHT_LCD_SUPPORT=y
+# CONFIG_LCD_CLASS_DEVICE is not set
+CONFIG_BACKLIGHT_CLASS_DEVICE=y
+# CONFIG_BACKLIGHT_GENERIC is not set
 # CONFIG_HID_SUPPORT is not set
 CONFIG_USB_GADGET=y
 CONFIG_USB_CI13XXX_MSM=y
@@ -168,8 +178,6 @@
 # CONFIG_MMC_BLOCK_BOUNCE is not set
 CONFIG_MMC_TEST=m
 CONFIG_MMC_MSM=y
-CONFIG_NEW_LEDS=y
-CONFIG_LEDS_CLASS=y
 CONFIG_SWITCH=y
 CONFIG_STAGING=y
 CONFIG_ANDROID=y
diff --git a/arch/arm/configs/msm8960-perf_defconfig b/arch/arm/configs/msm8960-perf_defconfig
index 57e644d..dd33d76 100644
--- a/arch/arm/configs/msm8960-perf_defconfig
+++ b/arch/arm/configs/msm8960-perf_defconfig
@@ -92,6 +92,8 @@
 CONFIG_MSM_SLEEP_STATS=y
 CONFIG_MSM_EBI_ERP=y
 CONFIG_MSM_CACHE_ERP=y
+CONFIG_MSM_L1_ERR_PANIC=y
+CONFIG_MSM_L1_ERR_LOG=y
 CONFIG_MSM_L2_ERP_2BIT_PANIC=y
 CONFIG_MSM_DCVS=y
 CONFIG_MSM_HSIC_SYSMON=y
diff --git a/arch/arm/configs/msm8960_defconfig b/arch/arm/configs/msm8960_defconfig
index ca8a909..c0bc02e 100644
--- a/arch/arm/configs/msm8960_defconfig
+++ b/arch/arm/configs/msm8960_defconfig
@@ -94,6 +94,7 @@
 CONFIG_MSM_EBI_ERP=y
 CONFIG_MSM_CACHE_ERP=y
 CONFIG_MSM_L1_ERR_PANIC=y
+CONFIG_MSM_L1_ERR_LOG=y
 CONFIG_MSM_L2_ERP_PRINT_ACCESS_ERRORS=y
 CONFIG_MSM_L2_ERP_1BIT_PANIC=y
 CONFIG_MSM_L2_ERP_2BIT_PANIC=y
diff --git a/arch/arm/include/asm/hardware/cache-l2x0.h b/arch/arm/include/asm/hardware/cache-l2x0.h
index a244039..926ac0e 100644
--- a/arch/arm/include/asm/hardware/cache-l2x0.h
+++ b/arch/arm/include/asm/hardware/cache-l2x0.h
@@ -90,6 +90,7 @@
 #define L2X0_AUX_CTRL_ASSOCIATIVITY_SHIFT	16
 #define L2X0_AUX_CTRL_WAY_SIZE_SHIFT		17
 #define L2X0_AUX_CTRL_WAY_SIZE_MASK		(0x7 << 17)
+#define L2X0_AUX_CTRL_EVNT_MON_BUS_EN_SHIFT	20
 #define L2X0_AUX_CTRL_SHARE_OVERRIDE_SHIFT	22
 #define L2X0_AUX_CTRL_L2_FORCE_NWA_SHIFT	23
 #define L2X0_AUX_CTRL_NS_LOCKDOWN_SHIFT		26
diff --git a/arch/arm/include/asm/hardware/gic.h b/arch/arm/include/asm/hardware/gic.h
index 3783ff3..e067a08 100644
--- a/arch/arm/include/asm/hardware/gic.h
+++ b/arch/arm/include/asm/hardware/gic.h
@@ -40,7 +40,7 @@
 extern struct irq_chip gic_arch_extn;
 
 void gic_init_bases(unsigned int, int, void __iomem *, void __iomem *,
-		    u32 offset);
+		    u32 offset, struct device_node *);
 int gic_of_init(struct device_node *node, struct device_node *parent);
 void gic_secondary_init(unsigned int);
 void gic_handle_irq(struct pt_regs *regs);
@@ -56,7 +56,7 @@
 static inline void gic_init(unsigned int nr, int start,
 			    void __iomem *dist , void __iomem *cpu)
 {
-	gic_init_bases(nr, start, dist, cpu, 0);
+	gic_init_bases(nr, start, dist, cpu, 0, NULL);
 }
 void gic_set_irq_secure(unsigned int irq);
 #endif
diff --git a/arch/arm/include/asm/mach/arch.h b/arch/arm/include/asm/mach/arch.h
index 0a45dee..669a626 100644
--- a/arch/arm/include/asm/mach/arch.h
+++ b/arch/arm/include/asm/mach/arch.h
@@ -22,7 +22,7 @@
 	const char *const 	*dt_compat;	/* array of device tree
 						 * 'compatible' strings	*/
 
-	int			nr_irqs;	/* number of IRQs */
+	unsigned int		nr_irqs;	/* number of IRQs */
 
 #ifdef CONFIG_ZONE_DMA
 	unsigned long		dma_zone_size;	/* size of DMA-able area */
diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c
index bc48bff..65c8c0f 100644
--- a/arch/arm/kernel/irq.c
+++ b/arch/arm/kernel/irq.c
@@ -132,18 +132,8 @@
 #ifdef CONFIG_SPARSE_IRQ
 int __init arch_probe_nr_irqs(void)
 {
-	/*
-	 * machine_desc->nr_irqs < 0 is a special case that
-	 * specifies not to preallocate any irq_descs.
-	 */
-	if (machine_desc->nr_irqs < 0) {
-		nr_irqs = 0;
-		return nr_irqs;
-	} else {
-		nr_irqs = machine_desc->nr_irqs ?
-			  machine_desc->nr_irqs : NR_IRQS;
-		return nr_irqs;
-	}
+	nr_irqs = machine_desc->nr_irqs ? machine_desc->nr_irqs : NR_IRQS;
+	return nr_irqs;
 }
 #endif
 
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index e37b28b..778128b 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -616,16 +616,14 @@
 	atomic_set(&armpmu->active_events, 0);
 	mutex_init(&armpmu->reserve_mutex);
 
-	armpmu->pmu = (struct pmu) {
-		.pmu_enable	= armpmu_enable,
-		.pmu_disable	= armpmu_disable,
-		.event_init	= armpmu_event_init,
-		.add		= armpmu_add,
-		.del		= armpmu_del,
-		.start		= armpmu_start,
-		.stop		= armpmu_stop,
-		.read		= armpmu_read,
-	};
+	armpmu->pmu.pmu_enable = armpmu_enable;
+	armpmu->pmu.pmu_disable = armpmu_disable;
+	armpmu->pmu.event_init = armpmu_event_init;
+	armpmu->pmu.add = armpmu_add;
+	armpmu->pmu.del = armpmu_del;
+	armpmu->pmu.start = armpmu_start;
+	armpmu->pmu.stop = armpmu_stop;
+	armpmu->pmu.read = armpmu_read;
 }
 
 int armpmu_register(struct arm_pmu *armpmu, char *name, int type)
@@ -857,14 +855,12 @@
 		case 0x02D0:    /* 8x60 */
 //			fabricmon_pmu_init();
 			cpu_pmu = armv7_scorpionmp_pmu_init();
-//			scorpionmp_l2_pmu_init();
 			break;
 		case 0x0490:    /* 8960 sim */
 		case 0x04D0:    /* 8960 */
 		case 0x06F0:    /* 8064 */
 //			fabricmon_pmu_init();
 			cpu_pmu = armv7_krait_pmu_init();
-//			krait_l2_pmu_init();
 			break;
 		}
 	}
diff --git a/arch/arm/kernel/perf_event_msm.c b/arch/arm/kernel/perf_event_msm.c
index 46fa8fe..90c9c9e 100644
--- a/arch/arm/kernel/perf_event_msm.c
+++ b/arch/arm/kernel/perf_event_msm.c
@@ -720,6 +720,8 @@
 	.start			= armv7pmu_start,
 	.stop			= armv7pmu_stop,
 	.reset			= scorpion_pmu_reset,
+	.test_set_event_constraints	= msm_test_set_ev_constraint,
+	.clear_event_constraints	= msm_clear_ev_constraint,
 	.max_period		= (1LLU << 32) - 1,
 };
 
@@ -728,6 +730,7 @@
 	scorpion_pmu.id		= ARM_PERF_PMU_ID_SCORPION;
 	scorpion_pmu.name	= "ARMv7 Scorpion";
 	scorpion_pmu.num_events	= armv7_read_num_pmnc_events();
+	scorpion_pmu.pmu.attr_groups	= msm_l1_pmu_attr_grps;
 	scorpion_clear_pmuregs();
 	return &scorpion_pmu;
 }
@@ -737,6 +740,7 @@
 	scorpion_pmu.id		= ARM_PERF_PMU_ID_SCORPIONMP;
 	scorpion_pmu.name	= "ARMv7 Scorpion-MP";
 	scorpion_pmu.num_events	= armv7_read_num_pmnc_events();
+	scorpion_pmu.pmu.attr_groups	= msm_l1_pmu_attr_grps;
 	scorpion_clear_pmuregs();
 	return &scorpion_pmu;
 }
diff --git a/arch/arm/kernel/perf_event_msm_krait.c b/arch/arm/kernel/perf_event_msm_krait.c
index 1b115b4..8d8f47a 100644
--- a/arch/arm/kernel/perf_event_msm_krait.c
+++ b/arch/arm/kernel/perf_event_msm_krait.c
@@ -573,10 +573,10 @@
  */
 static int msm_test_set_ev_constraint(struct perf_event *event)
 {
-	u32 krait_evt_type = event->attr.config & KRAIT_EVENT_MASK;
-	u8 prefix = (krait_evt_type & 0xF0000) >> 16;
-	u8 reg = (krait_evt_type & 0x0F000) >> 12;
-	u8 group = krait_evt_type & 0x0000F;
+	u32 evt_type = event->attr.config & KRAIT_EVENT_MASK;
+	u8 prefix = (evt_type & 0xF0000) >> 16;
+	u8 reg = (evt_type & 0x0F000) >> 12;
+	u8 group = evt_type & 0x0000F;
 	u64 cpu_pmu_bitmap = __get_cpu_var(pmu_bitmap);
 	u64 bitmap_t;
 
@@ -598,10 +598,10 @@
 
 static int msm_clear_ev_constraint(struct perf_event *event)
 {
-	u32 krait_evt_type = event->attr.config & KRAIT_EVENT_MASK;
-	u8 prefix = (krait_evt_type & 0xF0000) >> 16;
-	u8 reg = (krait_evt_type & 0x0F000) >> 12;
-	u8 group = krait_evt_type & 0x0000F;
+	u32 evt_type = event->attr.config & KRAIT_EVENT_MASK;
+	u8 prefix = (evt_type & 0xF0000) >> 16;
+	u8 reg = (evt_type & 0x0F000) >> 12;
+	u8 group = evt_type & 0x0000F;
 	u64 cpu_pmu_bitmap = __get_cpu_var(pmu_bitmap);
 	u64 bitmap_t;
 
@@ -636,6 +636,34 @@
 	.max_period		= (1LLU << 32) - 1,
 };
 
+/* NRCCG format for perf RAW codes. */
+PMU_FORMAT_ATTR(prefix,	"config:16-19");
+PMU_FORMAT_ATTR(reg,	"config:12-15");
+PMU_FORMAT_ATTR(code,	"config:4-11");
+PMU_FORMAT_ATTR(grp,	"config:0-3");
+
+static struct attribute *msm_l1_ev_formats[] = {
+	&format_attr_prefix.attr,
+	&format_attr_reg.attr,
+	&format_attr_code.attr,
+	&format_attr_grp.attr,
+	NULL,
+};
+
+/*
+ * Format group is essential to access PMU's from userspace
+ * via their .name field.
+ */
+static struct attribute_group msm_pmu_format_group = {
+	.name = "format",
+	.attrs = msm_l1_ev_formats,
+};
+
+static const struct attribute_group *msm_l1_pmu_attr_grps[] = {
+	&msm_pmu_format_group,
+	NULL,
+};
+
 int get_krait_ver(void)
 {
 	int ver = 0;
@@ -655,6 +683,7 @@
 	krait_pmu.name	        = "ARMv7 Krait";
 	krait_pmu.map_event	= krait_8960_map_event;
 	krait_pmu.num_events	= armv7_read_num_pmnc_events();
+	krait_pmu.pmu.attr_groups	= msm_l1_pmu_attr_grps;
 	krait_clear_pmuregs();
 
 	krait_ver = get_krait_ver();
diff --git a/arch/arm/mach-msm/Kconfig b/arch/arm/mach-msm/Kconfig
index 24c57ae..d1ab4dd 100644
--- a/arch/arm/mach-msm/Kconfig
+++ b/arch/arm/mach-msm/Kconfig
@@ -38,6 +38,7 @@
 	select MSM_PM2 if PM
 	select MSM_RUN_QUEUE_STATS if MSM_SOC_REV_A
 	select DONT_MAP_HOLE_AFTER_MEMBANK0
+	select MIGHT_HAVE_CACHE_L2X0
 
 config ARCH_MSM7X30
 	bool "MSM7x30"
@@ -288,6 +289,7 @@
 	select MULTI_IRQ_HANDLER
 	select ARM_TICKET_LOCKS
 	select MSM_RUN_QUEUE_STATS
+	select MIGHT_HAVE_CACHE_L2X0
 
 config ARCH_MSM9625
 	bool "MSM9625"
@@ -369,6 +371,7 @@
 	select MULTI_IRQ_HANDLER
 	select ARM_GIC
 	select ARCH_MSM_CORTEXMP
+	select MIGHT_HAVE_CACHE_L2X0
 
 config  MSM_VIC
 	bool
diff --git a/arch/arm/mach-msm/Makefile b/arch/arm/mach-msm/Makefile
index c3b13ec..8315d70 100644
--- a/arch/arm/mach-msm/Makefile
+++ b/arch/arm/mach-msm/Makefile
@@ -317,7 +317,7 @@
 	obj-$(CONFIG_ARCH_MSM9615) += rpm_resources.o
 endif
 ifdef CONFIG_MSM_RPM_SMD
-	obj-$(CONFIG_ARCH_MSMCOPPER) += lpm_levels.o
+	obj-$(CONFIG_ARCH_MSMCOPPER) += lpm_levels.o lpm_resources.o
 endif
 obj-$(CONFIG_MSM_MPM) += mpm.o
 obj-$(CONFIG_MSM_RPM_STATS_LOG) += rpm_stats.o
diff --git a/arch/arm/mach-msm/acpuclock-copper.c b/arch/arm/mach-msm/acpuclock-copper.c
index f0da74c..7ba2e7d 100644
--- a/arch/arm/mach-msm/acpuclock-copper.c
+++ b/arch/arm/mach-msm/acpuclock-copper.c
@@ -62,68 +62,42 @@
 		.hfpll_data = &hfpll_data_cpu,
 		.l2cpmr_iaddr = 0x4501,
 		.vreg[VREG_CORE] = { "krait0",     1050000, 3200000 },
-		.vreg[VREG_MEM]  = { "krait0_mem", 1050000, 0,
-				     RPM_VREG_VOTER1,
-				     RPM_VREG_ID_PM8941_S1 },
-		.vreg[VREG_DIG]  = { "krait0_dig", 1050000, 0,
-				     RPM_VREG_VOTER1,
-				     RPM_VREG_ID_PM8941_S2 },
-		.vreg[VREG_HFPLL_A] = { "hfpll", 1800000, 0,
-				     RPM_VREG_VOTER1,
-				     RPM_VREG_ID_PM8941_L12 },
+		.vreg[VREG_MEM]  = { "krait0_mem", 1050000 },
+		.vreg[VREG_DIG]  = { "krait0_dig", 1050000 },
+		.vreg[VREG_HFPLL_A] = { "krait0_hfpll", 1800000 },
 	},
 	[CPU1] = {
 		.hfpll_phys_base = 0xF909A000,
 		.hfpll_data = &hfpll_data_cpu,
 		.l2cpmr_iaddr = 0x5501,
 		.vreg[VREG_CORE] = { "krait1",     1050000, 3200000 },
-		.vreg[VREG_MEM]  = { "krait1_mem", 1050000, 0,
-				     RPM_VREG_VOTER2,
-				     RPM_VREG_ID_PM8941_S1 },
-		.vreg[VREG_DIG]  = { "krait1_dig", 1050000, 0,
-				     RPM_VREG_VOTER2,
-				     RPM_VREG_ID_PM8941_S2 },
-		.vreg[VREG_HFPLL_A] = { "hfpll", 1800000, 0,
-				     RPM_VREG_VOTER2,
-				     RPM_VREG_ID_PM8941_L12 },
+		.vreg[VREG_MEM]  = { "krait1_mem", 1050000 },
+		.vreg[VREG_DIG]  = { "krait1_dig", 1050000 },
+		.vreg[VREG_HFPLL_A] = { "krait1_hfpll", 1800000 },
 	},
 	[CPU2] = {
 		.hfpll_phys_base = 0xF90AA000,
 		.hfpll_data = &hfpll_data_cpu,
 		.l2cpmr_iaddr = 0x6501,
 		.vreg[VREG_CORE] = { "krait2",     1050000, 3200000 },
-		.vreg[VREG_MEM]  = { "krait2_mem", 1050000, 0,
-				     RPM_VREG_VOTER4,
-				     RPM_VREG_ID_PM8921_S1 },
-		.vreg[VREG_DIG]  = { "krait2_dig", 1050000, 0,
-				     RPM_VREG_VOTER4,
-				     RPM_VREG_ID_PM8921_S2 },
-		.vreg[VREG_HFPLL_A] = { "hfpll", 1800000, 0,
-				     RPM_VREG_VOTER4,
-				     RPM_VREG_ID_PM8941_L12 },
+		.vreg[VREG_MEM]  = { "krait2_mem", 1050000 },
+		.vreg[VREG_DIG]  = { "krait2_dig", 1050000 },
+		.vreg[VREG_HFPLL_A] = { "krait2_hfpll", 1800000 },
 	},
 	[CPU3] = {
 		.hfpll_phys_base = 0xF90BA000,
 		.hfpll_data = &hfpll_data_cpu,
 		.l2cpmr_iaddr = 0x7501,
 		.vreg[VREG_CORE] = { "krait3",     1050000, 3200000 },
-		.vreg[VREG_MEM]  = { "krait3_mem", 1050000, 0,
-				     RPM_VREG_VOTER5,
-				     RPM_VREG_ID_PM8941_S1 },
-		.vreg[VREG_DIG]  = { "krait3_dig", 1050000, 0,
-				     RPM_VREG_VOTER5,
-				     RPM_VREG_ID_PM8941_S2 },
-		.vreg[VREG_HFPLL_A] = { "hfpll", 1800000, 0,
-				     RPM_VREG_VOTER5,
-				     RPM_VREG_ID_PM8941_L12 },
+		.vreg[VREG_MEM]  = { "krait3_mem", 1050000 },
+		.vreg[VREG_DIG]  = { "krait3_dig", 1050000 },
+		.vreg[VREG_HFPLL_A] = { "krait3_hfpll", 1800000 },
 	},
 	[L2] = {
 		.hfpll_phys_base = 0xF9016000,
 		.hfpll_data = &hfpll_data_l2,
 		.l2cpmr_iaddr = 0x0500,
-		.vreg[VREG_HFPLL_A] = { "hfpll", 1800000, 0,
-				     RPM_VREG_VOTER6,
-				     RPM_VREG_ID_PM8941_L12 },
+		.vreg[VREG_HFPLL_A] = { "l2_hfpll", 1800000 },
 	},
 };
 
diff --git a/arch/arm/mach-msm/acpuclock-krait.c b/arch/arm/mach-msm/acpuclock-krait.c
index 5682ac3..4dc47d2 100644
--- a/arch/arm/mach-msm/acpuclock-krait.c
+++ b/arch/arm/mach-msm/acpuclock-krait.c
@@ -32,6 +32,7 @@
 #include <mach/socinfo.h>
 #include <mach/msm-krait-l2-accessors.h>
 #include <mach/rpm-regulator.h>
+#include <mach/rpm-regulator-smd.h>
 #include <mach/msm_bus.h>
 
 #include "acpuclock.h"
@@ -52,7 +53,7 @@
 static DEFINE_SPINLOCK(l2_lock);
 
 static struct drv_data {
-	const struct acpu_level *acpu_freq_tbl;
+	struct acpu_level *acpu_freq_tbl;
 	const struct l2_level *l2_freq_tbl;
 	struct scalable *scalable;
 	u32 bus_perf_client;
@@ -92,35 +93,39 @@
 	udelay(1);
 }
 
-/* Enable an already-configured HFPLL. */
-static void hfpll_enable(struct scalable *sc, bool skip_regulators)
+static void enable_rpm_vreg(struct vreg *vreg)
 {
 	int rc;
 
+	if (vreg->rpm_reg) {
+		rc = rpm_regulator_enable(vreg->rpm_reg);
+		if (rc) {
+			dev_err(drv.dev, "%s regulator enable failed (%d)\n",
+				vreg->name, rc);
+			BUG();
+		}
+	}
+}
+
+static void disable_rpm_vreg(struct vreg *vreg)
+{
+	int rc;
+
+	if (vreg->rpm_reg) {
+		rc = rpm_regulator_disable(vreg->rpm_reg);
+		if (rc)
+			dev_err(drv.dev, "%s regulator disable failed (%d)\n",
+				vreg->name, rc);
+	}
+}
+
+/* Enable an already-configured HFPLL. */
+static void hfpll_enable(struct scalable *sc, bool skip_regulators)
+{
 	if (!skip_regulators) {
 		/* Enable regulators required by the HFPLL. */
-		if (sc->vreg[VREG_HFPLL_A].rpm_vreg_id) {
-			rc = rpm_vreg_set_voltage(
-				sc->vreg[VREG_HFPLL_A].rpm_vreg_id,
-				sc->vreg[VREG_HFPLL_A].rpm_vreg_voter,
-				sc->vreg[VREG_HFPLL_A].cur_vdd,
-				sc->vreg[VREG_HFPLL_A].max_vdd, 0);
-			if (rc)
-				dev_err(drv.dev,
-					"%s regulator enable failed (%d)\n",
-					sc->vreg[VREG_HFPLL_A].name, rc);
-		}
-		if (sc->vreg[VREG_HFPLL_B].rpm_vreg_id) {
-			rc = rpm_vreg_set_voltage(
-				sc->vreg[VREG_HFPLL_B].rpm_vreg_id,
-				sc->vreg[VREG_HFPLL_B].rpm_vreg_voter,
-				sc->vreg[VREG_HFPLL_B].cur_vdd,
-				sc->vreg[VREG_HFPLL_B].max_vdd, 0);
-			if (rc)
-				dev_err(drv.dev,
-					"%s regulator enable failed (%d)\n",
-					sc->vreg[VREG_HFPLL_B].name, rc);
-		}
+		enable_rpm_vreg(&sc->vreg[VREG_HFPLL_A]);
+		enable_rpm_vreg(&sc->vreg[VREG_HFPLL_B]);
 	}
 
 	/* Disable PLL bypass mode. */
@@ -147,8 +152,6 @@
 /* Disable a HFPLL for power-savings or while it's being reprogrammed. */
 static void hfpll_disable(struct scalable *sc, bool skip_regulators)
 {
-	int rc;
-
 	/*
 	 * Disable the PLL output, disable test mode, enable the bypass mode,
 	 * and assert the reset.
@@ -157,26 +160,8 @@
 
 	if (!skip_regulators) {
 		/* Remove voltage votes required by the HFPLL. */
-		if (sc->vreg[VREG_HFPLL_B].rpm_vreg_id) {
-			rc = rpm_vreg_set_voltage(
-				sc->vreg[VREG_HFPLL_B].rpm_vreg_id,
-				sc->vreg[VREG_HFPLL_B].rpm_vreg_voter,
-				0, 0, 0);
-			if (rc)
-				dev_err(drv.dev,
-					"%s regulator enable failed (%d)\n",
-					sc->vreg[VREG_HFPLL_B].name, rc);
-		}
-		if (sc->vreg[VREG_HFPLL_A].rpm_vreg_id) {
-			rc = rpm_vreg_set_voltage(
-				sc->vreg[VREG_HFPLL_A].rpm_vreg_id,
-				sc->vreg[VREG_HFPLL_A].rpm_vreg_voter,
-				0, 0, 0);
-			if (rc)
-				dev_err(drv.dev,
-					"%s regulator enable failed (%d)\n",
-					sc->vreg[VREG_HFPLL_A].name, rc);
-		}
+		disable_rpm_vreg(&sc->vreg[VREG_HFPLL_B]);
+		disable_rpm_vreg(&sc->vreg[VREG_HFPLL_A]);
 	}
 }
 
@@ -228,19 +213,19 @@
 		set_pri_clk_src(sc, PRI_SRC_SEL_SEC_SRC);
 
 		/* Re-program HFPLL. */
-		hfpll_disable(sc, 1);
+		hfpll_disable(sc, true);
 		hfpll_set_rate(sc, tgt_s);
-		hfpll_enable(sc, 1);
+		hfpll_enable(sc, true);
 
 		/* Move to HFPLL. */
 		set_pri_clk_src(sc, tgt_s->pri_src_sel);
 	} else if (strt_s->src == HFPLL && tgt_s->src != HFPLL) {
 		set_sec_clk_src(sc, tgt_s->sec_src_sel);
 		set_pri_clk_src(sc, tgt_s->pri_src_sel);
-		hfpll_disable(sc, 0);
+		hfpll_disable(sc, false);
 	} else if (strt_s->src != HFPLL && tgt_s->src == HFPLL) {
 		hfpll_set_rate(sc, tgt_s);
-		hfpll_enable(sc, 0);
+		hfpll_enable(sc, false);
 		set_pri_clk_src(sc, tgt_s->pri_src_sel);
 	} else {
 		set_sec_clk_src(sc, tgt_s->sec_src_sel);
@@ -261,9 +246,8 @@
 	 * vdd_mem should be >= vdd_dig.
 	 */
 	if (vdd_mem > sc->vreg[VREG_MEM].cur_vdd) {
-		rc = rpm_vreg_set_voltage(sc->vreg[VREG_MEM].rpm_vreg_id,
-				sc->vreg[VREG_MEM].rpm_vreg_voter, vdd_mem,
-				sc->vreg[VREG_MEM].max_vdd, 0);
+		rc = rpm_regulator_set_voltage(sc->vreg[VREG_MEM].rpm_reg,
+				vdd_mem, sc->vreg[VREG_MEM].max_vdd);
 		if (rc) {
 			dev_err(drv.dev,
 				"vdd_mem (cpu%d) increase failed (%d)\n",
@@ -275,9 +259,8 @@
 
 	/* Increase vdd_dig active-set vote. */
 	if (vdd_dig > sc->vreg[VREG_DIG].cur_vdd) {
-		rc = rpm_vreg_set_voltage(sc->vreg[VREG_DIG].rpm_vreg_id,
-				sc->vreg[VREG_DIG].rpm_vreg_voter, vdd_dig,
-				sc->vreg[VREG_DIG].max_vdd, 0);
+		rc = rpm_regulator_set_voltage(sc->vreg[VREG_DIG].rpm_reg,
+				vdd_dig, sc->vreg[VREG_DIG].max_vdd);
 		if (rc) {
 			dev_err(drv.dev,
 				"vdd_dig (cpu%d) increase failed (%d)\n",
@@ -336,9 +319,8 @@
 
 	/* Decrease vdd_dig active-set vote. */
 	if (vdd_dig < sc->vreg[VREG_DIG].cur_vdd) {
-		ret = rpm_vreg_set_voltage(sc->vreg[VREG_DIG].rpm_vreg_id,
-				sc->vreg[VREG_DIG].rpm_vreg_voter, vdd_dig,
-				sc->vreg[VREG_DIG].max_vdd, 0);
+		ret = rpm_regulator_set_voltage(sc->vreg[VREG_DIG].rpm_reg,
+				vdd_dig, sc->vreg[VREG_DIG].max_vdd);
 		if (ret) {
 			dev_err(drv.dev,
 				"vdd_dig (cpu%d) decrease failed (%d)\n",
@@ -353,9 +335,8 @@
 	 * vdd_mem should be >= vdd_dig.
 	 */
 	if (vdd_mem < sc->vreg[VREG_MEM].cur_vdd) {
-		ret = rpm_vreg_set_voltage(sc->vreg[VREG_MEM].rpm_vreg_id,
-				sc->vreg[VREG_MEM].rpm_vreg_voter, vdd_mem,
-				sc->vreg[VREG_MEM].max_vdd, 0);
+		ret = rpm_regulator_set_voltage(sc->vreg[VREG_MEM].rpm_reg,
+				vdd_mem, sc->vreg[VREG_MEM].max_vdd);
 		if (ret) {
 			dev_err(drv.dev,
 				"vdd_mem (cpu%d) decrease failed (%d)\n",
@@ -484,7 +465,7 @@
 	pr_debug("Initializing HFPLL%d\n", sc - drv.scalable);
 
 	/* Disable the PLL for re-programming. */
-	hfpll_disable(sc, 1);
+	hfpll_disable(sc, true);
 
 	/* Configure PLL parameters for integer mode. */
 	writel_relaxed(sc->hfpll_data->config_val,
@@ -492,13 +473,49 @@
 	writel_relaxed(0, sc->hfpll_base + sc->hfpll_data->m_offset);
 	writel_relaxed(1, sc->hfpll_base + sc->hfpll_data->n_offset);
 
+	/* Program droop controller, if supported */
+	if (sc->hfpll_data->has_droop_ctl)
+		writel_relaxed(sc->hfpll_data->droop_val,
+			       sc->hfpll_base + sc->hfpll_data->droop_offset);
+
 	/* Set an initial rate and enable the PLL. */
 	hfpll_set_rate(sc, tgt_s);
-	hfpll_enable(sc, 0);
+	hfpll_enable(sc, false);
+}
+
+static void __init rpm_regulator_init(struct scalable *sc, enum vregs vreg,
+				      int vdd, bool enable)
+{
+	int ret;
+
+	if (!sc->vreg[vreg].name)
+		return;
+
+	sc->vreg[vreg].rpm_reg = rpm_regulator_get(drv.dev,
+						   sc->vreg[vreg].name);
+	if (IS_ERR(sc->vreg[vreg].rpm_reg)) {
+		dev_err(drv.dev, "rpm_regulator_get(%s) failed (%ld)\n",
+			sc->vreg[vreg].name,
+			PTR_ERR(sc->vreg[vreg].rpm_reg));
+		BUG();
+	}
+
+	ret = rpm_regulator_set_voltage(sc->vreg[vreg].rpm_reg, vdd,
+					sc->vreg[vreg].max_vdd);
+	if (ret) {
+		dev_err(drv.dev, "%s initialization failed (%d)\n",
+			sc->vreg[vreg].name, ret);
+		BUG();
+	}
+	sc->vreg[vreg].cur_vdd = vdd;
+
+	if (enable)
+		enable_rpm_vreg(&sc->vreg[vreg]);
 }
 
 /* Voltage regulator initialization. */
-static void __init regulator_init(const struct acpu_level *lvl)
+static void __init regulator_init(struct device *dev,
+				  const struct acpu_level *lvl)
 {
 	int cpu, ret;
 	struct scalable *sc;
@@ -507,33 +524,23 @@
 	vdd_mem = calculate_vdd_mem(lvl);
 	vdd_dig = calculate_vdd_dig(lvl);
 
+	rpm_regulator_init(&drv.scalable[L2], VREG_HFPLL_A,
+			   drv.scalable[L2].vreg[VREG_HFPLL_A].max_vdd, false);
+	rpm_regulator_init(&drv.scalable[L2], VREG_HFPLL_B,
+			   drv.scalable[L2].vreg[VREG_HFPLL_B].max_vdd, false);
+
 	for_each_possible_cpu(cpu) {
 		sc = &drv.scalable[cpu];
 
-		/* Set initial vdd_mem vote. */
-		ret = rpm_vreg_set_voltage(sc->vreg[VREG_MEM].rpm_vreg_id,
-				sc->vreg[VREG_MEM].rpm_vreg_voter, vdd_mem,
-				sc->vreg[VREG_MEM].max_vdd, 0);
-		if (ret) {
-			dev_err(drv.dev, "%s initialization failed (%d)\n",
-				sc->vreg[VREG_MEM].name, ret);
-			BUG();
-		}
-		sc->vreg[VREG_MEM].cur_vdd  = vdd_mem;
-
-		/* Set initial vdd_dig vote. */
-		ret = rpm_vreg_set_voltage(sc->vreg[VREG_DIG].rpm_vreg_id,
-				sc->vreg[VREG_DIG].rpm_vreg_voter, vdd_dig,
-				sc->vreg[VREG_DIG].max_vdd, 0);
-		if (ret) {
-			dev_err(drv.dev, "%s initialization failed (%d)\n",
-				sc->vreg[VREG_DIG].name, ret);
-			BUG();
-		}
-		sc->vreg[VREG_DIG].cur_vdd  = vdd_dig;
+		rpm_regulator_init(sc, VREG_MEM, vdd_mem, true);
+		rpm_regulator_init(sc, VREG_DIG, vdd_dig, true);
+		rpm_regulator_init(sc, VREG_HFPLL_A,
+				   sc->vreg[VREG_HFPLL_A].max_vdd, false);
+		rpm_regulator_init(sc, VREG_HFPLL_B,
+				   sc->vreg[VREG_HFPLL_B].max_vdd, false);
 
 		/* Setup Krait CPU regulators and initial core voltage. */
-		sc->vreg[VREG_CORE].reg = regulator_get(NULL,
+		sc->vreg[VREG_CORE].reg = regulator_get(dev,
 					  sc->vreg[VREG_CORE].name);
 		if (IS_ERR(sc->vreg[VREG_CORE].reg)) {
 			dev_err(drv.dev, "regulator_get(%s) failed (%ld)\n",
@@ -571,10 +578,15 @@
 				      const struct core_speed *tgt_s)
 {
 	u32 regval;
+	void __iomem *aux_reg;
 
 	/* Program AUX source input to the secondary MUX. */
-	if (sc->aux_clk_sel_addr)
-		writel_relaxed(sc->aux_clk_sel, sc->aux_clk_sel_addr);
+	if (sc->aux_clk_sel_phys) {
+		aux_reg = ioremap(sc->aux_clk_sel_phys, 4);
+		BUG_ON(!aux_reg);
+		writel_relaxed(sc->aux_clk_sel, aux_reg);
+		iounmap(aux_reg);
+	}
 
 	/* Switch away from the HFPLL while it's re-initialized. */
 	set_sec_clk_src(sc, SEC_SRC_SEL_AUX);
@@ -691,8 +703,27 @@
 	.notifier_call = acpuclk_cpu_callback,
 };
 
+static int krait_needs_vmin(void)
+{
+	switch (read_cpuid_id()) {
+	case 0x511F04D0: /* KR28M2A20 */
+	case 0x511F04D1: /* KR28M2A21 */
+	case 0x510F06F0: /* KR28M4A10 */
+		return 1;
+	default:
+		return 0;
+	}
+}
+
+static void krait_apply_vmin(struct acpu_level *tbl)
+{
+	for (; tbl->speed.khz != 0; tbl++)
+		if (tbl->vdd_core < 1150000)
+			tbl->vdd_core = 1150000;
+}
+
 static const struct acpu_level __init *select_freq_plan(
-		const struct acpu_level *const *pvs_tbl, u32 qfprom_phys)
+		struct acpu_level *const *pvs_tbl, u32 qfprom_phys)
 {
 	const struct acpu_level *l, *max_acpu_level = NULL;
 	void __iomem *qfprom_base;
@@ -735,6 +766,9 @@
 	}
 	drv.acpu_freq_tbl = pvs_tbl[tbl_idx];
 
+	if (krait_needs_vmin())
+		krait_apply_vmin(drv.acpu_freq_tbl);
+
 	/* Find the max supported scaling frequency. */
 	for (l = drv.acpu_freq_tbl; l->speed.khz != 0; l++)
 		if (l->use_for_scaling)
@@ -769,7 +803,7 @@
 
 	max_acpu_level = select_freq_plan(params->pvs_acpu_freq_tbl,
 					  params->qfprom_phys_base);
-	regulator_init(max_acpu_level);
+	regulator_init(dev, max_acpu_level);
 	bus_init(params->bus_scale_data, max_acpu_level->l2_level->bw_level);
 	init_clock_sources(&drv.scalable[L2], &max_acpu_level->l2_level->speed);
 	for_each_online_cpu(cpu)
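The acpuclock-krait.c conversion above folds the duplicated rpm_vreg_set_voltage() blocks into enable_rpm_vreg(), disable_rpm_vreg() and rpm_regulator_init() helpers that skip slots that were never populated. A standalone sketch of that guarded-helper shape, using a stand-in handle type and a pretend backend rather than the rpm_regulator API:

#include <stdio.h>

struct handle {
	const char *name;	/* stand-in for a regulator handle */
};

/* Pretend backend call; in the patch this role is played by
 * rpm_regulator_enable(). Always succeeds here. */
static int backend_enable(struct handle *h)
{
	printf("enabling %s\n", h->name);
	return 0;
}

static void enable_if_present(struct handle *h)
{
	if (!h)
		return;		/* unpopulated slot: silently skipped */

	if (backend_enable(h))
		fprintf(stderr, "%s enable failed\n", h->name);
}

int main(void)
{
	struct handle hfpll_a = { "hfpll_a" };

	enable_if_present(&hfpll_a);	/* configured slot */
	enable_if_present(NULL);	/* slot that was never set up */
	return 0;
}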
diff --git a/arch/arm/mach-msm/acpuclock-krait.h b/arch/arm/mach-msm/acpuclock-krait.h
index fbf1f5f..7c1d2b6 100644
--- a/arch/arm/mach-msm/acpuclock-krait.h
+++ b/arch/arm/mach-msm/acpuclock-krait.h
@@ -40,6 +40,7 @@
 	PLL_0 = 0,
 	HFPLL,
 	QSB,
+	PLL_8,
 };
 
 /**
@@ -91,18 +92,17 @@
  * struct vreg - Voltage regulator data.
  * @name: Name of regulator.
  * @max_vdd: Limit the maximum-settable voltage.
- * @rpm_vreg_id: ID to use with rpm_vreg_*() APIs.
  * @reg: Regulator handle.
+ * @rpm_reg: RPM Regulator handle.
  * @cur_vdd: Last-set voltage in uV.
  * @peak_ua: Maximum current draw expected in uA.
  */
 struct vreg {
-	const char name[15];
+	const char *name;
 	const int max_vdd;
 	const int peak_ua;
-	const int rpm_vreg_voter;
-	const int rpm_vreg_id;
 	struct regulator *reg;
+	struct rpm_regulator *rpm_reg;
 	int cur_vdd;
 };
 
@@ -147,7 +147,7 @@
 	const int use_for_scaling;
 	const struct core_speed speed;
 	const struct l2_level *l2_level;
-	const int vdd_core;
+	int vdd_core;
 };
 
 /**
@@ -158,6 +158,10 @@
  * @n_offset: "N" value register offset from base address.
  * @config_offset: Configuration register offset from base address.
  * @config_val: Value to initialize the @config_offset register to.
+ * @has_droop_ctl: Indicates the presence of a voltage droop controller.
+ * @droop_offset: Droop controller register offset from base address.
+ * @droop_val: Value to initialize the @droop_offset register to.
+ * @low_vdd_l_max: Maximum "L" value supported at HFPLL_VDD_LOW.
  * @vdd: voltage requirements for each VDD level.
  */
 struct hfpll_data {
@@ -167,6 +171,9 @@
 	const u32 n_offset;
 	const u32 config_offset;
 	const u32 config_val;
+	const bool has_droop_ctl;
+	const u32 droop_offset;
+	const u32 droop_val;
 	const u32 low_vdd_l_max;
 	const int vdd[NUM_HFPLL_VDD];
 };
@@ -175,7 +182,7 @@
  * struct scalable - Register locations and state associated with a scalable HW.
  * @hfpll_phys_base: Physical base address of HFPLL register.
  * @hfpll_base: Virtual base address of HFPLL registers.
- * @aux_clk_sel_addr: Virtual address of auxiliary MUX.
+ * @aux_clk_sel_phys: Physical address of auxiliary MUX.
  * @aux_clk_sel: Auxiliary mux input to select at boot.
  * @l2cpmr_iaddr: Indirect address of the CPMR MUX/divider CP15 register.
  * @hfpll_data: Descriptive data of HFPLL hardware.
@@ -184,9 +191,9 @@
  * @vreg: Array of voltage regulators needed by the scalable.
  */
 struct scalable {
-	const u32 hfpll_phys_base;
+	const phys_addr_t hfpll_phys_base;
 	void __iomem *hfpll_base;
-	void __iomem *aux_clk_sel_addr;
+	const phys_addr_t aux_clk_sel_phys;
 	const u32 aux_clk_sel;
 	const u32 l2cpmr_iaddr;
 	const struct hfpll_data *hfpll_data;
@@ -206,10 +213,10 @@
  */
 struct acpuclk_krait_params {
 	struct scalable *scalable;
-	const struct acpu_level *pvs_acpu_freq_tbl[NUM_PVS];
+	struct acpu_level *pvs_acpu_freq_tbl[NUM_PVS];
 	const struct l2_level *l2_freq_tbl;
 	const size_t l2_freq_tbl_size;
-	const u32 qfprom_phys_base;
+	const phys_addr_t qfprom_phys_base;
 	struct msm_bus_scale_pdata *bus_scale_data;
 };
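acpuclock-krait.h now types hfpll_phys_base, aux_clk_sel_phys and qfprom_phys_base as phys_addr_t rather than u32. The patch does not state the reason; one plausible motivation (an assumption here) is that phys_addr_t widens with the platform while a u32 silently truncates physical addresses above 4 GiB, which the standalone sketch below demonstrates with a made-up address:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* hypothetical physical address just past the 32-bit boundary */
	uint64_t phys = 0x1F9016000ULL;
	uint32_t as_u32 = (uint32_t)phys;	/* what a u32 field would keep */

	printf("64-bit value:  0x%" PRIx64 "\n", phys);
	printf("stored in u32: 0x%" PRIx32 "\n", as_u32);
	return 0;
}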
 
diff --git a/arch/arm/mach-msm/board-8064-display.c b/arch/arm/mach-msm/board-8064-display.c
index 101a26d..5edddb5 100644
--- a/arch/arm/mach-msm/board-8064-display.c
+++ b/arch/arm/mach-msm/board-8064-display.c
@@ -473,11 +473,18 @@
 			}
 		}
 
+		rc = regulator_disable(reg_l11);
+		if (rc) {
+			pr_err("disable reg_l11 failed, rc=%d\n", rc);
+			return -ENODEV;
+		}
+
 		rc = regulator_disable(reg_lvs7);
 		if (rc) {
 			pr_err("disable reg_lvs7 failed, rc=%d\n", rc);
 			return -ENODEV;
 		}
+
 		rc = regulator_disable(reg_l2);
 		if (rc) {
 			pr_err("disable reg_l2 failed, rc=%d\n", rc);
diff --git a/arch/arm/mach-msm/board-8064-gpiomux.c b/arch/arm/mach-msm/board-8064-gpiomux.c
index b941bd4..ecd4e54 100644
--- a/arch/arm/mach-msm/board-8064-gpiomux.c
+++ b/arch/arm/mach-msm/board-8064-gpiomux.c
@@ -797,6 +797,13 @@
 	.pull = GPIOMUX_PULL_DOWN,
 };
 
+static struct gpiomux_setting mdm2ap_pblrdy = {
+	.func = GPIOMUX_FUNC_GPIO,
+	.drv = GPIOMUX_DRV_16MA,
+	.pull = GPIOMUX_PULL_DOWN,
+};
+
+
 static struct gpiomux_setting ap2mdm_soft_reset_cfg = {
 	.func = GPIOMUX_FUNC_GPIO,
 	.drv = GPIOMUX_DRV_8MA,
@@ -852,6 +859,13 @@
 			[GPIOMUX_SUSPENDED] = &ap2mdm_wakeup,
 		}
 	},
+	/* MDM2AP_PBL_READY */
+	{
+		.gpio = 46,
+		.settings = {
+			[GPIOMUX_SUSPENDED] = &mdm2ap_pblrdy,
+		}
+	},
 };
 
 static struct gpiomux_setting mi2s_act_cfg = {
diff --git a/arch/arm/mach-msm/board-8064-gpu.c b/arch/arm/mach-msm/board-8064-gpu.c
index e24cac6..d877fd8 100644
--- a/arch/arm/mach-msm/board-8064-gpu.c
+++ b/arch/arm/mach-msm/board-8064-gpu.c
@@ -224,6 +224,7 @@
 	.set_grp_async = NULL,
 	.idle_timeout = HZ/10,
 	.nap_allowed = true,
+	.strtstp_sleepwake = true,
 	.clk_map = KGSL_CLK_CORE | KGSL_CLK_IFACE | KGSL_CLK_MEM_IFACE,
 #ifdef CONFIG_MSM_BUS_SCALING
 	.bus_scale_table = &grp3d_bus_scale_pdata,
diff --git a/arch/arm/mach-msm/board-8064.c b/arch/arm/mach-msm/board-8064.c
index 1d231ef..146009c 100644
--- a/arch/arm/mach-msm/board-8064.c
+++ b/arch/arm/mach-msm/board-8064.c
@@ -29,6 +29,7 @@
 #include <linux/ion.h>
 #include <linux/memory.h>
 #include <linux/memblock.h>
+#include <linux/msm_thermal.h>
 #include <linux/i2c/atmel_mxt_ts.h>
 #include <linux/cyttsp-qc.h>
 #include <linux/i2c/isa1200.h>
@@ -1219,6 +1220,7 @@
 	.name = "vibrator",
 	.dev_setup = isa1200_dev_setup,
 	.clk_enable = isa1200_clk_enable,
+	.need_pwm_clk = true,
 	.hap_en_gpio = ISA1200_HAP_EN_GPIO,
 	.hap_len_gpio = ISA1200_HAP_LEN_GPIO,
 	.max_timeout = 15000,
@@ -1695,11 +1697,20 @@
 };
 #endif
 
+static struct mdm_vddmin_resource mdm_vddmin_rscs = {
+	.rpm_id = MSM_RPM_ID_VDDMIN_GPIO,
+	.ap2mdm_vddmin_gpio = 30,
+	.modes  = 0x03,
+	.drive_strength = 8,
+	.mdm2ap_vddmin_gpio = 80,
+};
+
 static struct mdm_platform_data mdm_platform_data = {
 	.mdm_version = "3.0",
 	.ramdump_delay_ms = 2000,
 	.early_power_on = 1,
 	.sfr_query = 1,
+	.vddmin_resource = &mdm_vddmin_rscs,
 	.peripheral_platform_device = &apq8064_device_hsic_host,
 };
 
@@ -1716,6 +1727,14 @@
 	.id = -1,
 };
 
+static struct msm_thermal_data msm_thermal_pdata = {
+	.sensor_id = 7,
+	.poll_ms = 1000,
+	.limit_temp = 60,
+	.temp_hysteresis = 10,
+	.limit_freq = 918000,
+};
+
 #define MSM_SHARED_RAM_PHYS 0x80000000
 static void __init apq8064_map_io(void)
 {
@@ -2864,6 +2883,7 @@
 static void __init apq8064_common_init(void)
 {
 	msm_tsens_early_init(&apq_tsens_pdata);
+	msm_thermal_init(&msm_thermal_pdata);
 	if (socinfo_init() < 0)
 		pr_err("socinfo_init() failed!\n");
 	BUG_ON(msm_rpm_init(&apq8064_rpm_data));
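board-8064.c (and the 8930/8960 boards later in this diff) registers msm_thermal_data carrying a sensor id, a 1000 ms poll interval, a 60 degree limit with 10 degrees of hysteresis and a 918000 kHz frequency cap, then hands it to msm_thermal_init() during early init. The standalone sketch below illustrates the limit/hysteresis behaviour those fields suggest; the decision function is an illustration, not the msm_thermal driver:

#include <stdbool.h>
#include <stdio.h>

static const int limit_temp = 60;	/* values mirror the pdata above */
static const int temp_hysteresis = 10;

/* Throttle once limit_temp is reached, release only after the reading
 * falls back below limit_temp - temp_hysteresis. */
static bool update_throttle(bool throttled, int temp)
{
	if (!throttled && temp >= limit_temp)
		return true;
	if (throttled && temp <= limit_temp - temp_hysteresis)
		return false;
	return throttled;
}

int main(void)
{
	const int samples[] = { 45, 58, 61, 59, 52, 49 };
	bool throttled = false;
	unsigned int i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
		throttled = update_throttle(throttled, samples[i]);
		printf("%d C -> %s\n", samples[i],
		       throttled ? "cap at 918000 kHz" : "unrestricted");
	}
	return 0;
}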
diff --git a/arch/arm/mach-msm/board-8930-gpu.c b/arch/arm/mach-msm/board-8930-gpu.c
index 3c3843a..c9021f3 100644
--- a/arch/arm/mach-msm/board-8930-gpu.c
+++ b/arch/arm/mach-msm/board-8930-gpu.c
@@ -139,6 +139,7 @@
 	.set_grp_async = NULL,
 	.idle_timeout = HZ/12,
 	.nap_allowed = true,
+	.strtstp_sleepwake = true,
 	.clk_map = KGSL_CLK_CORE | KGSL_CLK_IFACE | KGSL_CLK_MEM_IFACE,
 #ifdef CONFIG_MSM_BUS_SCALING
 	.bus_scale_table = &grp3d_bus_scale_pdata,
diff --git a/arch/arm/mach-msm/board-8930-regulator.c b/arch/arm/mach-msm/board-8930-regulator.c
index 5bee8a2..bc370ba 100644
--- a/arch/arm/mach-msm/board-8930-regulator.c
+++ b/arch/arm/mach-msm/board-8930-regulator.c
@@ -70,6 +70,8 @@
 	REGULATOR_SUPPLY("cam_vaf",		"4-0048"),
 	REGULATOR_SUPPLY("cam_vana",            "4-0020"),
 	REGULATOR_SUPPLY("cam_vaf",             "4-0020"),
+	REGULATOR_SUPPLY("vdd",			"12-0018"),
+	REGULATOR_SUPPLY("vdd",			"12-0068"),
 };
 VREG_CONSUMERS(L10) = {
 	REGULATOR_SUPPLY("8038_l10",		NULL),
@@ -186,6 +188,8 @@
 	REGULATOR_SUPPLY("vcc_i2c",		"3-004a"),
 	REGULATOR_SUPPLY("vcc_i2c",		"3-0024"),
 	REGULATOR_SUPPLY("vcc_i2c",		"0-0048"),
+	REGULATOR_SUPPLY("vddio",		"12-0018"),
+	REGULATOR_SUPPLY("vlogic",		"12-0068"),
 };
 VREG_CONSUMERS(EXT_5V) = {
 	REGULATOR_SUPPLY("ext_5v",		NULL),
diff --git a/arch/arm/mach-msm/board-8930.c b/arch/arm/mach-msm/board-8930.c
index a45f7cb..1f5ea52 100644
--- a/arch/arm/mach-msm/board-8930.c
+++ b/arch/arm/mach-msm/board-8930.c
@@ -42,6 +42,7 @@
 #include <linux/gpio_keys.h>
 #include <linux/memory.h>
 #include <linux/memblock.h>
+#include <linux/msm_thermal.h>
 
 #include <linux/slimbus/slimbus.h>
 #include <linux/mfd/wcd9xxx/core.h>
@@ -78,6 +79,10 @@
 #include <mach/msm_rtb.h>
 #include <linux/fmem.h>
 
+#ifdef CONFIG_INPUT_MPU3050
+#include <linux/input/mpu3050.h>
+#endif
+
 #include "timer.h"
 #include "devices.h"
 #include "devices-msm8x60.h"
@@ -1981,6 +1986,14 @@
 	.id = -1,
 };
 
+static struct msm_thermal_data msm_thermal_pdata = {
+	.sensor_id = 9,
+	.poll_ms = 1000,
+	.limit_temp = 60,
+	.temp_hysteresis = 10,
+	.limit_freq = 918000,
+};
+
 #ifdef CONFIG_MSM_FAKE_BATTERY
 static struct platform_device fish_battery_device = {
 	.name = "fish_battery",
@@ -2302,6 +2315,21 @@
 	int                    len;
 };
 
+#ifdef CONFIG_INPUT_MPU3050
+#define MPU3050_INT_GPIO		69
+
+static struct mpu3050_gyro_platform_data mpu3050_gyro = {
+	.gpio_int = MPU3050_INT_GPIO,
+};
+
+static struct i2c_board_info __initdata mpu3050_i2c_boardinfo[] = {
+	{
+		I2C_BOARD_INFO("mpu3050", 0x68),
+		.platform_data = &mpu3050_gyro,
+	},
+};
+#endif
+
 #ifdef CONFIG_ISL9519_CHARGER
 static struct isl_platform_data isl_data __initdata = {
 	.valid_n_gpio		= 0,	/* Not required when notify-by-pmic */
@@ -2331,6 +2359,14 @@
 		ARRAY_SIZE(isl_charger_i2c_info),
 	},
 #endif /* CONFIG_ISL9519_CHARGER */
+#ifdef CONFIG_INPUT_MPU3050
+	{
+		I2C_FFA | I2C_FLUID,
+		MSM_8930_GSBI12_QUP_I2C_BUS_ID,
+		mpu3050_i2c_boardinfo,
+		ARRAY_SIZE(mpu3050_i2c_boardinfo),
+	},
+#endif
 	{
 		I2C_SURF | I2C_FFA | I2C_FLUID,
 		MSM_8930_GSBI9_QUP_I2C_BUS_ID,
@@ -2392,6 +2428,7 @@
 		pr_err("meminfo_init() failed!\n");
 
 	msm_tsens_early_init(&msm_tsens_pdata);
+	msm_thermal_init(&msm_thermal_pdata);
 	BUG_ON(msm_rpm_init(&msm8930_rpm_data));
 	BUG_ON(msm_rpmrs_levels_init(&msm_rpmrs_data));
 
diff --git a/arch/arm/mach-msm/board-8930.h b/arch/arm/mach-msm/board-8930.h
index e564aff..925de45 100644
--- a/arch/arm/mach-msm/board-8930.h
+++ b/arch/arm/mach-msm/board-8930.h
@@ -138,5 +138,6 @@
 #define MSM_8930_GSBI4_QUP_I2C_BUS_ID 4
 #define MSM_8930_GSBI9_QUP_I2C_BUS_ID 0
 #define MSM_8930_GSBI10_QUP_I2C_BUS_ID 10
+#define MSM_8930_GSBI12_QUP_I2C_BUS_ID 12
 
 extern struct msm_rtb_platform_data msm8930_rtb_pdata;
diff --git a/arch/arm/mach-msm/board-8960.c b/arch/arm/mach-msm/board-8960.c
index 22ef940..628a324 100644
--- a/arch/arm/mach-msm/board-8960.c
+++ b/arch/arm/mach-msm/board-8960.c
@@ -42,6 +42,7 @@
 #include <linux/i2c/isa1200.h>
 #include <linux/memory.h>
 #include <linux/memblock.h>
+#include <linux/msm_thermal.h>
 
 #include <asm/mach-types.h>
 #include <asm/mach/arch.h>
@@ -2427,6 +2428,14 @@
 	.id = -1,
 };
 
+static struct msm_thermal_data msm_thermal_pdata = {
+	.sensor_id = 0,
+	.poll_ms = 1000,
+	.limit_temp = 60,
+	.temp_hysteresis = 10,
+	.limit_freq = 918000,
+};
+
 #ifdef CONFIG_MSM_FAKE_BATTERY
 static struct platform_device fish_battery_device = {
 	.name = "fish_battery",
@@ -3044,6 +3053,7 @@
 
 	wdog_pdata->bark_time = 15000;
 	msm_tsens_early_init(&msm_tsens_pdata);
+	msm_thermal_init(&msm_thermal_pdata);
 	BUG_ON(msm_rpm_init(&msm8960_rpm_data));
 	BUG_ON(msm_rpmrs_levels_init(&msm_rpmrs_data));
 	regulator_suppress_info_printing();
@@ -3076,6 +3086,7 @@
 static void __init msm8960_rumi3_init(void)
 {
 	msm_tsens_early_init(&msm_tsens_pdata);
+	msm_thermal_init(&msm_thermal_pdata);
 	BUG_ON(msm_rpm_init(&msm8960_rpm_data));
 	BUG_ON(msm_rpmrs_levels_init(&msm_rpmrs_data));
 	regulator_suppress_info_printing();
@@ -3108,6 +3119,7 @@
 		pr_err("meminfo_init() failed!\n");
 
 	msm_tsens_early_init(&msm_tsens_pdata);
+	msm_thermal_init(&msm_thermal_pdata);
 	BUG_ON(msm_rpm_init(&msm8960_rpm_data));
 	BUG_ON(msm_rpmrs_levels_init(&msm_rpmrs_data));
 
diff --git a/arch/arm/mach-msm/board-copper-regulator.c b/arch/arm/mach-msm/board-copper-regulator.c
index 7543872..10d5d0b 100644
--- a/arch/arm/mach-msm/board-copper-regulator.c
+++ b/arch/arm/mach-msm/board-copper-regulator.c
@@ -22,16 +22,16 @@
  *			 regulator name		consumer dev_name
  */
 VREG_CONSUMERS(K0) = {
-	REGULATOR_SUPPLY("krait0",		NULL),
+	REGULATOR_SUPPLY("krait0",		"f9000000.qcom,acpuclk"),
 };
 VREG_CONSUMERS(K1) = {
-	REGULATOR_SUPPLY("krait1",		NULL),
+	REGULATOR_SUPPLY("krait1",		"f9000000.qcom,acpuclk"),
 };
 VREG_CONSUMERS(K2) = {
-	REGULATOR_SUPPLY("krait2",		NULL),
+	REGULATOR_SUPPLY("krait2",		"f9000000.qcom,acpuclk"),
 };
 VREG_CONSUMERS(K3) = {
-	REGULATOR_SUPPLY("krait3",		NULL),
+	REGULATOR_SUPPLY("krait3",		"f9000000.qcom,acpuclk"),
 };
 
 #define PM8X41_VREG_INIT(_id, _name, _min_uV, _max_uV, _modes, _ops, \
diff --git a/arch/arm/mach-msm/board-copper.c b/arch/arm/mach-msm/board-copper.c
index 4dda0b7..85241a4 100644
--- a/arch/arm/mach-msm/board-copper.c
+++ b/arch/arm/mach-msm/board-copper.c
@@ -47,6 +47,7 @@
 #include "devices.h"
 #include "spm.h"
 #include "modem_notifier.h"
+#include "lpm_resources.h"
 
 #define MSM_KERNEL_EBI1_MEM_SIZE	0x280000
 #ifdef CONFIG_FB_MSM_HDMI_AS_PRIMARY
@@ -443,6 +444,12 @@
 	CLK_DUMMY("core_clk",	NULL,	"f9966000.i2c", 0),
 	CLK_DUMMY("iface_clk",	NULL,	"f9966000.i2c", 0),
 	CLK_DUMMY("core_clk",	NULL,	"fe12f000.slim",	OFF),
+	CLK_DUMMY("core_clk", "mdp.0", NULL, 0),
+	CLK_DUMMY("core_clk_src", "mdp.0", NULL, 0),
+	CLK_DUMMY("lut_clk", "mdp.0", NULL, 0),
+	CLK_DUMMY("vsync_clk", "mdp.0", NULL, 0),
+	CLK_DUMMY("iface_clk", "mdp.0", NULL, 0),
+	CLK_DUMMY("bus_clk", "mdp.0", NULL, 0),
 };
 
 struct clock_init_data msm_dummy_clock_init_data __initdata = {
@@ -461,6 +468,7 @@
 	msm_init_modem_notifier_list();
 	msm_smd_init();
 	msm_rpm_driver_init();
+	msm_lpmrs_module_init();
 	rpm_regulator_smd_driver_init();
 	msm_spm_device_init();
 	regulator_stub_init();
@@ -511,6 +519,7 @@
 			"msm_rng", NULL),
 	OF_DEV_AUXDATA("qcom,qseecom", 0xFE806000, \
 			"qseecom", NULL),
+	OF_DEV_AUXDATA("qcom,mdss_mdp", 0xFD900000, "mdp.0", NULL),
 	{}
 };
 
diff --git a/arch/arm/mach-msm/board-dt.c b/arch/arm/mach-msm/board-dt.c
index 674df09..8a801c2 100644
--- a/arch/arm/mach-msm/board-dt.c
+++ b/arch/arm/mach-msm/board-dt.c
@@ -85,7 +85,6 @@
 	.handle_irq = gic_handle_irq,
 	.timer = &msm_dt_timer,
 	.dt_compat = msm_dt_match,
-	.nr_irqs = -1,
 	.reserve = msm_dt_reserve,
 	.init_very_early = msm_dt_init_very_early,
 MACHINE_END
diff --git a/arch/arm/mach-msm/board-msm7627a-io.c b/arch/arm/mach-msm/board-msm7627a-io.c
index ec168f9..22095cd 100644
--- a/arch/arm/mach-msm/board-msm7627a-io.c
+++ b/arch/arm/mach-msm/board-msm7627a-io.c
@@ -229,7 +229,7 @@
 
 static int mxt_vkey_setup(void)
 {
-	int retval;
+	int retval = 0;
 
 	mxt_virtual_key_properties_kobj =
 		kobject_create_and_add("board_properties", NULL);
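The one-line board-msm7627a-io.c change initializes retval to 0 in mxt_vkey_setup(). The standalone sketch below shows the bug class that guards against: a status variable assigned only on an error path is returned uninitialized on the success path unless it starts out at 0 (control flow simplified, not the driver's logic):

#include <stdbool.h>
#include <stdio.h>

static int setup(bool fail)
{
	int retval = 0;		/* without this, the success path returns garbage */

	if (fail)
		retval = -1;	/* only the error path assigns it */

	return retval;
}

int main(void)
{
	printf("setup(false) = %d\n", setup(false));
	printf("setup(true)  = %d\n", setup(true));
	return 0;
}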
diff --git a/arch/arm/mach-msm/board-msm8x60.c b/arch/arm/mach-msm/board-msm8x60.c
index 098ad6e..1bb69b5 100644
--- a/arch/arm/mach-msm/board-msm8x60.c
+++ b/arch/arm/mach-msm/board-msm8x60.c
@@ -5320,7 +5320,7 @@
 	.align = PAGE_SIZE,
 };
 
-static struct ion_co_heap_pdata hole_co_ion_pdata = {
+static struct ion_co_heap_pdata mm_fw_co_ion_pdata = {
 	.adjacent_mem_id = ION_CP_MM_HEAP_ID,
 };
 
@@ -5363,10 +5363,10 @@
 			.id	= ION_MM_FIRMWARE_HEAP_ID,
 			.type	= ION_HEAP_TYPE_CARVEOUT,
 			.name	= ION_MM_FIRMWARE_HEAP_NAME,
-			.base	= MSM_ION_HOLE_BASE,
-			.size	= MSM_ION_HOLE_SIZE,
+			.base	= MSM_MM_FW_BASE,
+			.size	= MSM_MM_FW_SIZE,
 			.memory_type = ION_SMI_TYPE,
-			.extra_data = (void *) &hole_co_ion_pdata,
+			.extra_data = (void *) &mm_fw_co_ion_pdata,
 		},
 		{
 			.id	= ION_CP_MFC_HEAP_ID,
diff --git a/arch/arm/mach-msm/cache_erp.c b/arch/arm/mach-msm/cache_erp.c
index 4d7ce12..c3302ec 100644
--- a/arch/arm/mach-msm/cache_erp.c
+++ b/arch/arm/mach-msm/cache_erp.c
@@ -90,7 +90,7 @@
 
 #define MODULE_NAME "msm_cache_erp"
 
-#define ERP_LOG_MAGIC_ADDR	0x748
+#define ERP_LOG_MAGIC_ADDR	0x6A4
 #define ERP_LOG_MAGIC		0x11C39893
 
 struct msm_l1_err_stats {
diff --git a/arch/arm/mach-msm/clock-7x30.c b/arch/arm/mach-msm/clock-7x30.c
index aa94be6..225ea2b 100644
--- a/arch/arm/mach-msm/clock-7x30.c
+++ b/arch/arm/mach-msm/clock-7x30.c
@@ -200,7 +200,7 @@
 #define PCOM_XO_TCXO	0
 #define PCOM_XO_LPXO	1
 
-static bool pcom_is_local(struct clk *clk)
+static bool pcom_is_local(struct clk *c)
 {
 	return false;
 }
@@ -2441,7 +2441,7 @@
 
 struct measure_sel {
 	u32 test_vector;
-	struct clk *clk;
+	struct clk *c;
 };
 
 static struct measure_sel measure_mux[] = {
@@ -2538,17 +2538,17 @@
 	{ CLK_TEST_LS(0x3F), &usb_hs_clk.c },
 };
 
-static struct measure_sel *find_measure_sel(struct clk *clk)
+static struct measure_sel *find_measure_sel(struct clk *c)
 {
 	int i;
 
 	for (i = 0; i < ARRAY_SIZE(measure_mux); i++)
-		if (measure_mux[i].clk == clk)
+		if (measure_mux[i].c == c)
 			return &measure_mux[i];
 	return NULL;
 }
 
-static int measure_clk_set_parent(struct clk *clk, struct clk *parent)
+static int measure_clk_set_parent(struct clk *c, struct clk *parent)
 {
 	struct measure_sel *p;
 	unsigned long flags;
@@ -2599,7 +2599,7 @@
 
 /* Perform a hardware rate measurement for a given clock.
    FOR DEBUG USE ONLY: Measurements take ~15 ms! */
-static unsigned long measure_clk_get_rate(struct clk *clk)
+static unsigned long measure_clk_get_rate(struct clk *c)
 {
 	unsigned long flags;
 	u32 regval, prph_web_reg_old;
@@ -2647,12 +2647,12 @@
 	return ret;
 }
 #else /* !CONFIG_DEBUG_FS */
-static int measure_clk_set_parent(struct clk *clk, struct clk *parent)
+static int measure_clk_set_parent(struct clk *c, struct clk *parent)
 {
 	return -EINVAL;
 }
 
-static unsigned long measure_clk_get_rate(struct clk *clk)
+static unsigned long measure_clk_get_rate(struct clk *c)
 {
 	return 0;
 }
@@ -2670,14 +2670,14 @@
 };
 
 /* Implementation for clk_set_flags(). */
-int soc_clk_set_flags(struct clk *clk, unsigned clk_flags)
+int soc_clk_set_flags(struct clk *c, unsigned clk_flags)
 {
 	uint32_t regval, ret = 0;
 	unsigned long flags;
 
 	spin_lock_irqsave(&local_clock_reg_lock, flags);
 
-	if (clk == &vfe_clk.c) {
+	if (c == &vfe_clk.c) {
 		regval = readl_relaxed(CAM_VFE_NS_REG);
 		/* Flag values chosen for backward compatibility
 		 * with proc_comm remote clock control. */
@@ -2701,17 +2701,15 @@
 	return ret;
 }
 
-static int msm7x30_clk_reset(struct clk *clk, enum clk_reset_action action)
+static int msm7x30_clk_reset(struct clk *c, enum clk_reset_action action)
 {
 	/* reset_mask is actually a proc_comm id */
-	unsigned id = to_rcg_clk(clk)->b.reset_mask;
-	return pc_clk_reset(id, action);
+	return pc_clk_reset(to_rcg_clk(c)->b.reset_mask, action);
 }
 
-static int soc_branch_clk_reset(struct clk *clk, enum clk_reset_action action)
+static int soc_branch_clk_reset(struct clk *c, enum clk_reset_action action)
 {
-	unsigned id = to_branch_clk(clk)->b.reset_mask;
-	return pc_clk_reset(id, action);
+	return pc_clk_reset(to_branch_clk(c)->b.reset_mask, action);
 }
 
 /*
diff --git a/arch/arm/mach-msm/clock-8960.c b/arch/arm/mach-msm/clock-8960.c
index 5867eef..2846940 100644
--- a/arch/arm/mach-msm/clock-8960.c
+++ b/arch/arm/mach-msm/clock-8960.c
@@ -2858,9 +2858,9 @@
 	struct clk c;
 };
 
-static inline struct pix_rdi_clk *to_pix_rdi_clk(struct clk *clk)
+static inline struct pix_rdi_clk *to_pix_rdi_clk(struct clk *c)
 {
-	return container_of(clk, struct pix_rdi_clk, c);
+	return container_of(c, struct pix_rdi_clk, c);
 }
 
 static int pix_rdi_clk_set_rate(struct clk *c, unsigned long rate)
@@ -2868,7 +2868,7 @@
 	int ret, i;
 	u32 reg;
 	unsigned long flags;
-	struct pix_rdi_clk *clk = to_pix_rdi_clk(c);
+	struct pix_rdi_clk *rdi = to_pix_rdi_clk(c);
 	struct clk **mux_map = pix_rdi_mux_map;
 
 	/*
@@ -2889,32 +2889,32 @@
 		goto err;
 	}
 	/* Keep the new source on when switching inputs of an enabled clock */
-	if (clk->enabled) {
-		clk_disable(mux_map[clk->cur_rate]);
+	if (rdi->enabled) {
+		clk_disable(mux_map[rdi->cur_rate]);
 		clk_enable(mux_map[rate]);
 	}
 	spin_lock_irqsave(&local_clock_reg_lock, flags);
-	reg = readl_relaxed(clk->s2_reg);
-	reg &= ~clk->s2_mask;
-	reg |= rate == 2 ? clk->s2_mask : 0;
-	writel_relaxed(reg, clk->s2_reg);
+	reg = readl_relaxed(rdi->s2_reg);
+	reg &= ~rdi->s2_mask;
+	reg |= rate == 2 ? rdi->s2_mask : 0;
+	writel_relaxed(reg, rdi->s2_reg);
 	/*
 	 * Wait at least 6 cycles of slowest clock
 	 * for the glitch-free MUX to fully switch sources.
 	 */
 	mb();
 	udelay(1);
-	reg = readl_relaxed(clk->s_reg);
-	reg &= ~clk->s_mask;
-	reg |= rate == 1 ? clk->s_mask : 0;
-	writel_relaxed(reg, clk->s_reg);
+	reg = readl_relaxed(rdi->s_reg);
+	reg &= ~rdi->s_mask;
+	reg |= rate == 1 ? rdi->s_mask : 0;
+	writel_relaxed(reg, rdi->s_reg);
 	/*
 	 * Wait at least 6 cycles of slowest clock
 	 * for the glitch-free MUX to fully switch sources.
 	 */
 	mb();
 	udelay(1);
-	clk->cur_rate = rate;
+	rdi->cur_rate = rate;
 	spin_unlock_irqrestore(&local_clock_reg_lock, flags);
 err:
 	for (i--; i >= 0; i--)
@@ -2931,12 +2931,12 @@
 static int pix_rdi_clk_enable(struct clk *c)
 {
 	unsigned long flags;
-	struct pix_rdi_clk *clk = to_pix_rdi_clk(c);
+	struct pix_rdi_clk *rdi = to_pix_rdi_clk(c);
 
 	spin_lock_irqsave(&local_clock_reg_lock, flags);
-	__branch_clk_enable_reg(&clk->b, clk->c.dbg_name);
+	__branch_clk_enable_reg(&rdi->b, rdi->c.dbg_name);
 	spin_unlock_irqrestore(&local_clock_reg_lock, flags);
-	clk->enabled = true;
+	rdi->enabled = true;
 
 	return 0;
 }
@@ -2944,24 +2944,22 @@
 static void pix_rdi_clk_disable(struct clk *c)
 {
 	unsigned long flags;
-	struct pix_rdi_clk *clk = to_pix_rdi_clk(c);
+	struct pix_rdi_clk *rdi = to_pix_rdi_clk(c);
 
 	spin_lock_irqsave(&local_clock_reg_lock, flags);
-	__branch_clk_disable_reg(&clk->b, clk->c.dbg_name);
+	__branch_clk_disable_reg(&rdi->b, rdi->c.dbg_name);
 	spin_unlock_irqrestore(&local_clock_reg_lock, flags);
-	clk->enabled = false;
+	rdi->enabled = false;
 }
 
-static int pix_rdi_clk_reset(struct clk *clk, enum clk_reset_action action)
+static int pix_rdi_clk_reset(struct clk *c, enum clk_reset_action action)
 {
-	return branch_reset(&to_pix_rdi_clk(clk)->b, action);
+	return branch_reset(&to_pix_rdi_clk(c)->b, action);
 }
 
 static struct clk *pix_rdi_clk_get_parent(struct clk *c)
 {
-	struct pix_rdi_clk *clk = to_pix_rdi_clk(c);
-
-	return pix_rdi_mux_map[clk->cur_rate];
+	return pix_rdi_mux_map[to_pix_rdi_clk(c)->cur_rate];
 }
 
 static int pix_rdi_clk_list_rate(struct clk *c, unsigned n)
@@ -2974,17 +2972,17 @@
 static enum handoff pix_rdi_clk_handoff(struct clk *c)
 {
 	u32 reg;
-	struct pix_rdi_clk *clk = to_pix_rdi_clk(c);
+	struct pix_rdi_clk *rdi = to_pix_rdi_clk(c);
 	enum handoff ret;
 
-	ret = branch_handoff(&clk->b, &clk->c);
+	ret = branch_handoff(&rdi->b, &rdi->c);
 	if (ret == HANDOFF_DISABLED_CLK)
 		return ret;
 
-	reg = readl_relaxed(clk->s_reg);
-	clk->cur_rate = reg & clk->s_mask ? 1 : 0;
-	reg = readl_relaxed(clk->s2_reg);
-	clk->cur_rate = reg & clk->s2_mask ? 2 : clk->cur_rate;
+	reg = readl_relaxed(rdi->s_reg);
+	rdi->cur_rate = reg & rdi->s_mask ? 1 : 0;
+	reg = readl_relaxed(rdi->s2_reg);
+	rdi->cur_rate = reg & rdi->s2_mask ? 2 : rdi->cur_rate;
 
 	return HANDOFF_ENABLED_CLK;
 }
@@ -3897,7 +3895,7 @@
 	},
 };
 
-static int hdmi_pll_clk_enable(struct clk *clk)
+static int hdmi_pll_clk_enable(struct clk *c)
 {
 	int ret;
 	unsigned long flags;
@@ -3907,7 +3905,7 @@
 	return ret;
 }
 
-static void hdmi_pll_clk_disable(struct clk *clk)
+static void hdmi_pll_clk_disable(struct clk *c)
 {
 	unsigned long flags;
 	spin_lock_irqsave(&local_clock_reg_lock, flags);
@@ -3915,12 +3913,12 @@
 	spin_unlock_irqrestore(&local_clock_reg_lock, flags);
 }
 
-static unsigned long hdmi_pll_clk_get_rate(struct clk *clk)
+static unsigned long hdmi_pll_clk_get_rate(struct clk *c)
 {
 	return hdmi_pll_get_rate();
 }
 
-static struct clk *hdmi_pll_clk_get_parent(struct clk *clk)
+static struct clk *hdmi_pll_clk_get_parent(struct clk *c)
 {
 	return &pxo_clk.c;
 }
@@ -3975,12 +3973,12 @@
  * Unlike other clocks, the TV rate is adjusted through PLL
  * re-programming. It is also routed through an MND divider.
  */
-void set_rate_tv(struct rcg_clk *clk, struct clk_freq_tbl *nf)
+void set_rate_tv(struct rcg_clk *rcg, struct clk_freq_tbl *nf)
 {
 	unsigned long pll_rate = (unsigned long)nf->extra_freq_data;
 	if (pll_rate)
 		hdmi_pll_set_rate(pll_rate);
-	set_rate_mnd(clk, nf);
+	set_rate_mnd(rcg, nf);
 }
 
 static struct rcg_clk tv_src_clk = {
@@ -4616,7 +4614,7 @@
 #ifdef CONFIG_DEBUG_FS
 struct measure_sel {
 	u32 test_vector;
-	struct clk *clk;
+	struct clk *c;
 };
 
 static DEFINE_CLK_MEASURE(l2_m_clk);
@@ -4840,12 +4838,12 @@
 	{ TEST_CPUL2(0x5), &krait3_m_clk },
 };
 
-static struct measure_sel *find_measure_sel(struct clk *clk)
+static struct measure_sel *find_measure_sel(struct clk *c)
 {
 	int i;
 
 	for (i = 0; i < ARRAY_SIZE(measure_mux); i++)
-		if (measure_mux[i].clk == clk)
+		if (measure_mux[i].c == c)
 			return &measure_mux[i];
 	return NULL;
 }
@@ -4855,7 +4853,7 @@
 	int ret = 0;
 	u32 clk_sel;
 	struct measure_sel *p;
-	struct measure_clk *clk = to_measure_clk(c);
+	struct measure_clk *measure = to_measure_clk(c);
 	unsigned long flags;
 
 	if (!parent)
@@ -4871,9 +4869,9 @@
 	 * Program the test vector, measurement period (sample_ticks)
 	 * and scaling multiplier.
 	 */
-	clk->sample_ticks = 0x10000;
+	measure->sample_ticks = 0x10000;
 	clk_sel = p->test_vector & TEST_CLK_SEL_MASK;
-	clk->multiplier = 1;
+	measure->multiplier = 1;
 	switch (p->test_vector >> TEST_TYPE_SHIFT) {
 	case TEST_TYPE_PER_LS:
 		writel_relaxed(0x4030D00|BVAL(7, 0, clk_sel), CLK_TEST_REG);
@@ -4902,8 +4900,8 @@
 	case TEST_TYPE_CPUL2:
 		writel_relaxed(0x4030400, CLK_TEST_REG);
 		writel_relaxed(0x80|BVAL(5, 3, clk_sel), GCC_APCS_CLK_DIAG);
-		clk->sample_ticks = 0x4000;
-		clk->multiplier = 2;
+		measure->sample_ticks = 0x4000;
+		measure->multiplier = 2;
 		break;
 	default:
 		ret = -EPERM;
@@ -4946,7 +4944,7 @@
 	unsigned long flags;
 	u32 pdm_reg_backup, ringosc_reg_backup;
 	u64 raw_count_short, raw_count_full;
-	struct measure_clk *clk = to_measure_clk(c);
+	struct measure_clk *measure = to_measure_clk(c);
 	unsigned ret;
 
 	ret = clk_prepare_enable(&cxo_clk.c);
@@ -4973,7 +4971,7 @@
 	/* Run a short measurement. (~1 ms) */
 	raw_count_short = run_measurement(0x1000);
 	/* Run a full measurement. (~14 ms) */
-	raw_count_full = run_measurement(clk->sample_ticks);
+	raw_count_full = run_measurement(measure->sample_ticks);
 
 	writel_relaxed(ringosc_reg_backup, RINGOSC_NS_REG);
 	writel_relaxed(pdm_reg_backup, PDM_CLK_NS_REG);
@@ -4984,8 +4982,8 @@
 	else {
 		/* Compute rate in Hz. */
 		raw_count_full = ((raw_count_full * 10) + 15) * 4800000;
-		do_div(raw_count_full, ((clk->sample_ticks * 10) + 35));
-		ret = (raw_count_full * clk->multiplier);
+		do_div(raw_count_full, ((measure->sample_ticks * 10) + 35));
+		ret = (raw_count_full * measure->multiplier);
 	}
 
 	/* Route dbg_hs_clk to PLLTEST.  300mV single-ended amplitude. */
@@ -4997,12 +4995,12 @@
 	return ret;
 }
 #else /* !CONFIG_DEBUG_FS */
-static int measure_clk_set_parent(struct clk *clk, struct clk *parent)
+static int measure_clk_set_parent(struct clk *c, struct clk *parent)
 {
 	return -EINVAL;
 }
 
-static unsigned long measure_clk_get_rate(struct clk *clk)
+static unsigned long measure_clk_get_rate(struct clk *c)
 {
 	return 0;
 }
@@ -5025,6 +5023,7 @@
 static struct clk_lookup msm_clocks_8064[] = {
 	CLK_LOOKUP("xo",		cxo_a_clk.c,	""),
 	CLK_LOOKUP("xo",		pxo_a_clk.c,	""),
+	CLK_LOOKUP("pwm_clk",		cxo_clk.c,	"0-0048"),
 	CLK_LOOKUP("cxo",		cxo_clk.c,	"wcnss_wlan.0"),
 	CLK_LOOKUP("cxo",		cxo_clk.c,	"pil_riva"),
 	CLK_LOOKUP("xo",		pxo_clk.c,	"pil_qdsp6v4.0"),
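Most of the clock-8960.c churn renames local variables called clk to c, rdi or measure so they no longer read like the struct clk argument passed to the ops, while accessors such as to_pix_rdi_clk() keep recovering the wrapper through container_of(). A standalone sketch of that embedded-member idiom, with local stand-in types rather than the kernel's clk structures:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct clk_sketch {
	const char *dbg_name;
};

struct rdi_sketch {
	unsigned int cur_rate;
	struct clk_sketch c;	/* embedded generic clock */
};

static struct rdi_sketch *to_rdi(struct clk_sketch *c)
{
	/* step back from the embedded member to the enclosing struct */
	return container_of(c, struct rdi_sketch, c);
}

int main(void)
{
	struct rdi_sketch rdi = { .cur_rate = 2, .c = { "csi_pix_clk" } };
	struct clk_sketch *c = &rdi.c;	/* what the framework hands the ops */

	printf("%s cur_rate=%u\n", c->dbg_name, to_rdi(c)->cur_rate);
	return 0;
}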
diff --git a/arch/arm/mach-msm/clock-8x60.c b/arch/arm/mach-msm/clock-8x60.c
index 74d71a2..da7dca7 100644
--- a/arch/arm/mach-msm/clock-8x60.c
+++ b/arch/arm/mach-msm/clock-8x60.c
@@ -341,24 +341,24 @@
 	},
 };
 
-static int pll4_clk_enable(struct clk *clk)
+static int pll4_clk_enable(struct clk *c)
 {
 	struct msm_rpm_iv_pair iv = { MSM_RPM_ID_PLL_4, 1 };
 	return msm_rpm_set_noirq(MSM_RPM_CTX_SET_0, &iv, 1);
 }
 
-static void pll4_clk_disable(struct clk *clk)
+static void pll4_clk_disable(struct clk *c)
 {
 	struct msm_rpm_iv_pair iv = { MSM_RPM_ID_PLL_4, 0 };
 	msm_rpm_set_noirq(MSM_RPM_CTX_SET_0, &iv, 1);
 }
 
-static struct clk *pll4_clk_get_parent(struct clk *clk)
+static struct clk *pll4_clk_get_parent(struct clk *c)
 {
 	return &pxo_clk.c;
 }
 
-static bool pll4_clk_is_local(struct clk *clk)
+static bool pll4_clk_is_local(struct clk *c)
 {
 	return false;
 }
@@ -397,7 +397,7 @@
 
 /* Unlike other clocks, the TV rate is adjusted through PLL
  * re-programming. It is also routed through an MND divider. */
-static void set_rate_tv(struct rcg_clk *clk, struct clk_freq_tbl *nf)
+static void set_rate_tv(struct rcg_clk *rcg, struct clk_freq_tbl *nf)
 {
 	struct pll_rate *rate = nf->extra_freq_data;
 	uint32_t pll_mode, pll_config, misc_cc2;
@@ -426,7 +426,7 @@
 	writel_relaxed(pll_config, MM_PLL2_CONFIG_REG);
 
 	/* Configure MND. */
-	set_rate_mnd(clk, nf);
+	set_rate_mnd(rcg, nf);
 
 	/* Configure hdmi_ref_clk to be equal to the TV clock rate. */
 	misc_cc2 = readl_relaxed(MISC_CC2_REG);
@@ -3133,7 +3133,7 @@
 #ifdef CONFIG_DEBUG_FS
 struct measure_sel {
 	u32 test_vector;
-	struct clk *clk;
+	struct clk *c;
 };
 
 static struct measure_sel measure_mux[] = {
@@ -3308,12 +3308,12 @@
 	{ TEST_SC(0x42), &l2_m_clk },
 };
 
-static struct measure_sel *find_measure_sel(struct clk *clk)
+static struct measure_sel *find_measure_sel(struct clk *c)
 {
 	int i;
 
 	for (i = 0; i < ARRAY_SIZE(measure_mux); i++)
-		if (measure_mux[i].clk == clk)
+		if (measure_mux[i].c == c)
 			return &measure_mux[i];
 	return NULL;
 }
@@ -3323,7 +3323,7 @@
 	int ret = 0;
 	u32 clk_sel;
 	struct measure_sel *p;
-	struct measure_clk *clk = to_measure_clk(c);
+	struct measure_clk *measure = to_measure_clk(c);
 	unsigned long flags;
 
 	if (!parent)
@@ -3340,9 +3340,9 @@
 	 * and scaling factors (multiplier, divider).
 	 */
 	clk_sel = p->test_vector & TEST_CLK_SEL_MASK;
-	clk->sample_ticks = 0x10000;
-	clk->multiplier = 1;
-	clk->divider = 1;
+	measure->sample_ticks = 0x10000;
+	measure->multiplier = 1;
+	measure->divider = 1;
 	switch (p->test_vector >> TEST_TYPE_SHIFT) {
 	case TEST_TYPE_PER_LS:
 		writel_relaxed(0x4030D00|BVAL(7, 0, clk_sel), CLK_TEST_REG);
@@ -3355,7 +3355,7 @@
 		writel_relaxed(BVAL(6, 1, clk_sel)|BIT(0), DBG_CFG_REG_LS_REG);
 		break;
 	case TEST_TYPE_MM_HS2X:
-		clk->divider = 2;
+		measure->divider = 2;
 	case TEST_TYPE_MM_HS:
 		writel_relaxed(0x402B800, CLK_TEST_REG);
 		writel_relaxed(BVAL(6, 1, clk_sel)|BIT(0), DBG_CFG_REG_HS_REG);
@@ -3367,8 +3367,8 @@
 		break;
 	case TEST_TYPE_SC:
 		writel_relaxed(0x5020000|BVAL(16, 10, clk_sel), CLK_TEST_REG);
-		clk->sample_ticks = 0x4000;
-		clk->multiplier = 2;
+		measure->sample_ticks = 0x4000;
+		measure->multiplier = 2;
 		break;
 	default:
 		ret = -EPERM;
@@ -3410,7 +3410,7 @@
 	unsigned long flags;
 	u32 pdm_reg_backup, ringosc_reg_backup;
 	u64 raw_count_short, raw_count_full;
-	struct measure_clk *clk = to_measure_clk(c);
+	struct measure_clk *measure = to_measure_clk(c);
 	unsigned ret;
 
 	spin_lock_irqsave(&local_clock_reg_lock, flags);
@@ -3431,7 +3431,7 @@
 	/* Run a short measurement. (~1 ms) */
 	raw_count_short = run_measurement(0x1000);
 	/* Run a full measurement. (~14 ms) */
-	raw_count_full = run_measurement(clk->sample_ticks);
+	raw_count_full = run_measurement(measure->sample_ticks);
 
 	writel_relaxed(ringosc_reg_backup, RINGOSC_NS_REG);
 	writel_relaxed(pdm_reg_backup, PDM_CLK_NS_REG);
@@ -3442,9 +3442,9 @@
 	else {
 		/* Compute rate in Hz. */
 		raw_count_full = ((raw_count_full * 10) + 15) * 4800000;
-		do_div(raw_count_full,
-		       (((clk->sample_ticks * 10) + 35) * clk->divider));
-		ret = (raw_count_full * clk->multiplier);
+		do_div(raw_count_full, (((measure->sample_ticks * 10) + 35)
+			* measure->divider));
+		ret = (raw_count_full * measure->multiplier);
 	}
 
 	/* Route dbg_hs_clk to PLLTEST.  300mV single-ended amplitude. */
@@ -3454,12 +3454,12 @@
 	return ret;
 }
 #else /* !CONFIG_DEBUG_FS */
-static int measure_clk_set_parent(struct clk *clk, struct clk *parent)
+static int measure_clk_set_parent(struct clk *c, struct clk *parent)
 {
 	return -EINVAL;
 }
 
-static unsigned long measure_clk_get_rate(struct clk *clk)
+static unsigned long measure_clk_get_rate(struct clk *c)
 {
 	return 0;
 }
diff --git a/arch/arm/mach-msm/clock-9615.c b/arch/arm/mach-msm/clock-9615.c
index a2e0bc9..f7ccb35 100644
--- a/arch/arm/mach-msm/clock-9615.c
+++ b/arch/arm/mach-msm/clock-9615.c
@@ -216,33 +216,33 @@
 
 static DEFINE_SPINLOCK(soft_vote_lock);
 
-static int pll_acpu_vote_clk_enable(struct clk *clk)
+static int pll_acpu_vote_clk_enable(struct clk *c)
 {
 	int ret = 0;
 	unsigned long flags;
-	struct pll_vote_clk *pll = to_pll_vote_clk(clk);
+	struct pll_vote_clk *pllv = to_pll_vote_clk(c);
 
 	spin_lock_irqsave(&soft_vote_lock, flags);
 
-	if (!*pll->soft_vote)
-		ret = pll_vote_clk_enable(clk);
+	if (!*pllv->soft_vote)
+		ret = pll_vote_clk_enable(c);
 	if (ret == 0)
-		*pll->soft_vote |= (pll->soft_vote_mask);
+		*pllv->soft_vote |= (pllv->soft_vote_mask);
 
 	spin_unlock_irqrestore(&soft_vote_lock, flags);
 	return ret;
 }
 
-static void pll_acpu_vote_clk_disable(struct clk *clk)
+static void pll_acpu_vote_clk_disable(struct clk *c)
 {
 	unsigned long flags;
-	struct pll_vote_clk *pll = to_pll_vote_clk(clk);
+	struct pll_vote_clk *pllv = to_pll_vote_clk(c);
 
 	spin_lock_irqsave(&soft_vote_lock, flags);
 
-	*pll->soft_vote &= ~(pll->soft_vote_mask);
-	if (!*pll->soft_vote)
-		pll_vote_clk_disable(clk);
+	*pllv->soft_vote &= ~(pllv->soft_vote_mask);
+	if (!*pllv->soft_vote)
+		pll_vote_clk_disable(c);
 
 	spin_unlock_irqrestore(&soft_vote_lock, flags);
 }
@@ -1376,7 +1376,7 @@
 #ifdef CONFIG_DEBUG_FS
 struct measure_sel {
 	u32 test_vector;
-	struct clk *clk;
+	struct clk *c;
 };
 
 static DEFINE_CLK_MEASURE(q6sw_clk);
@@ -1447,12 +1447,12 @@
 	{ TEST_LPA_HS(0x00), &q6_func_clk },
 };
 
-static struct measure_sel *find_measure_sel(struct clk *clk)
+static struct measure_sel *find_measure_sel(struct clk *c)
 {
 	int i;
 
 	for (i = 0; i < ARRAY_SIZE(measure_mux); i++)
-		if (measure_mux[i].clk == clk)
+		if (measure_mux[i].c == c)
 			return &measure_mux[i];
 	return NULL;
 }
@@ -1462,7 +1462,7 @@
 	int ret = 0;
 	u32 clk_sel;
 	struct measure_sel *p;
-	struct measure_clk *clk = to_measure_clk(c);
+	struct measure_clk *measure = to_measure_clk(c);
 	unsigned long flags;
 
 	if (!parent)
@@ -1478,9 +1478,9 @@
 	 * Program the test vector, measurement period (sample_ticks)
 	 * and scaling multiplier.
 	 */
-	clk->sample_ticks = 0x10000;
+	measure->sample_ticks = 0x10000;
 	clk_sel = p->test_vector & TEST_CLK_SEL_MASK;
-	clk->multiplier = 1;
+	measure->multiplier = 1;
 	switch (p->test_vector >> TEST_TYPE_SHIFT) {
 	case TEST_TYPE_PER_LS:
 		writel_relaxed(0x4030D00|BVAL(7, 0, clk_sel), CLK_TEST_REG);
@@ -1539,7 +1539,7 @@
 	unsigned long flags;
 	u32 pdm_reg_backup, ringosc_reg_backup;
 	u64 raw_count_short, raw_count_full;
-	struct measure_clk *clk = to_measure_clk(c);
+	struct measure_clk *measure = to_measure_clk(c);
 	unsigned ret;
 
 	spin_lock_irqsave(&local_clock_reg_lock, flags);
@@ -1560,7 +1560,7 @@
 	/* Run a short measurement. (~1 ms) */
 	raw_count_short = run_measurement(0x1000);
 	/* Run a full measurement. (~14 ms) */
-	raw_count_full = run_measurement(clk->sample_ticks);
+	raw_count_full = run_measurement(measure->sample_ticks);
 
 	writel_relaxed(ringosc_reg_backup, RINGOSC_NS_REG);
 	writel_relaxed(pdm_reg_backup, PDM_CLK_NS_REG);
@@ -1571,8 +1571,8 @@
 	else {
 		/* Compute rate in Hz. */
 		raw_count_full = ((raw_count_full * 10) + 15) * 4800000;
-		do_div(raw_count_full, ((clk->sample_ticks * 10) + 35));
-		ret = (raw_count_full * clk->multiplier);
+		do_div(raw_count_full, ((measure->sample_ticks * 10) + 35));
+		ret = (raw_count_full * measure->multiplier);
 	}
 
 	/* Route dbg_hs_clk to PLLTEST.  300mV single-ended amplitude. */
@@ -1582,12 +1582,12 @@
 	return ret;
 }
 #else /* !CONFIG_DEBUG_FS */
-static int measure_clk_set_parent(struct clk *clk, struct clk *parent)
+static int measure_clk_set_parent(struct clk *c, struct clk *parent)
 {
 	return -EINVAL;
 }
 
-static unsigned long measure_clk_get_rate(struct clk *clk)
+static unsigned long measure_clk_get_rate(struct clk *c)
 {
 	return 0;
 }
diff --git a/arch/arm/mach-msm/clock-copper.c b/arch/arm/mach-msm/clock-copper.c
index 61706dc..2dadc4c 100644
--- a/arch/arm/mach-msm/clock-copper.c
+++ b/arch/arm/mach-msm/clock-copper.c
@@ -588,39 +588,29 @@
 
 static DEFINE_VDD_CLASS(vdd_dig, set_vdd_dig);
 
-static int cxo_clk_enable(struct clk *clk)
-{
-	/* TODO: Remove from here once the rpm xo clock is ready. */
-	return 0;
-}
+#define RPM_MISC_CLK_TYPE 0x306b6c63
+#define RPM_BUS_CLK_TYPE  0x316b6c63
+#define RPM_MEM_CLK_TYPE  0x326b6c63
 
-static void cxo_clk_disable(struct clk *clk)
-{
-	/* TODO: Remove from here once the rpm xo clock is ready. */
-	return;
-}
+#define CXO_ID		0x0
 
-static enum handoff cxo_clk_handoff(struct clk *clk)
-{
-	/* TODO: Remove from here once the rpm xo clock is ready. */
-	return HANDOFF_ENABLED_CLK;
-}
+#define PNOC_ID		0x0
+#define SNOC_ID		0x1
+#define CNOC_ID		0x2
 
-static struct clk_ops clk_ops_cxo = {
-	.enable = cxo_clk_enable,
-	.disable = cxo_clk_disable,
-	.handoff = cxo_clk_handoff,
-};
+#define BIMC_ID		0x0
+#define OCMEM_ID	0x1
 
-static struct fixed_clk cxo_clk_src = {
-	.c = {
-		.rate = 19200000,
-		.dbg_name = "cxo_clk_src",
-		.ops = &clk_ops_cxo,
-		.warned = true,
-		CLK_INIT(cxo_clk_src.c),
-	},
-};
+DEFINE_CLK_RPM_SMD(pnoc_clk, pnoc_a_clk, RPM_BUS_CLK_TYPE, PNOC_ID, NULL);
+DEFINE_CLK_RPM_SMD(snoc_clk, snoc_a_clk, RPM_BUS_CLK_TYPE, SNOC_ID, NULL);
+DEFINE_CLK_RPM_SMD(cnoc_clk, cnoc_a_clk, RPM_BUS_CLK_TYPE, CNOC_ID, NULL);
+
+DEFINE_CLK_RPM_SMD(bimc_clk, bimc_a_clk, RPM_MEM_CLK_TYPE, BIMC_ID, NULL);
+DEFINE_CLK_RPM_SMD(ocmemgx_clk, ocmemgx_a_clk, RPM_MEM_CLK_TYPE, OCMEM_ID,
+			NULL);
+
+DEFINE_CLK_RPM_SMD_BRANCH(cxo_clk_src, cxo_a_clk_src,
+				RPM_MISC_CLK_TYPE, CXO_ID, 19200000);
 
 static struct pll_vote_clk gpll0_clk_src = {
 	.en_reg = (void __iomem *)APCS_GPLL_ENA_VOTE_REG,
@@ -715,24 +705,6 @@
 	},
 };
 
-#define RPM_BUS_CLK_TYPE  0x316b6c63
-#define RPM_MEM_CLK_TYPE  0x326b6c63
-
-#define PNOC_ID		0x0
-#define SNOC_ID		0x1
-#define CNOC_ID		0x2
-
-#define BIMC_ID		0x0
-#define OCMEM_ID	0x1
-
-DEFINE_CLK_RPM_SMD(pnoc_clk, pnoc_a_clk, RPM_BUS_CLK_TYPE, PNOC_ID, NULL);
-DEFINE_CLK_RPM_SMD(snoc_clk, snoc_a_clk, RPM_BUS_CLK_TYPE, SNOC_ID, NULL);
-DEFINE_CLK_RPM_SMD(cnoc_clk, cnoc_a_clk, RPM_BUS_CLK_TYPE, CNOC_ID, NULL);
-
-DEFINE_CLK_RPM_SMD(bimc_clk, bimc_a_clk, RPM_MEM_CLK_TYPE, BIMC_ID, NULL);
-DEFINE_CLK_RPM_SMD(ocmemgx_clk, ocmemgx_a_clk, RPM_MEM_CLK_TYPE, OCMEM_ID,
-			NULL);
-
 static DEFINE_CLK_VOTER(pnoc_msmbus_clk, &pnoc_clk.c, LONG_MAX);
 static DEFINE_CLK_VOTER(snoc_msmbus_clk, &snoc_clk.c, LONG_MAX);
 static DEFINE_CLK_VOTER(cnoc_msmbus_clk, &cnoc_clk.c, LONG_MAX);
@@ -4706,10 +4678,10 @@
 	CLK_LOOKUP("core_clk", mdss_esc1_clk.c, ""),
 	CLK_LOOKUP("iface_clk", mdss_hdmi_ahb_clk.c, ""),
 	CLK_LOOKUP("core_clk", mdss_hdmi_clk.c, ""),
-	CLK_LOOKUP("core_clk", mdss_mdp_clk.c, ""),
-	CLK_LOOKUP("core_clk", mdss_mdp_lut_clk.c, ""),
-	CLK_LOOKUP("core_clk", mdp_clk_src.c, ""),
-	CLK_LOOKUP("core_clk", mdss_vsync_clk.c, ""),
+	CLK_LOOKUP("core_clk", mdss_mdp_clk.c, "mdp.0"),
+	CLK_LOOKUP("lut_clk", mdss_mdp_lut_clk.c, "mdp.0"),
+	CLK_LOOKUP("core_clk_src", mdp_clk_src.c, "mdp.0"),
+	CLK_LOOKUP("vsync_clk", mdss_vsync_clk.c, "mdp.0"),
 	CLK_LOOKUP("iface_clk", camss_cci_cci_ahb_clk.c, ""),
 	CLK_LOOKUP("core_clk", camss_cci_cci_clk.c, ""),
 	CLK_LOOKUP("iface_clk", camss_csi0_ahb_clk.c, ""),
@@ -4768,9 +4740,10 @@
 	CLK_LOOKUP("iface_clk", camss_vfe_vfe_ahb_clk.c, ""),
 	CLK_LOOKUP("bus_clk", camss_vfe_vfe_axi_clk.c, ""),
 	CLK_LOOKUP("bus_clk", camss_vfe_vfe_ocmemnoc_clk.c, ""),
+	CLK_LOOKUP("iface_clk", mdss_ahb_clk.c, "mdp.0"),
 	CLK_LOOKUP("iface_clk", mdss_ahb_clk.c, "fd928000.qcom,iommu"),
 	CLK_LOOKUP("core_clk", mdss_axi_clk.c, "fd928000.qcom,iommu"),
-	CLK_LOOKUP("bus_clk", mdss_axi_clk.c, ""),
+	CLK_LOOKUP("bus_clk", mdss_axi_clk.c, "mdp.0"),
 	CLK_LOOKUP("core_clk", oxili_gfx3d_clk.c, ""),
 	CLK_LOOKUP("iface_clk", oxilicx_ahb_clk.c, ""),
 	CLK_LOOKUP("bus_clk", oxilicx_axi_clk.c, ""),
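Retargeting the MDSS CLK_LOOKUP entries from "" to "mdp.0" (pairing with the OF_DEV_AUXDATA entry added to board-copper.c) lets several clocks reuse generic connection ids like "core_clk" and "bus_clk" because the device name now takes part in the match. A standalone sketch of that (dev_name, con_id) matching, with an illustrative table rather than the clkdev implementation:

#include <stdio.h>
#include <string.h>

struct lookup_sketch {
	const char *dev;
	const char *con;
	const char *clk;
};

static const struct lookup_sketch table[] = {
	{ "mdp.0",               "core_clk", "mdss_mdp_clk" },
	{ "fd928000.qcom,iommu", "core_clk", "mdss_axi_clk" },
};

static const char *lookup(const char *dev, const char *con)
{
	unsigned int i;

	for (i = 0; i < sizeof(table) / sizeof(table[0]); i++)
		if (!strcmp(table[i].dev, dev) && !strcmp(table[i].con, con))
			return table[i].clk;
	return NULL;
}

int main(void)
{
	/* same con_id, different devices, different clocks */
	printf("mdp.0/core_clk -> %s\n", lookup("mdp.0", "core_clk"));
	printf("iommu/core_clk -> %s\n",
	       lookup("fd928000.qcom,iommu", "core_clk"));
	return 0;
}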
diff --git a/arch/arm/mach-msm/clock-dss-8960.c b/arch/arm/mach-msm/clock-dss-8960.c
index 7f3646f..49b6cf7 100644
--- a/arch/arm/mach-msm/clock-dss-8960.c
+++ b/arch/arm/mach-msm/clock-dss-8960.c
@@ -98,6 +98,7 @@
 	unsigned int val;
 	u32 ahb_en_reg, ahb_enabled;
 	unsigned int timeout_count;
+	int pll_lock_retry = 10;
 
 	ahb_en_reg = readl_relaxed(AHB_EN_REG);
 	ahb_enabled = ahb_en_reg & BIT(4);
@@ -149,7 +150,7 @@
 
 	timeout_count = 1000;
 	while (!(readl_relaxed(HDMI_PHY_PLL_STATUS0) & BIT(0)) &&
-			timeout_count) {
+			timeout_count && pll_lock_retry) {
 		if (--timeout_count == 0) {
 			/*
 			 * PLL has still not locked.
@@ -166,16 +167,18 @@
 			udelay(10);
 			writel_relaxed(0x0D, HDMI_PHY_PLL_LOCKDET_CFG2);
 			timeout_count = 1000;
-
-			pr_err("%s: PLL not locked after %d iterations\n",
-				__func__, timeout_count);
-			pr_err("%s: Asserting PLL S/W reset & trying again\n",
-				__func__);
+			pll_lock_retry--;
 		}
 	}
 
 	if (!ahb_enabled)
 		writel_relaxed(ahb_en_reg & ~BIT(4), AHB_EN_REG);
+
+	if (!pll_lock_retry) {
+		pr_err("%s: HDMI PLL not locked\n", __func__);
+		return -EAGAIN;
+	}
+
 	hdmi_pll_on = 1;
 	return 0;
 }
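The clock-dss-8960.c change bounds the HDMI PLL lock wait: the 1000-iteration poll is wrapped in a 10-attempt retry counter, the in-loop error prints are replaced by a single failure message, and the function returns -EAGAIN once the retries are exhausted. A standalone sketch of that bounded-retry structure, where poll_locked() stands in for the status-register read and the reset sequence is reduced to a comment:

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for reading the lock bit; pretends the PLL locks on the
 * third attempt. */
static bool poll_locked(int attempt)
{
	return attempt >= 2;
}

static int wait_for_lock(void)
{
	int retry;

	for (retry = 0; retry < 10; retry++) {
		int timeout;

		for (timeout = 1000; timeout > 0; timeout--)
			if (poll_locked(retry))
				return 0;	/* locked */
		/* the driver would assert a S/W reset here before retrying */
	}
	return -1;	/* the driver maps exhaustion to -EAGAIN */
}

int main(void)
{
	printf("wait_for_lock() = %d\n", wait_for_lock());
	return 0;
}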
diff --git a/arch/arm/mach-msm/clock-local.c b/arch/arm/mach-msm/clock-local.c
index b5ae4ab..8222e87 100644
--- a/arch/arm/mach-msm/clock-local.c
+++ b/arch/arm/mach-msm/clock-local.c
@@ -53,32 +53,32 @@
  */
 
 /* For clocks with MND dividers. */
-void set_rate_mnd(struct rcg_clk *clk, struct clk_freq_tbl *nf)
+void set_rate_mnd(struct rcg_clk *rcg, struct clk_freq_tbl *nf)
 {
 	uint32_t ns_reg_val, ctl_reg_val;
 
 	/* Assert MND reset. */
-	ns_reg_val = readl_relaxed(clk->ns_reg);
+	ns_reg_val = readl_relaxed(rcg->ns_reg);
 	ns_reg_val |= BIT(7);
-	writel_relaxed(ns_reg_val, clk->ns_reg);
+	writel_relaxed(ns_reg_val, rcg->ns_reg);
 
 	/* Program M and D values. */
-	writel_relaxed(nf->md_val, clk->md_reg);
+	writel_relaxed(nf->md_val, rcg->md_reg);
 
 	/* If the clock has a separate CC register, program it. */
-	if (clk->ns_reg != clk->b.ctl_reg) {
-		ctl_reg_val = readl_relaxed(clk->b.ctl_reg);
-		ctl_reg_val &= ~(clk->ctl_mask);
+	if (rcg->ns_reg != rcg->b.ctl_reg) {
+		ctl_reg_val = readl_relaxed(rcg->b.ctl_reg);
+		ctl_reg_val &= ~(rcg->ctl_mask);
 		ctl_reg_val |= nf->ctl_val;
-		writel_relaxed(ctl_reg_val, clk->b.ctl_reg);
+		writel_relaxed(ctl_reg_val, rcg->b.ctl_reg);
 	}
 
 	/* Deassert MND reset. */
 	ns_reg_val &= ~BIT(7);
-	writel_relaxed(ns_reg_val, clk->ns_reg);
+	writel_relaxed(ns_reg_val, rcg->ns_reg);
 }
 
-void set_rate_nop(struct rcg_clk *clk, struct clk_freq_tbl *nf)
+void set_rate_nop(struct rcg_clk *rcg, struct clk_freq_tbl *nf)
 {
 	/*
 	 * Nothing to do for fixed-rate or integer-divider clocks. Any settings
@@ -88,31 +88,31 @@
 	 */
 }
 
-void set_rate_mnd_8(struct rcg_clk *clk, struct clk_freq_tbl *nf)
+void set_rate_mnd_8(struct rcg_clk *rcg, struct clk_freq_tbl *nf)
 {
 	uint32_t ctl_reg_val;
 
 	/* Assert MND reset. */
-	ctl_reg_val = readl_relaxed(clk->b.ctl_reg);
+	ctl_reg_val = readl_relaxed(rcg->b.ctl_reg);
 	ctl_reg_val |= BIT(8);
-	writel_relaxed(ctl_reg_val, clk->b.ctl_reg);
+	writel_relaxed(ctl_reg_val, rcg->b.ctl_reg);
 
 	/* Program M and D values. */
-	writel_relaxed(nf->md_val, clk->md_reg);
+	writel_relaxed(nf->md_val, rcg->md_reg);
 
 	/* Program MN counter Enable and Mode. */
-	ctl_reg_val &= ~(clk->ctl_mask);
+	ctl_reg_val &= ~(rcg->ctl_mask);
 	ctl_reg_val |= nf->ctl_val;
-	writel_relaxed(ctl_reg_val, clk->b.ctl_reg);
+	writel_relaxed(ctl_reg_val, rcg->b.ctl_reg);
 
 	/* Deassert MND reset. */
 	ctl_reg_val &= ~BIT(8);
-	writel_relaxed(ctl_reg_val, clk->b.ctl_reg);
+	writel_relaxed(ctl_reg_val, rcg->b.ctl_reg);
 }
 
-void set_rate_mnd_banked(struct rcg_clk *clk, struct clk_freq_tbl *nf)
+void set_rate_mnd_banked(struct rcg_clk *rcg, struct clk_freq_tbl *nf)
 {
-	struct bank_masks *banks = clk->bank_info;
+	struct bank_masks *banks = rcg->bank_info;
 	const struct bank_mask_info *new_bank_masks;
 	const struct bank_mask_info *old_bank_masks;
 	uint32_t ns_reg_val, ctl_reg_val;
@@ -123,10 +123,10 @@
 	 * off, program the active bank since bank switching won't work if
 	 * both banks aren't running.
 	 */
-	ctl_reg_val = readl_relaxed(clk->b.ctl_reg);
+	ctl_reg_val = readl_relaxed(rcg->b.ctl_reg);
 	bank_sel = !!(ctl_reg_val & banks->bank_sel_mask);
 	 /* If clock isn't running, don't switch banks. */
-	bank_sel ^= (!clk->enabled || clk->current_freq->freq_hz == 0);
+	bank_sel ^= (!rcg->enabled || rcg->current_freq->freq_hz == 0);
 	if (bank_sel == 0) {
 		new_bank_masks = &banks->bank1_mask;
 		old_bank_masks = &banks->bank0_mask;
@@ -135,46 +135,46 @@
 		old_bank_masks = &banks->bank1_mask;
 	}
 
-	ns_reg_val = readl_relaxed(clk->ns_reg);
+	ns_reg_val = readl_relaxed(rcg->ns_reg);
 
 	/* Assert bank MND reset. */
 	ns_reg_val |= new_bank_masks->rst_mask;
-	writel_relaxed(ns_reg_val, clk->ns_reg);
+	writel_relaxed(ns_reg_val, rcg->ns_reg);
 
 	/*
 	 * Program NS only if the clock is enabled, since the NS will be set
 	 * as part of the enable procedure and should remain with a low-power
 	 * MUX input selected until then.
 	 */
-	if (clk->enabled) {
+	if (rcg->enabled) {
 		ns_reg_val &= ~(new_bank_masks->ns_mask);
 		ns_reg_val |= (nf->ns_val & new_bank_masks->ns_mask);
-		writel_relaxed(ns_reg_val, clk->ns_reg);
+		writel_relaxed(ns_reg_val, rcg->ns_reg);
 	}
 
 	writel_relaxed(nf->md_val, new_bank_masks->md_reg);
 
 	/* Enable counter only if clock is enabled. */
-	if (clk->enabled)
+	if (rcg->enabled)
 		ctl_reg_val |= new_bank_masks->mnd_en_mask;
 	else
 		ctl_reg_val &= ~(new_bank_masks->mnd_en_mask);
 
 	ctl_reg_val &= ~(new_bank_masks->mode_mask);
 	ctl_reg_val |= (nf->ctl_val & new_bank_masks->mode_mask);
-	writel_relaxed(ctl_reg_val, clk->b.ctl_reg);
+	writel_relaxed(ctl_reg_val, rcg->b.ctl_reg);
 
 	/* Deassert bank MND reset. */
 	ns_reg_val &= ~(new_bank_masks->rst_mask);
-	writel_relaxed(ns_reg_val, clk->ns_reg);
+	writel_relaxed(ns_reg_val, rcg->ns_reg);
 
 	/*
 	 * Switch to the new bank if clock is running.  If it isn't, then
 	 * no switch is necessary since we programmed the active bank.
 	 */
-	if (clk->enabled && clk->current_freq->freq_hz) {
+	if (rcg->enabled && rcg->current_freq->freq_hz) {
 		ctl_reg_val ^= banks->bank_sel_mask;
-		writel_relaxed(ctl_reg_val, clk->b.ctl_reg);
+		writel_relaxed(ctl_reg_val, rcg->b.ctl_reg);
 		/*
 		 * Wait at least 6 cycles of slowest bank's clock
 		 * for the glitch-free MUX to fully switch sources.
@@ -184,22 +184,22 @@
 
 		/* Disable old bank's MN counter. */
 		ctl_reg_val &= ~(old_bank_masks->mnd_en_mask);
-		writel_relaxed(ctl_reg_val, clk->b.ctl_reg);
+		writel_relaxed(ctl_reg_val, rcg->b.ctl_reg);
 
 		/* Program old bank to a low-power source and divider. */
 		ns_reg_val &= ~(old_bank_masks->ns_mask);
-		ns_reg_val |= (clk->freq_tbl->ns_val & old_bank_masks->ns_mask);
-		writel_relaxed(ns_reg_val, clk->ns_reg);
+		ns_reg_val |= (rcg->freq_tbl->ns_val & old_bank_masks->ns_mask);
+		writel_relaxed(ns_reg_val, rcg->ns_reg);
 	}
 
 	/* Update the MND_EN and NS masks to match the current bank. */
-	clk->mnd_en_mask = new_bank_masks->mnd_en_mask;
-	clk->ns_mask = new_bank_masks->ns_mask;
+	rcg->mnd_en_mask = new_bank_masks->mnd_en_mask;
+	rcg->ns_mask = new_bank_masks->ns_mask;
 }
 
-void set_rate_div_banked(struct rcg_clk *clk, struct clk_freq_tbl *nf)
+void set_rate_div_banked(struct rcg_clk *rcg, struct clk_freq_tbl *nf)
 {
-	struct bank_masks *banks = clk->bank_info;
+	struct bank_masks *banks = rcg->bank_info;
 	const struct bank_mask_info *new_bank_masks;
 	const struct bank_mask_info *old_bank_masks;
 	uint32_t ns_reg_val, bank_sel;
@@ -209,10 +209,10 @@
 	 * off, program the active bank since bank switching won't work if
 	 * both banks aren't running.
 	 */
-	ns_reg_val = readl_relaxed(clk->ns_reg);
+	ns_reg_val = readl_relaxed(rcg->ns_reg);
 	bank_sel = !!(ns_reg_val & banks->bank_sel_mask);
 	 /* If clock isn't running, don't switch banks. */
-	bank_sel ^= (!clk->enabled || clk->current_freq->freq_hz == 0);
+	bank_sel ^= (!rcg->enabled || rcg->current_freq->freq_hz == 0);
 	if (bank_sel == 0) {
 		new_bank_masks = &banks->bank1_mask;
 		old_bank_masks = &banks->bank0_mask;
@@ -226,19 +226,19 @@
 	 * as part of the enable procedure and should remain with a low-power
 	 * MUX input selected until then.
 	 */
-	if (clk->enabled) {
+	if (rcg->enabled) {
 		ns_reg_val &= ~(new_bank_masks->ns_mask);
 		ns_reg_val |= (nf->ns_val & new_bank_masks->ns_mask);
-		writel_relaxed(ns_reg_val, clk->ns_reg);
+		writel_relaxed(ns_reg_val, rcg->ns_reg);
 	}
 
 	/*
 	 * Switch to the new bank if clock is running.  If it isn't, then
 	 * no switch is necessary since we programmed the active bank.
 	 */
-	if (clk->enabled && clk->current_freq->freq_hz) {
+	if (rcg->enabled && rcg->current_freq->freq_hz) {
 		ns_reg_val ^= banks->bank_sel_mask;
-		writel_relaxed(ns_reg_val, clk->ns_reg);
+		writel_relaxed(ns_reg_val, rcg->ns_reg);
 		/*
 		 * Wait at least 6 cycles of slowest bank's clock
 		 * for the glitch-free MUX to fully switch sources.
@@ -248,12 +248,12 @@
 
 		/* Program old bank to a low-power source and divider. */
 		ns_reg_val &= ~(old_bank_masks->ns_mask);
-		ns_reg_val |= (clk->freq_tbl->ns_val & old_bank_masks->ns_mask);
-		writel_relaxed(ns_reg_val, clk->ns_reg);
+		ns_reg_val |= (rcg->freq_tbl->ns_val & old_bank_masks->ns_mask);
+		writel_relaxed(ns_reg_val, rcg->ns_reg);
 	}
 
 	/* Update the NS mask to match the current bank. */
-	clk->ns_mask = new_bank_masks->ns_mask;
+	rcg->ns_mask = new_bank_masks->ns_mask;
 }
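
The bank-selection XOR above is easy to misread: bank_sel starts as the bank
currently selected in hardware, and XOR-ing it with "clock not running" makes
the code program the inactive bank while the clock runs, but the active bank
while it is off (a hardware bank switch only works when both banks are
clocked). A reduced, standalone model of that decision (illustrative helper,
not part of the driver; arguments assumed to be 0 or 1):

	/* Which bank do the banked set_rate helpers program? */
	static int bank_to_program(int active_bank, int clock_running)
	{
		int old_bank = active_bank ^ !clock_running;

		/* Running: program the other bank.  Idle: reprogram the
		 * active bank, since no switch will be performed. */
		return !old_bank;
	}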
 
 /*
@@ -261,10 +261,10 @@
  */
 
 /* Return non-zero if a clock status registers shows the clock is halted. */
-static int branch_clk_is_halted(const struct branch *clk)
+static int branch_clk_is_halted(const struct branch *b)
 {
-	int invert = (clk->halt_check == ENABLE);
-	int status_bit = readl_relaxed(clk->halt_reg) & BIT(clk->halt_bit);
+	int invert = (b->halt_check == ENABLE);
+	int status_bit = readl_relaxed(b->halt_reg) & BIT(b->halt_bit);
 	return invert ? !status_bit : status_bit;
 }
 
@@ -276,14 +276,14 @@
 	return !!(readl_relaxed(b->hwcg_reg) & b->hwcg_mask);
 }
 
-void __branch_clk_enable_reg(const struct branch *clk, const char *name)
+void __branch_clk_enable_reg(const struct branch *b, const char *name)
 {
 	u32 reg_val;
 
-	if (clk->en_mask) {
-		reg_val = readl_relaxed(clk->ctl_reg);
-		reg_val |= clk->en_mask;
-		writel_relaxed(reg_val, clk->ctl_reg);
+	if (b->en_mask) {
+		reg_val = readl_relaxed(b->ctl_reg);
+		reg_val |= b->en_mask;
+		writel_relaxed(reg_val, b->ctl_reg);
 	}
 
 	/*
@@ -295,19 +295,19 @@
 	mb();
 
 	/* Skip checking halt bit if the clock is in hardware gated mode */
-	if (branch_in_hwcg_mode(clk))
+	if (branch_in_hwcg_mode(b))
 		return;
 
 	/* Wait for clock to enable before returning. */
-	if (clk->halt_check == DELAY)
+	if (b->halt_check == DELAY) {
 		udelay(HALT_CHECK_DELAY_US);
-	else if (clk->halt_check == ENABLE || clk->halt_check == HALT
-			|| clk->halt_check == ENABLE_VOTED
-			|| clk->halt_check == HALT_VOTED) {
+	} else if (b->halt_check == ENABLE || b->halt_check == HALT
+			|| b->halt_check == ENABLE_VOTED
+			|| b->halt_check == HALT_VOTED) {
 		int count;
 
 		/* Wait up to HALT_CHECK_MAX_LOOPS for clock to enable. */
-		for (count = HALT_CHECK_MAX_LOOPS; branch_clk_is_halted(clk)
+		for (count = HALT_CHECK_MAX_LOOPS; branch_clk_is_halted(b)
 					&& count > 0; count--)
 			udelay(1);
 		WARN(count == 0, "%s status stuck at 'off'", name);
@@ -315,50 +315,50 @@
 }
 
 /* Perform any register operations required to enable the clock. */
-static void __rcg_clk_enable_reg(struct rcg_clk *clk)
+static void __rcg_clk_enable_reg(struct rcg_clk *rcg)
 {
 	u32 reg_val;
-	void __iomem *const reg = clk->b.ctl_reg;
+	void __iomem *const reg = rcg->b.ctl_reg;
 
-	WARN(clk->current_freq == &rcg_dummy_freq,
+	WARN(rcg->current_freq == &rcg_dummy_freq,
 		"Attempting to enable %s before setting its rate. "
-		"Set the rate first!\n", clk->c.dbg_name);
+		"Set the rate first!\n", rcg->c.dbg_name);
 
 	/*
 	 * Program the NS register, if applicable. NS registers are not
 	 * set in the set_rate path because power can be saved by deferring
 	 * the selection of a clocked source until the clock is enabled.
 	 */
-	if (clk->ns_mask) {
-		reg_val = readl_relaxed(clk->ns_reg);
-		reg_val &= ~(clk->ns_mask);
-		reg_val |= (clk->current_freq->ns_val & clk->ns_mask);
-		writel_relaxed(reg_val, clk->ns_reg);
+	if (rcg->ns_mask) {
+		reg_val = readl_relaxed(rcg->ns_reg);
+		reg_val &= ~(rcg->ns_mask);
+		reg_val |= (rcg->current_freq->ns_val & rcg->ns_mask);
+		writel_relaxed(reg_val, rcg->ns_reg);
 	}
 
 	/* Enable MN counter, if applicable. */
 	reg_val = readl_relaxed(reg);
-	if (clk->current_freq->md_val) {
-		reg_val |= clk->mnd_en_mask;
+	if (rcg->current_freq->md_val) {
+		reg_val |= rcg->mnd_en_mask;
 		writel_relaxed(reg_val, reg);
 	}
 	/* Enable root. */
-	if (clk->root_en_mask) {
-		reg_val |= clk->root_en_mask;
+	if (rcg->root_en_mask) {
+		reg_val |= rcg->root_en_mask;
 		writel_relaxed(reg_val, reg);
 	}
-	__branch_clk_enable_reg(&clk->b, clk->c.dbg_name);
+	__branch_clk_enable_reg(&rcg->b, rcg->c.dbg_name);
 }
 
 /* Perform any register operations required to disable the branch. */
-u32 __branch_clk_disable_reg(const struct branch *clk, const char *name)
+u32 __branch_clk_disable_reg(const struct branch *b, const char *name)
 {
 	u32 reg_val;
 
-	reg_val = readl_relaxed(clk->ctl_reg);
-	if (clk->en_mask) {
-		reg_val &= ~(clk->en_mask);
-		writel_relaxed(reg_val, clk->ctl_reg);
+	reg_val = readl_relaxed(b->ctl_reg);
+	if (b->en_mask) {
+		reg_val &= ~(b->en_mask);
+		writel_relaxed(reg_val, b->ctl_reg);
 	}
 
 	/*
@@ -370,18 +370,18 @@
 	mb();
 
 	/* Skip checking halt bit if the clock is in hardware gated mode */
-	if (branch_in_hwcg_mode(clk))
+	if (branch_in_hwcg_mode(b))
 		return reg_val;
 
 	/* Wait for clock to disable before continuing. */
-	if (clk->halt_check == DELAY || clk->halt_check == ENABLE_VOTED
-				     || clk->halt_check == HALT_VOTED)
+	if (b->halt_check == DELAY || b->halt_check == ENABLE_VOTED
+				   || b->halt_check == HALT_VOTED) {
 		udelay(HALT_CHECK_DELAY_US);
-	else if (clk->halt_check == ENABLE || clk->halt_check == HALT) {
+	} else if (b->halt_check == ENABLE || b->halt_check == HALT) {
 		int count;
 
 		/* Wait up to HALT_CHECK_MAX_LOOPS for clock to disable. */
-		for (count = HALT_CHECK_MAX_LOOPS; !branch_clk_is_halted(clk)
+		for (count = HALT_CHECK_MAX_LOOPS; !branch_clk_is_halted(b)
 					&& count > 0; count--)
 			udelay(1);
 		WARN(count == 0, "%s status stuck at 'on'", name);
@@ -391,31 +391,31 @@
 }
 
 /* Perform any register operations required to disable the generator. */
-static void __rcg_clk_disable_reg(struct rcg_clk *clk)
+static void __rcg_clk_disable_reg(struct rcg_clk *rcg)
 {
-	void __iomem *const reg = clk->b.ctl_reg;
+	void __iomem *const reg = rcg->b.ctl_reg;
 	uint32_t reg_val;
 
-	reg_val = __branch_clk_disable_reg(&clk->b, clk->c.dbg_name);
+	reg_val = __branch_clk_disable_reg(&rcg->b, rcg->c.dbg_name);
 	/* Disable root. */
-	if (clk->root_en_mask) {
-		reg_val &= ~(clk->root_en_mask);
+	if (rcg->root_en_mask) {
+		reg_val &= ~(rcg->root_en_mask);
 		writel_relaxed(reg_val, reg);
 	}
 	/* Disable MN counter, if applicable. */
-	if (clk->current_freq->md_val) {
-		reg_val &= ~(clk->mnd_en_mask);
+	if (rcg->current_freq->md_val) {
+		reg_val &= ~(rcg->mnd_en_mask);
 		writel_relaxed(reg_val, reg);
 	}
 	/*
 	 * Program NS register to low-power value with an un-clocked or
 	 * slowly-clocked source selected.
 	 */
-	if (clk->ns_mask) {
-		reg_val = readl_relaxed(clk->ns_reg);
-		reg_val &= ~(clk->ns_mask);
-		reg_val |= (clk->freq_tbl->ns_val & clk->ns_mask);
-		writel_relaxed(reg_val, clk->ns_reg);
+	if (rcg->ns_mask) {
+		reg_val = readl_relaxed(rcg->ns_reg);
+		reg_val &= ~(rcg->ns_mask);
+		reg_val |= (rcg->freq_tbl->ns_val & rcg->ns_mask);
+		writel_relaxed(reg_val, rcg->ns_reg);
 	}
 }
 
@@ -423,11 +423,11 @@
 static int rcg_clk_enable(struct clk *c)
 {
 	unsigned long flags;
-	struct rcg_clk *clk = to_rcg_clk(c);
+	struct rcg_clk *rcg = to_rcg_clk(c);
 
 	spin_lock_irqsave(&local_clock_reg_lock, flags);
-	__rcg_clk_enable_reg(clk);
-	clk->enabled = true;
+	__rcg_clk_enable_reg(rcg);
+	rcg->enabled = true;
 	spin_unlock_irqrestore(&local_clock_reg_lock, flags);
 
 	return 0;
@@ -437,11 +437,11 @@
 static void rcg_clk_disable(struct clk *c)
 {
 	unsigned long flags;
-	struct rcg_clk *clk = to_rcg_clk(c);
+	struct rcg_clk *rcg = to_rcg_clk(c);
 
 	spin_lock_irqsave(&local_clock_reg_lock, flags);
-	__rcg_clk_disable_reg(clk);
-	clk->enabled = false;
+	__rcg_clk_disable_reg(rcg);
+	rcg->enabled = false;
 	spin_unlock_irqrestore(&local_clock_reg_lock, flags);
 }
 
@@ -452,21 +452,21 @@
 /* Set a clock to an exact rate. */
 static int rcg_clk_set_rate(struct clk *c, unsigned long rate)
 {
-	struct rcg_clk *clk = to_rcg_clk(c);
+	struct rcg_clk *rcg = to_rcg_clk(c);
 	struct clk_freq_tbl *nf, *cf;
 	struct clk *chld;
 	int rc = 0;
 
-	for (nf = clk->freq_tbl; nf->freq_hz != FREQ_END
+	for (nf = rcg->freq_tbl; nf->freq_hz != FREQ_END
 			&& nf->freq_hz != rate; nf++)
 		;
 
 	if (nf->freq_hz == FREQ_END)
 		return -EINVAL;
 
-	cf = clk->current_freq;
+	cf = rcg->current_freq;
 
-	if (clk->enabled) {
+	if (rcg->enabled) {
 		/* Enable source clock dependency for the new freq. */
 		rc = clk_enable(nf->src_clk);
 		if (rc)
@@ -476,9 +476,9 @@
 	spin_lock(&local_clock_reg_lock);
 
 	/* Disable branch if clock isn't dual-banked with a glitch-free MUX. */
-	if (!clk->bank_info) {
+	if (!rcg->bank_info) {
 		/* Disable all branches to prevent glitches. */
-		list_for_each_entry(chld, &clk->c.children, siblings) {
+		list_for_each_entry(chld, &rcg->c.children, siblings) {
 			struct branch_clk *x = to_branch_clk(chld);
 			/*
 			 * We don't need to grab the child's lock because
@@ -488,26 +488,26 @@
 			if (x->enabled)
 				__branch_clk_disable_reg(&x->b, x->c.dbg_name);
 		}
-		if (clk->enabled)
-			__rcg_clk_disable_reg(clk);
+		if (rcg->enabled)
+			__rcg_clk_disable_reg(rcg);
 	}
 
 	/* Perform clock-specific frequency switch operations. */
-	BUG_ON(!clk->set_rate);
-	clk->set_rate(clk, nf);
+	BUG_ON(!rcg->set_rate);
+	rcg->set_rate(rcg, nf);
 
 	/*
 	 * Current freq must be updated before __rcg_clk_enable_reg()
 	 * is called to make sure the MNCNTR_EN bit is set correctly.
 	 */
-	clk->current_freq = nf;
+	rcg->current_freq = nf;
 
 	/* Enable any clocks that were disabled. */
-	if (!clk->bank_info) {
-		if (clk->enabled)
-			__rcg_clk_enable_reg(clk);
+	if (!rcg->bank_info) {
+		if (rcg->enabled)
+			__rcg_clk_enable_reg(rcg);
 		/* Enable only branches that were ON before. */
-		list_for_each_entry(chld, &clk->c.children, siblings) {
+		list_for_each_entry(chld, &rcg->c.children, siblings) {
 			struct branch_clk *x = to_branch_clk(chld);
 			if (x->enabled)
 				__branch_clk_enable_reg(&x->b, x->c.dbg_name);
@@ -517,25 +517,25 @@
 	spin_unlock(&local_clock_reg_lock);
 
 	/* Release source requirements of the old freq. */
-	if (clk->enabled)
+	if (rcg->enabled)
 		clk_disable(cf->src_clk);
 
 	return rc;
 }
 
 /* Check if a clock is currently enabled. */
-static int rcg_clk_is_enabled(struct clk *clk)
+static int rcg_clk_is_enabled(struct clk *c)
 {
-	return to_rcg_clk(clk)->enabled;
+	return to_rcg_clk(c)->enabled;
 }
 
 /* Return a supported rate that's at least the specified rate. */
 static long rcg_clk_round_rate(struct clk *c, unsigned long rate)
 {
-	struct rcg_clk *clk = to_rcg_clk(c);
+	struct rcg_clk *rcg = to_rcg_clk(c);
 	struct clk_freq_tbl *f;
 
-	for (f = clk->freq_tbl; f->freq_hz != FREQ_END; f++)
+	for (f = rcg->freq_tbl; f->freq_hz != FREQ_END; f++)
 		if (f->freq_hz >= rate)
 			return f->freq_hz;
 
@@ -545,26 +545,26 @@
 /* Return the nth supported frequency for a given clock. */
 static int rcg_clk_list_rate(struct clk *c, unsigned n)
 {
-	struct rcg_clk *clk = to_rcg_clk(c);
+	struct rcg_clk *rcg = to_rcg_clk(c);
 
-	if (!clk->freq_tbl || clk->freq_tbl->freq_hz == FREQ_END)
+	if (!rcg->freq_tbl || rcg->freq_tbl->freq_hz == FREQ_END)
 		return -ENXIO;
 
-	return (clk->freq_tbl + n)->freq_hz;
+	return (rcg->freq_tbl + n)->freq_hz;
 }
 
-static struct clk *rcg_clk_get_parent(struct clk *clk)
+static struct clk *rcg_clk_get_parent(struct clk *c)
 {
-	return to_rcg_clk(clk)->current_freq->src_clk;
+	return to_rcg_clk(c)->current_freq->src_clk;
 }
 
 /* Disable hw clock gating if not set at boot */
-enum handoff branch_handoff(struct branch *clk, struct clk *c)
+enum handoff branch_handoff(struct branch *b, struct clk *c)
 {
-	if (!branch_in_hwcg_mode(clk)) {
-		clk->hwcg_mask = 0;
+	if (!branch_in_hwcg_mode(b)) {
+		b->hwcg_mask = 0;
 		c->flags &= ~CLKFLAG_HWCG;
-		if (readl_relaxed(clk->ctl_reg) & clk->en_mask)
+		if (readl_relaxed(b->ctl_reg) & b->en_mask)
 			return HANDOFF_ENABLED_CLK;
 	} else {
 		c->flags |= CLKFLAG_HWCG;
@@ -574,24 +574,24 @@
 
 static enum handoff branch_clk_handoff(struct clk *c)
 {
-	struct branch_clk *clk = to_branch_clk(c);
-	return branch_handoff(&clk->b, &clk->c);
+	struct branch_clk *br = to_branch_clk(c);
+	return branch_handoff(&br->b, &br->c);
 }
 
 static enum handoff rcg_clk_handoff(struct clk *c)
 {
-	struct rcg_clk *clk = to_rcg_clk(c);
+	struct rcg_clk *rcg = to_rcg_clk(c);
 	uint32_t ctl_val, ns_val, md_val, ns_mask;
 	struct clk_freq_tbl *freq;
 	enum handoff ret;
 
-	ctl_val = readl_relaxed(clk->b.ctl_reg);
-	ret = branch_handoff(&clk->b, &clk->c);
+	ctl_val = readl_relaxed(rcg->b.ctl_reg);
+	ret = branch_handoff(&rcg->b, &rcg->c);
 	if (ret == HANDOFF_DISABLED_CLK)
 		return HANDOFF_DISABLED_CLK;
 
-	if (clk->bank_info) {
-		const struct bank_masks *bank_masks = clk->bank_info;
+	if (rcg->bank_info) {
+		const struct bank_masks *bank_masks = rcg->bank_info;
 		const struct bank_mask_info *bank_info;
 		if (!(ctl_val & bank_masks->bank_sel_mask))
 			bank_info = &bank_masks->bank0_mask;
@@ -602,13 +602,13 @@
 		md_val = bank_info->md_reg ?
 				readl_relaxed(bank_info->md_reg) : 0;
 	} else {
-		ns_mask = clk->ns_mask;
-		md_val = clk->md_reg ? readl_relaxed(clk->md_reg) : 0;
+		ns_mask = rcg->ns_mask;
+		md_val = rcg->md_reg ? readl_relaxed(rcg->md_reg) : 0;
 	}
 	if (!ns_mask)
 		return HANDOFF_UNKNOWN_RATE;
-	ns_val = readl_relaxed(clk->ns_reg) & ns_mask;
-	for (freq = clk->freq_tbl; freq->freq_hz != FREQ_END; freq++) {
+	ns_val = readl_relaxed(rcg->ns_reg) & ns_mask;
+	for (freq = rcg->freq_tbl; freq->freq_hz != FREQ_END; freq++) {
 		if ((freq->ns_val & ns_mask) == ns_val &&
 		    (!freq->md_val || freq->md_val == md_val))
 			break;
@@ -616,7 +616,7 @@
 	if (freq->freq_hz == FREQ_END)
 		return HANDOFF_UNKNOWN_RATE;
 
-	clk->current_freq = freq;
+	rcg->current_freq = freq;
 	c->rate = freq->freq_hz;
 
 	return HANDOFF_ENABLED_CLK;
@@ -632,40 +632,38 @@
 	},
 };
 
-static int branch_clk_enable(struct clk *clk)
+static int branch_clk_enable(struct clk *c)
 {
 	unsigned long flags;
-	struct branch_clk *branch = to_branch_clk(clk);
+	struct branch_clk *br = to_branch_clk(c);
 
 	spin_lock_irqsave(&local_clock_reg_lock, flags);
-	__branch_clk_enable_reg(&branch->b, branch->c.dbg_name);
-	branch->enabled = true;
+	__branch_clk_enable_reg(&br->b, br->c.dbg_name);
+	br->enabled = true;
 	spin_unlock_irqrestore(&local_clock_reg_lock, flags);
 
 	return 0;
 }
 
-static void branch_clk_disable(struct clk *clk)
+static void branch_clk_disable(struct clk *c)
 {
 	unsigned long flags;
-	struct branch_clk *branch = to_branch_clk(clk);
+	struct branch_clk *br = to_branch_clk(c);
 
 	spin_lock_irqsave(&local_clock_reg_lock, flags);
-	__branch_clk_disable_reg(&branch->b, branch->c.dbg_name);
-	branch->enabled = false;
+	__branch_clk_disable_reg(&br->b, br->c.dbg_name);
+	br->enabled = false;
 	spin_unlock_irqrestore(&local_clock_reg_lock, flags);
 }
 
-static struct clk *branch_clk_get_parent(struct clk *clk)
+static struct clk *branch_clk_get_parent(struct clk *c)
 {
-	struct branch_clk *branch = to_branch_clk(clk);
-	return branch->parent;
+	return to_branch_clk(c)->parent;
 }
 
-static int branch_clk_is_enabled(struct clk *clk)
+static int branch_clk_is_enabled(struct clk *c)
 {
-	struct branch_clk *branch = to_branch_clk(clk);
-	return branch->enabled;
+	return to_branch_clk(c)->enabled;
 }
 
 static void branch_enable_hwcg(struct branch *b)
@@ -692,16 +690,14 @@
 	spin_unlock_irqrestore(&local_clock_reg_lock, flags);
 }
 
-static void branch_clk_enable_hwcg(struct clk *clk)
+static void branch_clk_enable_hwcg(struct clk *c)
 {
-	struct branch_clk *branch = to_branch_clk(clk);
-	branch_enable_hwcg(&branch->b);
+	branch_enable_hwcg(&to_branch_clk(c)->b);
 }
 
-static void branch_clk_disable_hwcg(struct clk *clk)
+static void branch_clk_disable_hwcg(struct clk *c)
 {
-	struct branch_clk *branch = to_branch_clk(clk);
-	branch_disable_hwcg(&branch->b);
+	branch_disable_hwcg(&to_branch_clk(c)->b);
 }
 
 static int branch_set_flags(struct branch *b, unsigned flags)
@@ -738,26 +734,22 @@
 
 static int branch_clk_in_hwcg_mode(struct clk *c)
 {
-	struct branch_clk *clk = to_branch_clk(c);
-	return branch_in_hwcg_mode(&clk->b);
+	return branch_in_hwcg_mode(&to_branch_clk(c)->b);
 }
 
-static void rcg_clk_enable_hwcg(struct clk *clk)
+static void rcg_clk_enable_hwcg(struct clk *c)
 {
-	struct rcg_clk *rcg = to_rcg_clk(clk);
-	branch_enable_hwcg(&rcg->b);
+	branch_enable_hwcg(&to_rcg_clk(c)->b);
 }
 
-static void rcg_clk_disable_hwcg(struct clk *clk)
+static void rcg_clk_disable_hwcg(struct clk *c)
 {
-	struct rcg_clk *rcg = to_rcg_clk(clk);
-	branch_disable_hwcg(&rcg->b);
+	branch_disable_hwcg(&to_rcg_clk(c)->b);
 }
 
 static int rcg_clk_in_hwcg_mode(struct clk *c)
 {
-	struct rcg_clk *clk = to_rcg_clk(c);
-	return branch_in_hwcg_mode(&clk->b);
+	return branch_in_hwcg_mode(&to_rcg_clk(c)->b);
 }
 
 static int rcg_clk_set_flags(struct clk *clk, unsigned flags)
@@ -802,9 +794,9 @@
 	return ret;
 }
 
-static int branch_clk_reset(struct clk *clk, enum clk_reset_action action)
+static int branch_clk_reset(struct clk *c, enum clk_reset_action action)
 {
-	return branch_reset(&to_branch_clk(clk)->b, action);
+	return branch_reset(&to_branch_clk(c)->b, action);
 }
 
 struct clk_ops clk_ops_branch = {
@@ -825,9 +817,9 @@
 	.reset = branch_clk_reset,
 };
 
-static int rcg_clk_reset(struct clk *clk, enum clk_reset_action action)
+static int rcg_clk_reset(struct clk *c, enum clk_reset_action action)
 {
-	return branch_reset(&to_rcg_clk(clk)->b, action);
+	return branch_reset(&to_rcg_clk(c)->b, action);
 }
 
 struct clk_ops clk_ops_rcg = {
@@ -850,10 +842,10 @@
 static int cdiv_clk_enable(struct clk *c)
 {
 	unsigned long flags;
-	struct cdiv_clk *clk = to_cdiv_clk(c);
+	struct cdiv_clk *cdiv = to_cdiv_clk(c);
 
 	spin_lock_irqsave(&local_clock_reg_lock, flags);
-	__branch_clk_enable_reg(&clk->b, clk->c.dbg_name);
+	__branch_clk_enable_reg(&cdiv->b, cdiv->c.dbg_name);
 	spin_unlock_irqrestore(&local_clock_reg_lock, flags);
 
 	return 0;
@@ -862,70 +854,67 @@
 static void cdiv_clk_disable(struct clk *c)
 {
 	unsigned long flags;
-	struct cdiv_clk *clk = to_cdiv_clk(c);
+	struct cdiv_clk *cdiv = to_cdiv_clk(c);
 
 	spin_lock_irqsave(&local_clock_reg_lock, flags);
-	__branch_clk_disable_reg(&clk->b, clk->c.dbg_name);
+	__branch_clk_disable_reg(&cdiv->b, cdiv->c.dbg_name);
 	spin_unlock_irqrestore(&local_clock_reg_lock, flags);
 }
 
 static int cdiv_clk_set_rate(struct clk *c, unsigned long rate)
 {
-	struct cdiv_clk *clk = to_cdiv_clk(c);
+	struct cdiv_clk *cdiv = to_cdiv_clk(c);
 	u32 reg_val;
 
-	if (rate > clk->max_div)
+	if (rate > cdiv->max_div)
 		return -EINVAL;
 
 	spin_lock(&local_clock_reg_lock);
-	reg_val = readl_relaxed(clk->ns_reg);
-	reg_val &= ~(clk->ext_mask | (clk->max_div - 1) << clk->div_offset);
+	reg_val = readl_relaxed(cdiv->ns_reg);
+	reg_val &= ~(cdiv->ext_mask | (cdiv->max_div - 1) << cdiv->div_offset);
 	/* Non-zero rates mean set a divider, zero means use external input */
 	if (rate)
-		reg_val |= (rate - 1) << clk->div_offset;
+		reg_val |= (rate - 1) << cdiv->div_offset;
 	else
-		reg_val |= clk->ext_mask;
-	writel_relaxed(reg_val, clk->ns_reg);
+		reg_val |= cdiv->ext_mask;
+	writel_relaxed(reg_val, cdiv->ns_reg);
 	spin_unlock(&local_clock_reg_lock);
 
-	clk->cur_div = rate;
+	cdiv->cur_div = rate;
 	return 0;
 }
 
 static unsigned long cdiv_clk_get_rate(struct clk *c)
 {
-	struct cdiv_clk *clk = to_cdiv_clk(c);
-	return clk->cur_div;
+	return to_cdiv_clk(c)->cur_div;
 }
 
 static long cdiv_clk_round_rate(struct clk *c, unsigned long rate)
 {
-	struct cdiv_clk *clk = to_cdiv_clk(c);
-	return rate > clk->max_div ? -EPERM : rate;
+	return rate > to_cdiv_clk(c)->max_div ? -EPERM : rate;
 }
 
 static int cdiv_clk_list_rate(struct clk *c, unsigned n)
 {
-	struct cdiv_clk *clk = to_cdiv_clk(c);
-	return n > clk->max_div ? -ENXIO : n;
+	return n > to_cdiv_clk(c)->max_div ? -ENXIO : n;
 }
 
 static enum handoff cdiv_clk_handoff(struct clk *c)
 {
-	struct cdiv_clk *clk = to_cdiv_clk(c);
+	struct cdiv_clk *cdiv = to_cdiv_clk(c);
 	enum handoff ret;
 	u32 reg_val;
 
-	ret = branch_handoff(&clk->b, &clk->c);
+	ret = branch_handoff(&cdiv->b, &cdiv->c);
 	if (ret == HANDOFF_DISABLED_CLK)
 		return ret;
 
-	reg_val = readl_relaxed(clk->ns_reg);
-	if (reg_val & clk->ext_mask) {
-		clk->cur_div = 0;
+	reg_val = readl_relaxed(cdiv->ns_reg);
+	if (reg_val & cdiv->ext_mask) {
+		cdiv->cur_div = 0;
 	} else {
-		reg_val >>= clk->div_offset;
-		clk->cur_div = (reg_val & (clk->max_div - 1)) + 1;
+		reg_val >>= cdiv->div_offset;
+		cdiv->cur_div = (reg_val & (cdiv->max_div - 1)) + 1;
 	}
 
 	return HANDOFF_ENABLED_CLK;
@@ -933,20 +922,17 @@
 
 static void cdiv_clk_enable_hwcg(struct clk *c)
 {
-	struct cdiv_clk *clk = to_cdiv_clk(c);
-	branch_enable_hwcg(&clk->b);
+	branch_enable_hwcg(&to_cdiv_clk(c)->b);
 }
 
 static void cdiv_clk_disable_hwcg(struct clk *c)
 {
-	struct cdiv_clk *clk = to_cdiv_clk(c);
-	branch_disable_hwcg(&clk->b);
+	branch_disable_hwcg(&to_cdiv_clk(c)->b);
 }
 
 static int cdiv_clk_in_hwcg_mode(struct clk *c)
 {
-	struct cdiv_clk *clk = to_cdiv_clk(c);
-	return branch_in_hwcg_mode(&clk->b);
+	return branch_in_hwcg_mode(&to_cdiv_clk(c)->b);
 }
 
 struct clk_ops clk_ops_cdiv = {
diff --git a/arch/arm/mach-msm/clock-local.h b/arch/arm/mach-msm/clock-local.h
index ffc7057..c0a7827 100644
--- a/arch/arm/mach-msm/clock-local.h
+++ b/arch/arm/mach-msm/clock-local.h
@@ -156,9 +156,9 @@
 extern struct clk_ops clk_ops_reset;
 
 int branch_reset(struct branch *b, enum clk_reset_action action);
-void __branch_clk_enable_reg(const struct branch *clk, const char *name);
-u32 __branch_clk_disable_reg(const struct branch *clk, const char *name);
-enum handoff branch_handoff(struct branch *clk, struct clk *c);
+void __branch_clk_enable_reg(const struct branch *b, const char *name);
+u32 __branch_clk_disable_reg(const struct branch *b, const char *name);
+enum handoff branch_handoff(struct branch *b, struct clk *c);
 
 /*
  * Generic clock-definition struct and macros
@@ -183,9 +183,9 @@
 	struct clk	c;
 };
 
-static inline struct rcg_clk *to_rcg_clk(struct clk *clk)
+static inline struct rcg_clk *to_rcg_clk(struct clk *c)
 {
-	return container_of(clk, struct rcg_clk, c);
+	return container_of(c, struct rcg_clk, c);
 }
 
 extern struct clk_ops clk_ops_rcg;
@@ -214,9 +214,9 @@
 	struct clk c;
 };
 
-static inline struct cdiv_clk *to_cdiv_clk(struct clk *clk)
+static inline struct cdiv_clk *to_cdiv_clk(struct clk *c)
 {
-	return container_of(clk, struct cdiv_clk, c);
+	return container_of(c, struct cdiv_clk, c);
 }
 
 extern struct clk_ops clk_ops_cdiv;
@@ -234,7 +234,7 @@
  * @enabled: true if clock is on, false otherwise
  * @b: branch
  * @parent: clock source
- * @c: clk
+ * @c: clock
  *
  * An on/off switch with a rate derived from the parent.
  */
@@ -245,9 +245,9 @@
 	struct clk c;
 };
 
-static inline struct branch_clk *to_branch_clk(struct clk *clk)
+static inline struct branch_clk *to_branch_clk(struct clk *c)
 {
-	return container_of(clk, struct branch_clk, c);
+	return container_of(c, struct branch_clk, c);
 }
 
 /**
@@ -255,7 +255,7 @@
  * @sample_ticks: sample period in reference clock ticks
  * @multiplier: measurement scale-up factor
  * @divider: measurement scale-down factor
- * @c: clk
+ * @c: clock
 */
 struct measure_clk {
 	u64 sample_ticks;
@@ -266,9 +266,9 @@
 
 extern struct clk_ops clk_ops_empty;
 
-static inline struct measure_clk *to_measure_clk(struct clk *clk)
+static inline struct measure_clk *to_measure_clk(struct clk *c)
 {
-	return container_of(clk, struct measure_clk, c);
+	return container_of(c, struct measure_clk, c);
 }
 
 /*
@@ -280,11 +280,11 @@
 /*
  * Generic set-rate implementations
  */
-void set_rate_mnd(struct rcg_clk *clk, struct clk_freq_tbl *nf);
-void set_rate_nop(struct rcg_clk *clk, struct clk_freq_tbl *nf);
-void set_rate_mnd_8(struct rcg_clk *clk, struct clk_freq_tbl *nf);
-void set_rate_mnd_banked(struct rcg_clk *clk, struct clk_freq_tbl *nf);
-void set_rate_div_banked(struct rcg_clk *clk, struct clk_freq_tbl *nf);
+void set_rate_mnd(struct rcg_clk *rcg, struct clk_freq_tbl *nf);
+void set_rate_nop(struct rcg_clk *rcg, struct clk_freq_tbl *nf);
+void set_rate_mnd_8(struct rcg_clk *rcg, struct clk_freq_tbl *nf);
+void set_rate_mnd_banked(struct rcg_clk *rcg, struct clk_freq_tbl *nf);
+void set_rate_div_banked(struct rcg_clk *rcg, struct clk_freq_tbl *nf);
 
 #endif /* __ARCH_ARM_MACH_MSM_CLOCK_LOCAL_H */
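
The rename from "clk" to "c", "rcg", "b" and so on matters because every one
of these wrappers is recovered from the generic struct clk pointer with
container_of(); a parameter that shadowed the struct name made that hard to
follow. A minimal standalone sketch of the idiom (container_of is redefined
locally so the snippet compiles outside the kernel; the fields are trimmed
for illustration):

	#include <stddef.h>

	/* Local stand-in for the kernel's container_of() helper. */
	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct clk { unsigned long rate; };

	struct rcg_clk {
		int enabled;
		struct clk c;	/* embedded generic clock */
	};

	/* Recover the enclosing rcg_clk from its embedded struct clk. */
	static inline struct rcg_clk *to_rcg_clk(struct clk *c)
	{
		return container_of(c, struct rcg_clk, c);
	}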
 
diff --git a/arch/arm/mach-msm/clock-pll.c b/arch/arm/mach-msm/clock-pll.c
index d839911..49bb063 100644
--- a/arch/arm/mach-msm/clock-pll.c
+++ b/arch/arm/mach-msm/clock-pll.c
@@ -55,16 +55,16 @@
 
 #define ENABLE_WAIT_MAX_LOOPS 200
 
-int pll_vote_clk_enable(struct clk *clk)
+int pll_vote_clk_enable(struct clk *c)
 {
 	u32 ena, count;
 	unsigned long flags;
-	struct pll_vote_clk *pll = to_pll_vote_clk(clk);
+	struct pll_vote_clk *pllv = to_pll_vote_clk(c);
 
 	spin_lock_irqsave(&pll_reg_lock, flags);
-	ena = readl_relaxed(PLL_EN_REG(pll));
-	ena |= pll->en_mask;
-	writel_relaxed(ena, PLL_EN_REG(pll));
+	ena = readl_relaxed(PLL_EN_REG(pllv));
+	ena |= pllv->en_mask;
+	writel_relaxed(ena, PLL_EN_REG(pllv));
 	spin_unlock_irqrestore(&pll_reg_lock, flags);
 
 	/*
@@ -75,45 +75,44 @@
 
 	/* Wait for pll to enable. */
 	for (count = ENABLE_WAIT_MAX_LOOPS; count > 0; count--) {
-		if (readl_relaxed(PLL_STATUS_REG(pll)) & pll->status_mask)
+		if (readl_relaxed(PLL_STATUS_REG(pllv)) & pllv->status_mask)
 			return 0;
 		udelay(1);
 	}
 
-	WARN("PLL %s didn't enable after voting for it!\n", clk->dbg_name);
+	WARN("PLL %s didn't enable after voting for it!\n", c->dbg_name);
 
 	return -ETIMEDOUT;
 }
 
-void pll_vote_clk_disable(struct clk *clk)
+void pll_vote_clk_disable(struct clk *c)
 {
 	u32 ena;
 	unsigned long flags;
-	struct pll_vote_clk *pll = to_pll_vote_clk(clk);
+	struct pll_vote_clk *pllv = to_pll_vote_clk(c);
 
 	spin_lock_irqsave(&pll_reg_lock, flags);
-	ena = readl_relaxed(PLL_EN_REG(pll));
-	ena &= ~(pll->en_mask);
-	writel_relaxed(ena, PLL_EN_REG(pll));
+	ena = readl_relaxed(PLL_EN_REG(pllv));
+	ena &= ~(pllv->en_mask);
+	writel_relaxed(ena, PLL_EN_REG(pllv));
 	spin_unlock_irqrestore(&pll_reg_lock, flags);
 }
 
-struct clk *pll_vote_clk_get_parent(struct clk *clk)
+struct clk *pll_vote_clk_get_parent(struct clk *c)
 {
-	struct pll_vote_clk *pll = to_pll_vote_clk(clk);
-	return pll->parent;
+	return to_pll_vote_clk(c)->parent;
 }
 
-int pll_vote_clk_is_enabled(struct clk *clk)
+int pll_vote_clk_is_enabled(struct clk *c)
 {
-	struct pll_vote_clk *pll = to_pll_vote_clk(clk);
-	return !!(readl_relaxed(PLL_STATUS_REG(pll)) & pll->status_mask);
+	struct pll_vote_clk *pllv = to_pll_vote_clk(c);
+	return !!(readl_relaxed(PLL_STATUS_REG(pllv)) & pllv->status_mask);
 }
 
-static enum handoff pll_vote_clk_handoff(struct clk *clk)
+static enum handoff pll_vote_clk_handoff(struct clk *c)
 {
-	struct pll_vote_clk *pll = to_pll_vote_clk(clk);
-	if (readl_relaxed(PLL_EN_REG(pll)) & pll->en_mask)
+	struct pll_vote_clk *pllv = to_pll_vote_clk(c);
+	if (readl_relaxed(PLL_EN_REG(pllv)) & pllv->en_mask)
 		return HANDOFF_ENABLED_CLK;
 
 	return HANDOFF_DISABLED_CLK;
@@ -158,10 +157,10 @@
 	mb();
 }
 
-static int local_pll_clk_enable(struct clk *clk)
+static int local_pll_clk_enable(struct clk *c)
 {
 	unsigned long flags;
-	struct pll_clk *pll = to_pll_clk(clk);
+	struct pll_clk *pll = to_pll_clk(c);
 
 	spin_lock_irqsave(&pll_reg_lock, flags);
 	__pll_clk_enable_reg(PLL_MODE_REG(pll));
@@ -177,10 +176,10 @@
 	writel_relaxed(mode, mode_reg);
 }
 
-static void local_pll_clk_disable(struct clk *clk)
+static void local_pll_clk_disable(struct clk *c)
 {
 	unsigned long flags;
-	struct pll_clk *pll = to_pll_clk(clk);
+	struct pll_clk *pll = to_pll_clk(c);
 
 	/*
 	 * Disable the PLL output, disable test mode, enable
@@ -191,9 +190,9 @@
 	spin_unlock_irqrestore(&pll_reg_lock, flags);
 }
 
-static enum handoff local_pll_clk_handoff(struct clk *clk)
+static enum handoff local_pll_clk_handoff(struct clk *c)
 {
-	struct pll_clk *pll = to_pll_clk(clk);
+	struct pll_clk *pll = to_pll_clk(c);
 	u32 mode = readl_relaxed(PLL_MODE_REG(pll));
 	u32 mask = PLL_BYPASSNL | PLL_RESET_N | PLL_OUTCTRL;
 
@@ -203,17 +202,16 @@
 	return HANDOFF_DISABLED_CLK;
 }
 
-static struct clk *local_pll_clk_get_parent(struct clk *clk)
+static struct clk *local_pll_clk_get_parent(struct clk *c)
 {
-	struct pll_clk *pll = to_pll_clk(clk);
-	return pll->parent;
+	return to_pll_clk(c)->parent;
 }
 
-int sr_pll_clk_enable(struct clk *clk)
+int sr_pll_clk_enable(struct clk *c)
 {
 	u32 mode;
 	unsigned long flags;
-	struct pll_clk *pll = to_pll_clk(clk);
+	struct pll_clk *pll = to_pll_clk(c);
 
 	spin_lock_irqsave(&pll_reg_lock, flags);
 	mode = readl_relaxed(PLL_MODE_REG(pll));
@@ -250,10 +248,10 @@
 
 #define PLL_LOCKED_BIT BIT(16)
 
-int copper_pll_clk_enable(struct clk *clk)
+int copper_pll_clk_enable(struct clk *c)
 {
 	unsigned long flags;
-	struct pll_clk *pll = to_pll_clk(clk);
+	struct pll_clk *pll = to_pll_clk(c);
 	u32 count, mode;
 	int ret = 0;
 
@@ -282,7 +280,7 @@
 	}
 
 	if (!(readl_relaxed(PLL_STATUS_REG(pll)) & PLL_LOCKED_BIT)) {
-		WARN("PLL %s didn't lock after enabling it!\n", clk->dbg_name);
+		WARN("PLL %s didn't lock after enabling it!\n", c->dbg_name);
 		ret = -ETIMEDOUT;
 		goto out;
 	}
@@ -379,9 +377,9 @@
 
 }
 
-static int pll_clk_enable(struct clk *clk)
+static int pll_clk_enable(struct clk *c)
 {
-	struct pll_shared_clk *pll = to_pll_shared_clk(clk);
+	struct pll_shared_clk *pll = to_pll_shared_clk(c);
 	unsigned int pll_id = pll->id;
 
 	remote_spin_lock(&pll_lock);
@@ -396,9 +394,9 @@
 	return 0;
 }
 
-static void pll_clk_disable(struct clk *clk)
+static void pll_clk_disable(struct clk *c)
 {
-	struct pll_shared_clk *pll = to_pll_shared_clk(clk);
+	struct pll_shared_clk *pll = to_pll_shared_clk(c);
 	unsigned int pll_id = pll->id;
 
 	remote_spin_lock(&pll_lock);
@@ -413,16 +411,14 @@
 	remote_spin_unlock(&pll_lock);
 }
 
-static int pll_clk_is_enabled(struct clk *clk)
+static int pll_clk_is_enabled(struct clk *c)
 {
-	struct pll_shared_clk *pll = to_pll_shared_clk(clk);
-
-	return readl_relaxed(PLL_MODE_REG(pll)) & BIT(0);
+	return readl_relaxed(PLL_MODE_REG(to_pll_shared_clk(c))) & BIT(0);
 }
 
-static enum handoff pll_clk_handoff(struct clk *clk)
+static enum handoff pll_clk_handoff(struct clk *c)
 {
-	struct pll_shared_clk *pll = to_pll_shared_clk(clk);
+	struct pll_shared_clk *pll = to_pll_shared_clk(c);
 	unsigned int pll_lval;
 	struct pll_rate *l;
 
@@ -438,12 +434,12 @@
 	/* Convert PLL L values to PLL Output rate */
 	for (l = pll_l_rate; l->rate != 0; l++) {
 		if (l->lvalue == pll_lval) {
-			clk->rate = l->rate;
+			c->rate = l->rate;
 			break;
 		}
 	}
 
-	if (!clk->rate) {
+	if (!c->rate) {
 		pr_crit("Unknown PLL's L value!\n");
 		BUG();
 	}
diff --git a/arch/arm/mach-msm/clock-pll.h b/arch/arm/mach-msm/clock-pll.h
index a8c642f..f24b066 100644
--- a/arch/arm/mach-msm/clock-pll.h
+++ b/arch/arm/mach-msm/clock-pll.h
@@ -34,7 +34,7 @@
  * @id: PLL ID
  * @mode_reg: enable register
  * @parent: clock source
- * @c: clk
+ * @c: clock
  */
 struct pll_shared_clk {
 	unsigned int id;
@@ -45,9 +45,9 @@
 
 extern struct clk_ops clk_ops_pll;
 
-static inline struct pll_shared_clk *to_pll_shared_clk(struct clk *clk)
+static inline struct pll_shared_clk *to_pll_shared_clk(struct clk *c)
 {
-	return container_of(clk, struct pll_shared_clk, c);
+	return container_of(c, struct pll_shared_clk, c);
 }
 
 /**
@@ -64,7 +64,7 @@
  * @status_mask: ANDed with @status_reg to determine if PLL is active.
  * @status_reg: status register
  * @parent: clock source
- * @c: clk
+ * @c: clock
  */
 struct pll_vote_clk {
 	u32 *soft_vote;
@@ -81,9 +81,9 @@
 
 extern struct clk_ops clk_ops_pll_vote;
 
-static inline struct pll_vote_clk *to_pll_vote_clk(struct clk *clk)
+static inline struct pll_vote_clk *to_pll_vote_clk(struct clk *c)
 {
-	return container_of(clk, struct pll_vote_clk, c);
+	return container_of(c, struct pll_vote_clk, c);
 }
 
 /**
@@ -105,21 +105,21 @@
 
 extern struct clk_ops clk_ops_local_pll;
 
-static inline struct pll_clk *to_pll_clk(struct clk *clk)
+static inline struct pll_clk *to_pll_clk(struct clk *c)
 {
-	return container_of(clk, struct pll_clk, c);
+	return container_of(c, struct pll_clk, c);
 }
 
-int sr_pll_clk_enable(struct clk *clk);
-int copper_pll_clk_enable(struct clk *clk);
+int sr_pll_clk_enable(struct clk *c);
+int copper_pll_clk_enable(struct clk *c);
 
 /*
  * PLL vote clock APIs
  */
-int pll_vote_clk_enable(struct clk *clk);
-void pll_vote_clk_disable(struct clk *clk);
-struct clk *pll_vote_clk_get_parent(struct clk *clk);
-int pll_vote_clk_is_enabled(struct clk *clk);
+int pll_vote_clk_enable(struct clk *c);
+void pll_vote_clk_disable(struct clk *c);
+struct clk *pll_vote_clk_get_parent(struct clk *c);
+int pll_vote_clk_is_enabled(struct clk *c);
 
 struct pll_config {
 	u32 l;
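
pll_vote_clk_enable() and copper_pll_clk_enable() above share the same shape:
set the enable/vote bit, then poll a status register for a bounded number of
microsecond-spaced iterations before giving up with -ETIMEDOUT. The pattern,
reduced to a standalone sketch (register access and the delay are stubbed;
the names are illustrative, not from the driver):

	#include <stdbool.h>

	#define ENABLE_WAIT_MAX_LOOPS 200

	/* Stubs standing in for readl_relaxed() and udelay(). */
	extern unsigned int read_status_reg(void);
	extern void delay_us(unsigned int us);

	/* Poll until (status & mask) is set, or give up after the budget. */
	static bool poll_status_set(unsigned int mask)
	{
		unsigned int count;

		for (count = ENABLE_WAIT_MAX_LOOPS; count > 0; count--) {
			if (read_status_reg() & mask)
				return true;	/* PLL reports locked/active */
			delay_us(1);
		}
		return false;	/* caller warns and returns -ETIMEDOUT */
	}
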
diff --git a/arch/arm/mach-msm/clock-rpm.c b/arch/arm/mach-msm/clock-rpm.c
index ab57cf8..e35e8d4 100644
--- a/arch/arm/mach-msm/clock-rpm.c
+++ b/arch/arm/mach-msm/clock-rpm.c
@@ -54,13 +54,15 @@
 	return (rc < 0) ? rc : iv.value * 1000;
 }
 
-#define RPM_SMD_KEY_CLOCK_SET_RATE	0x007A484B
+#define RPM_SMD_KEY_RATE	0x007A484B
+#define RPM_SMD_KEY_ENABLE	0x62616E45
 
 static int clk_rpmrs_set_rate_smd(struct rpm_clk *r, uint32_t value,
 				uint32_t context, int noirq)
 {
+	u32 rpm_key = r->branch ? RPM_SMD_KEY_ENABLE : RPM_SMD_KEY_RATE;
 	struct msm_rpm_kvp kvp = {
-		.key = RPM_SMD_KEY_CLOCK_SET_RATE,
+		.key = rpm_key,
 		.data = (void *)&value,
 		.length = sizeof(value),
 	};
@@ -272,9 +274,11 @@
 	if (rc < 0)
 		return HANDOFF_DISABLED_CLK;
 
-	r->last_set_khz = iv.value;
-	r->last_set_sleep_khz = iv.value;
-	clk->rate = iv.value * 1000;
+	if (!r->branch) {
+		r->last_set_khz = iv.value;
+		r->last_set_sleep_khz = iv.value;
+		clk->rate = iv.value * 1000;
+	}
 
 	return HANDOFF_ENABLED_CLK;
 }
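
The two RPM_SMD_KEY_* constants above look opaque, but they appear to be four
ASCII characters packed little-endian ("KHz" with a trailing NUL for the rate
key, "Enab" for the enable key), which is how RPM SMD distinguishes the value
sent for a branch-only clock from the one sent for a rate-controlled clock. A
tiny standalone decoder showing this reading (not part of the driver):

	#include <stdio.h>
	#include <stdint.h>

	/* Print a 32-bit RPM SMD key as the ASCII bytes it packs. */
	static void print_key(uint32_t key)
	{
		char s[5] = {
			(char)(key & 0xff),
			(char)((key >> 8) & 0xff),
			(char)((key >> 16) & 0xff),
			(char)((key >> 24) & 0xff),
			'\0',
		};
		printf("0x%08x -> \"%s\"\n", (unsigned int)key, s);
	}

	int main(void)
	{
		print_key(0x007A484B);	/* RPM_SMD_KEY_RATE   -> "KHz"  */
		print_key(0x62616E45);	/* RPM_SMD_KEY_ENABLE -> "Enab" */
		return 0;
	}
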
diff --git a/arch/arm/mach-msm/devices-8064.c b/arch/arm/mach-msm/devices-8064.c
index 66ce30e..cc3c6db 100644
--- a/arch/arm/mach-msm/devices-8064.c
+++ b/arch/arm/mach-msm/devices-8064.c
@@ -1917,6 +1917,7 @@
 		MSM_RPM_MAP(8064, HDMI_SWITCH, HDMI_SWITCH, 1),
 		MSM_RPM_MAP(8064, DDR_DMM_0, DDR_DMM, 2),
 		MSM_RPM_MAP(8064, QDSS_CLK, QDSS_CLK, 1),
+		MSM_RPM_MAP(8064, VDDMIN_GPIO, VDDMIN_GPIO, 1),
 	},
 	.target_status = {
 		MSM_RPM_STATUS_ID_MAP(8064, VERSION_MAJOR),
@@ -2050,6 +2051,7 @@
 		MSM_RPM_STATUS_ID_MAP(8064, PM8821_S2_1),
 		MSM_RPM_STATUS_ID_MAP(8064, PM8821_L1_0),
 		MSM_RPM_STATUS_ID_MAP(8064, PM8821_L1_1),
+		MSM_RPM_STATUS_ID_MAP(8064, VDDMIN_GPIO),
 	},
 	.target_ctrl_id = {
 		MSM_RPM_CTRL_MAP(8064, VERSION_MAJOR),
@@ -2271,6 +2273,7 @@
 #define AP2MDM_STATUS			48
 #define AP2MDM_SOFT_RESET		27
 #define AP2MDM_WAKEUP			35
+#define MDM2AP_PBLRDY			46
 
 static struct resource mdm_resources[] = {
 	{
@@ -2309,6 +2312,12 @@
 		.name	= "AP2MDM_WAKEUP",
 		.flags	= IORESOURCE_IO,
 	},
+	{
+		.start	= MDM2AP_PBLRDY,
+		.end	= MDM2AP_PBLRDY,
+		.name	= "MDM2AP_PBLRDY",
+		.flags	= IORESOURCE_IO,
+	},
 };
 
 struct platform_device mdm_8064_device = {
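
A consumer of the new MDM2AP_PBLRDY entry would normally look it up by name
rather than hard-coding GPIO 46. A sketch of that lookup, assuming a standard
platform-driver probe (the helper below is hypothetical, not part of this
patch):

	#include <linux/platform_device.h>
	#include <linux/ioport.h>
	#include <linux/errno.h>

	/* Fetch the PBLRDY GPIO number from the mdm resource table. */
	static int example_get_pblrdy_gpio(struct platform_device *pdev)
	{
		struct resource *res;

		res = platform_get_resource_byname(pdev, IORESOURCE_IO,
						   "MDM2AP_PBLRDY");
		if (!res)
			return -ENODEV;	/* board did not declare the line */

		return (int)res->start;
	}
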
diff --git a/arch/arm/mach-msm/devices-msm7x27a.c b/arch/arm/mach-msm/devices-msm7x27a.c
index b3454cd..553f52f 100644
--- a/arch/arm/mach-msm/devices-msm7x27a.c
+++ b/arch/arm/mach-msm/devices-msm7x27a.c
@@ -1715,7 +1715,7 @@
 
 void __init msm8625_init_irq(void)
 {
-	msm_gic_irq_extn_init(MSM_QGIC_DIST_BASE, MSM_QGIC_CPU_BASE);
+	msm_gic_irq_extn_init();
 	gic_init(0, GIC_PPI_START, MSM_QGIC_DIST_BASE,
 			(void *)MSM_QGIC_CPU_BASE);
 }
diff --git a/arch/arm/mach-msm/hsic_sysmon.c b/arch/arm/mach-msm/hsic_sysmon.c
index 07a9dbb..153e1b4 100644
--- a/arch/arm/mach-msm/hsic_sysmon.c
+++ b/arch/arm/mach-msm/hsic_sysmon.c
@@ -314,6 +314,8 @@
 static inline void hsic_sysmon_debugfs_cleanup(void) { }
 #endif
 
+static void hsic_sysmon_pdev_release(struct device *dev) { }
+
 static int
 hsic_sysmon_probe(struct usb_interface *ifc, const struct usb_device_id *id)
 {
@@ -371,6 +373,7 @@
 
 	hs->pdev.name = "sys_mon";
 	hs->pdev.id = SYSMON_SS_EXT_MODEM;
+	hs->pdev.dev.release = hsic_sysmon_pdev_release;
 	platform_device_register(&hs->pdev);
 
 	pr_debug("complete");
diff --git a/arch/arm/mach-msm/include/mach/camera.h b/arch/arm/mach-msm/include/mach/camera.h
index d8543f3..6d2c25a 100644
--- a/arch/arm/mach-msm/include/mach/camera.h
+++ b/arch/arm/mach-msm/include/mach/camera.h
@@ -94,6 +94,7 @@
 	VFE_MSG_V2X_CAPTURE,
 	VFE_MSG_OUTPUT_PRIMARY,
 	VFE_MSG_OUTPUT_SECONDARY,
+	VFE_MSG_OUTPUT_TERTIARY1,
 };
 
 enum vpe_resp_msg {
diff --git a/arch/arm/mach-msm/include/mach/iommu.h b/arch/arm/mach-msm/include/mach/iommu.h
index b57ae10..4bfbe61 100644
--- a/arch/arm/mach-msm/include/mach/iommu.h
+++ b/arch/arm/mach-msm/include/mach/iommu.h
@@ -15,6 +15,7 @@
 
 #include <linux/interrupt.h>
 #include <linux/clk.h>
+#include <linux/regulator/consumer.h>
 #include <mach/socinfo.h>
 
 extern pgprot_t     pgprot_kernel;
@@ -79,6 +80,7 @@
 	struct clk *clk;
 	struct clk *pclk;
 	const char *name;
+	struct regulator *gdsc;
 };
 
 /**
diff --git a/arch/arm/mach-msm/include/mach/mdm2.h b/arch/arm/mach-msm/include/mach/mdm2.h
index 997b3be..09839b2 100644
--- a/arch/arm/mach-msm/include/mach/mdm2.h
+++ b/arch/arm/mach-msm/include/mach/mdm2.h
@@ -13,12 +13,21 @@
 #ifndef _ARCH_ARM_MACH_MSM_MDM2_H
 #define _ARCH_ARM_MACH_MSM_MDM2_H
 
+struct mdm_vddmin_resource {
+	int rpm_id;
+	int ap2mdm_vddmin_gpio;
+	unsigned int modes;
+	unsigned int drive_strength;
+	int mdm2ap_vddmin_gpio;
+};
+
 struct mdm_platform_data {
 	char *mdm_version;
 	int ramdump_delay_ms;
 	int soft_reset_inverted;
 	int early_power_on;
 	int sfr_query;
+	struct mdm_vddmin_resource *vddmin_resource;
 	struct platform_device *peripheral_platform_device;
 };
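
Board code is expected to describe its vddmin wiring with this new structure
and hang it off mdm_platform_data. A sketch of that wiring (the field names
and the RPM id come from this patch; every numeric value and the version
string below are placeholders, not real board data):

	#include <mach/mdm2.h>
	#include <mach/rpm.h>

	static struct mdm_vddmin_resource example_vddmin_rscs = {
		.rpm_id			= MSM_RPM_ID_VDDMIN_GPIO,
		.ap2mdm_vddmin_gpio	= 30,	/* placeholder */
		.modes			= 0x03,	/* placeholder */
		.drive_strength		= 8,	/* placeholder */
		.mdm2ap_vddmin_gpio	= 80,	/* placeholder */
	};

	static struct mdm_platform_data example_mdm_pdata = {
		.mdm_version		= "3.0",	/* placeholder */
		.ramdump_delay_ms	= 2000,		/* placeholder */
		.vddmin_resource	= &example_vddmin_rscs,
	};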
 
diff --git a/arch/arm/mach-msm/include/mach/msm_bus_board.h b/arch/arm/mach-msm/include/mach/msm_bus_board.h
index 0c556b5..574491a 100644
--- a/arch/arm/mach-msm/include/mach/msm_bus_board.h
+++ b/arch/arm/mach-msm/include/mach/msm_bus_board.h
@@ -83,6 +83,14 @@
 extern struct msm_bus_fabric_registration msm_bus_8930_sys_fpb_pdata;
 extern struct msm_bus_fabric_registration msm_bus_8930_cpss_fpb_pdata;
 
+extern struct msm_bus_fabric_registration msm_bus_copper_sys_noc_pdata;
+extern struct msm_bus_fabric_registration msm_bus_copper_mmss_noc_pdata;
+extern struct msm_bus_fabric_registration msm_bus_copper_bimc_pdata;
+extern struct msm_bus_fabric_registration msm_bus_copper_ocmem_noc_pdata;
+extern struct msm_bus_fabric_registration msm_bus_copper_periph_noc_pdata;
+extern struct msm_bus_fabric_registration msm_bus_copper_config_noc_pdata;
+extern struct msm_bus_fabric_registration msm_bus_copper_ocmem_vnoc_pdata;
+
 void msm_bus_rpm_set_mt_mask(void);
 int msm_bus_board_rpm_get_il_ids(uint16_t *id);
 int msm_bus_board_get_iid(int id);
@@ -148,6 +156,20 @@
 		MSM_BUS_CLK_UNHALT<<MSM_BUS_MASTER_SHIFT((master),\
 		MSM_BUS_CLK_HALT_FIELDSIZE))\
 
+#define RPM_BUS_SLAVE_REQ	0x766c7362
+#define RPM_BUS_MASTER_REQ	0x73616d62
+
+enum msm_bus_rpm_slave_field_type {
+	RPM_SLAVE_FIELD_BW = 0x00007762,
+};
+
+enum msm_bus_rpm_mas_field_type {
+	RPM_MASTER_FIELD_BW =		0x00007762,
+	RPM_MASTER_FIELD_BW_T0 =	0x30747762,
+	RPM_MASTER_FIELD_BW_T1 =	0x31747762,
+	RPM_MASTER_FIELD_BW_T2 =	0x32747762,
+};
+
 /* Topology related enums */
 enum msm_bus_fabric_type {
 	MSM_BUS_FAB_DEFAULT = 0,
diff --git a/arch/arm/mach-msm/include/mach/qpnp-int.h b/arch/arm/mach-msm/include/mach/qpnp-int.h
index a79d2fc..21d95e6 100644
--- a/arch/arm/mach-msm/include/mach/qpnp-int.h
+++ b/arch/arm/mach-msm/include/mach/qpnp-int.h
@@ -52,7 +52,8 @@
  * Used by the PMIC Arbiter driver or equivalent to register
  * callbacks for interrupt events.
  */
-int qpnpint_register_controller(unsigned int busno,
+int qpnpint_register_controller(struct device_node *node,
+				struct spmi_controller *ctrl,
 				struct qpnp_local_int *li_cb);
 
 /**
@@ -68,8 +69,11 @@
 {
 	return -ENXIO;
 }
-static inline int qpnpint_register_controller(unsigned int busno,
-				struct qpnp_local_int *li_cb)
+
+static inline int qpnpint_register_controller(struct device_node *node,
+					      struct spmi_controller *ctrl,
+					      struct qpnp_local_int *li_cb)
 {
 	return -ENXIO;
 }
diff --git a/arch/arm/mach-msm/include/mach/qpnp.h b/arch/arm/mach-msm/include/mach/qpnp.h
deleted file mode 100644
index 1d2e440..0000000
--- a/arch/arm/mach-msm/include/mach/qpnp.h
+++ /dev/null
@@ -1,19 +0,0 @@
- /* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- */
-
-#include <linux/spmi.h>
-
-struct resource *qpnp_get_resource(struct spmi_device *dev,
-				   unsigned int node_idx, unsigned int type,
-				   unsigned int res_num);
-int qpnp_get_irq(struct spmi_device *dev, unsigned int node_idx,
-					  unsigned int res_num);
diff --git a/arch/arm/mach-msm/include/mach/rpm-8064.h b/arch/arm/mach-msm/include/mach/rpm-8064.h
index c4c6b0a..39ec7ff 100644
--- a/arch/arm/mach-msm/include/mach/rpm-8064.h
+++ b/arch/arm/mach-msm/include/mach/rpm-8064.h
@@ -120,7 +120,9 @@
 	MSM_RPM_8064_SEL_HDMI_SWITCH					= 83,
 	MSM_RPM_8064_SEL_DDR_DMM					= 84,
 
-	MSM_RPM_8064_SEL_LAST = MSM_RPM_8064_SEL_DDR_DMM,
+	MSM_RPM_8064_SEL_VDDMIN_GPIO				= 89,
+
+	MSM_RPM_8064_SEL_LAST = MSM_RPM_8064_SEL_VDDMIN_GPIO,
 };
 
 /* RPM resource (4 byte) word ID enum */
@@ -287,8 +289,9 @@
 	MSM_RPM_8064_ID_DDR_DMM_0					= 212,
 	MSM_RPM_8064_ID_DDR_DMM_1					= 213,
 	MSM_RPM_8064_ID_QDSS_CLK					= 214,
+	MSM_RPM_8064_ID_VDDMIN_GPIO					= 215,
 
-	MSM_RPM_8064_ID_LAST = MSM_RPM_8064_ID_QDSS_CLK,
+	MSM_RPM_8064_ID_LAST = MSM_RPM_8064_ID_VDDMIN_GPIO,
 };
 
 
@@ -425,8 +428,9 @@
 	MSM_RPM_8064_STATUS_ID_DDR_DMM_1				= 128,
 	MSM_RPM_8064_STATUS_ID_EBI1_CH0_RANGE				= 129,
 	MSM_RPM_8064_STATUS_ID_EBI1_CH1_RANGE				= 130,
+	MSM_RPM_8064_STATUS_ID_VDDMIN_GPIO				= 131,
 
-	MSM_RPM_8064_STATUS_ID_LAST = MSM_RPM_8064_STATUS_ID_EBI1_CH1_RANGE,
+	MSM_RPM_8064_STATUS_ID_LAST = MSM_RPM_8064_STATUS_ID_VDDMIN_GPIO,
 };
 
 #endif /* __ARCH_ARM_MACH_MSM_RPM_8064_H */
diff --git a/arch/arm/mach-msm/include/mach/rpm.h b/arch/arm/mach-msm/include/mach/rpm.h
index de4c9d9..f6b9a6e 100644
--- a/arch/arm/mach-msm/include/mach/rpm.h
+++ b/arch/arm/mach-msm/include/mach/rpm.h
@@ -460,6 +460,7 @@
 	MSM_RPM_ID_PM8821_S2_1,
 	MSM_RPM_ID_PM8821_L1_0,
 	MSM_RPM_ID_PM8821_L1_1,
+	MSM_RPM_ID_VDDMIN_GPIO,
 
 	MSM_RPM_ID_LAST,
 };
@@ -825,6 +826,7 @@
 	MSM_RPM_STATUS_ID_PM8821_S2_1,
 	MSM_RPM_STATUS_ID_PM8821_L1_0,
 	MSM_RPM_STATUS_ID_PM8821_L1_1,
+	MSM_RPM_STATUS_ID_VDDMIN_GPIO,
 
 	MSM_RPM_STATUS_ID_LAST,
 };
diff --git a/arch/arm/mach-msm/iommu_domains.c b/arch/arm/mach-msm/iommu_domains.c
index a7e06ba..e92b5c5 100644
--- a/arch/arm/mach-msm/iommu_domains.c
+++ b/arch/arm/mach-msm/iommu_domains.c
@@ -47,31 +47,57 @@
 				unsigned long page_size,
 				int cached)
 {
-	int i, ret_value = 0;
-	unsigned long order = get_order(page_size);
-	unsigned long aligned_size = ALIGN(size, page_size);
-	unsigned long nrpages = aligned_size >> (PAGE_SHIFT + order);
+	int ret = 0;
+	int i = 0;
 	unsigned long phy_addr = ALIGN(virt_to_phys(iommu_dummy), page_size);
 	unsigned long temp_iova = start_iova;
+	if (page_size == SZ_4K) {
+		struct scatterlist *sglist;
+		unsigned int nrpages = PFN_ALIGN(size) >> PAGE_SHIFT;
+		struct page *dummy_page = phys_to_page(phy_addr);
 
-	for (i = 0; i < nrpages; i++) {
-		int ret = iommu_map(domain, temp_iova, phy_addr, page_size,
-					cached);
-		if (ret) {
-			pr_err("%s: could not map %lx in domain %p, error: %d\n",
-				__func__, start_iova, domain, ret);
-			ret_value = -EAGAIN;
+		sglist = vmalloc(sizeof(*sglist) * nrpages);
+		if (!sglist) {
+			ret = -ENOMEM;
 			goto out;
 		}
-		temp_iova += page_size;
+
+		sg_init_table(sglist, nrpages);
+
+		for (i = 0; i < nrpages; i++)
+			sg_set_page(&sglist[i], dummy_page, PAGE_SIZE, 0);
+
+		ret = iommu_map_range(domain, temp_iova, sglist, size, cached);
+		if (ret) {
+			pr_err("%s: could not map extra %lx in domain %p\n",
+				__func__, start_iova, domain);
+		}
+
+		vfree(sglist);
+	} else {
+		unsigned long order = get_order(page_size);
+		unsigned long aligned_size = ALIGN(size, page_size);
+		unsigned long nrpages = aligned_size >> (PAGE_SHIFT + order);
+
+		for (i = 0; i < nrpages; i++) {
+			ret = iommu_map(domain, temp_iova, phy_addr, page_size,
+						cached);
+			if (ret) {
+				pr_err("%s: could not map %lx in domain %p, error: %d\n",
+					__func__, start_iova, domain, ret);
+				ret = -EAGAIN;
+				goto out;
+			}
+			temp_iova += page_size;
+		}
 	}
-	return ret_value;
+	return ret;
 out:
 	for (; i > 0; --i) {
 		temp_iova -= page_size;
 		iommu_unmap(domain, start_iova, page_size);
 	}
-	return ret_value;
+	return ret;
 }
 
 void msm_iommu_unmap_extra(struct iommu_domain *domain,
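
The rewrite above replaces one iommu_map() call per 4K page with a single
iommu_map_range() over a scatterlist whose entries all point at the same
physical dummy page, which is much cheaper for large extra-map regions. The
scatterlist construction on its own, as a sketch (error handling trimmed;
the helper name is illustrative):

	#include <linux/scatterlist.h>
	#include <linux/vmalloc.h>
	#include <linux/mm.h>

	/* Build an sglist of nrpages entries that all reference one page. */
	static struct scatterlist *example_dummy_sglist(struct page *dummy,
							unsigned int nrpages)
	{
		struct scatterlist *sglist;
		unsigned int i;

		sglist = vmalloc(sizeof(*sglist) * nrpages);
		if (!sglist)
			return NULL;

		sg_init_table(sglist, nrpages);
		for (i = 0; i < nrpages; i++)
			sg_set_page(&sglist[i], dummy, PAGE_SIZE, 0);

		return sglist;	/* caller maps it, then vfree()s it */
	}
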
diff --git a/arch/arm/mach-msm/lpm_levels.c b/arch/arm/mach-msm/lpm_levels.c
index a1f5ff5..e65f71c 100644
--- a/arch/arm/mach-msm/lpm_levels.c
+++ b/arch/arm/mach-msm/lpm_levels.c
@@ -18,17 +18,32 @@
 #include <linux/platform_device.h>
 #include <linux/of.h>
 #include <mach/mpm.h>
-#include "rpm_resources.h"
+#include "lpm_resources.h"
 #include "pm.h"
 
 static struct msm_rpmrs_level *msm_lpm_levels;
 static int msm_lpm_level_count;
 
-static int msm_lpm_enter_sleep(uint32_t sclk_count, void *limits,
+static void msm_lpm_level_update(void)
+{
+	unsigned int lpm_level;
+	struct msm_rpmrs_level *level = NULL;
+
+	for (lpm_level = 0; lpm_level < msm_lpm_level_count; lpm_level++) {
+		level = &msm_lpm_levels[lpm_level];
+		level->available =
+			!msm_lpm_level_beyond_limit(&level->rs_limits);
+	}
+}
+
+int msm_lpm_enter_sleep(uint32_t sclk_count, void *limits,
 		bool from_idle, bool notify_rpm)
 {
-	/* TODO */
-	return 0;
+	int ret = 0;
+
+	ret = msm_lpmrs_enter_sleep((struct msm_rpmrs_limits *)limits,
+					from_idle, notify_rpm);
+	return ret;
 }
 
 static void msm_lpm_exit_sleep(void *limits, bool from_idle,
@@ -38,14 +53,7 @@
 	return;
 }
 
-static bool msm_rpmrs_irqs_detectable(struct msm_rpmrs_limits *limits,
-		bool irqs_detect, bool gpio_detect)
-{
-	/* TODO */
-	return true;
-}
-
-void msm_rpmrs_show_resources(void)
+void msm_lpm_show_resources(void)
 {
 	/* TODO */
 	return;
@@ -80,18 +88,13 @@
 {
 	unsigned int cpu = smp_processor_id();
 	struct msm_rpmrs_level *best_level = NULL;
-	bool irqs_detectable = false;
-	bool gpio_detectable = false;
 	uint32_t pwr;
 	int i;
 
 	if (!msm_lpm_levels)
 		return NULL;
 
-	if (sleep_mode == MSM_PM_SLEEP_MODE_POWER_COLLAPSE) {
-		irqs_detectable = msm_mpm_irqs_detectable(from_idle);
-		gpio_detectable = msm_mpm_gpio_irqs_detectable(from_idle);
-	}
+	msm_lpm_level_update();
 
 	for (i = 0; i < msm_lpm_level_count; i++) {
 		struct msm_rpmrs_level *level = &msm_lpm_levels[i];
@@ -105,10 +108,6 @@
 		if (latency_us < level->latency_us)
 			continue;
 
-		if (!msm_rpmrs_irqs_detectable(&level->rs_limits,
-					irqs_detectable, gpio_detectable))
-			continue;
-
 		if (sleep_us <= 1) {
 			pwr = level->energy_overhead;
 		} else if (sleep_us <= level->time_overhead_us) {
@@ -192,7 +191,7 @@
 		ret = of_property_read_u32(node, key, &val);
 		if (ret)
 			goto fail;
-		level->rs_limits.vdd_dig = val;
+		level->rs_limits.vdd_dig_lower_bound = val;
 
 		key = "qcom,vdd-mem-upper-bound";
 		ret = of_property_read_u32(node, key, &val);
@@ -204,7 +203,7 @@
 		ret = of_property_read_u32(node, key, &val);
 		if (ret)
 			goto fail;
-		level->rs_limits.vdd_mem = val;
+		level->rs_limits.vdd_mem_lower_bound = val;
 
 		key = "qcom,latency-us";
 		ret = of_property_read_u32(node, key, &val);
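
The level-parsing code above repeats the same read-or-fail step for each
required devicetree property, with "key" naming the property that was missing
on the failure path. That step on its own, as a sketch (the helper is
illustrative, not part of the patch):

	#include <linux/kernel.h>
	#include <linux/of.h>

	/* Read a required u32 property, logging which key was at fault. */
	static int example_read_required_u32(struct device_node *node,
					     const char *key, u32 *out)
	{
		int ret = of_property_read_u32(node, key, out);

		if (ret)
			pr_err("%s: missing/invalid property %s\n",
							__func__, key);
		return ret;
	}
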
diff --git a/arch/arm/mach-msm/lpm_resources.c b/arch/arm/mach-msm/lpm_resources.c
new file mode 100644
index 0000000..f57f974
--- /dev/null
+++ b/arch/arm/mach-msm/lpm_resources.c
@@ -0,0 +1,865 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/cpu.h>
+#include <mach/mpm.h>
+#include <linux/notifier.h>
+#include <linux/hrtimer.h>
+#include <linux/tick.h>
+#include "spm.h"
+#include "lpm_resources.h"
+#include "rpm-notifier.h"
+#include <mach/rpm-smd.h>
+#include "idle.h"
+
+/* Debug Definitions */
+enum {
+	MSM_LPMRS_DEBUG_RPM = BIT(0),
+	MSM_LPMRS_DEBUG_PXO = BIT(1),
+	MSM_LPMRS_DEBUG_VDD_DIG = BIT(2),
+	MSM_LPMRS_DEBUG_VDD_MEM = BIT(3),
+	MSM_LPMRS_DEBUG_L2 = BIT(4),
+	MSM_LPMRS_DEBUG_LVLS = BIT(5),
+};
+
+static int msm_lpm_debug_mask;
+module_param_named(
+	debug_mask, msm_lpm_debug_mask, int, S_IRUGO | S_IWUSR | S_IWGRP
+);
+
+static bool msm_lpm_get_rpm_notif = true;
+
+/* Macros */
+#define VDD_DIG_ACTIVE		(950000)
+#define VDD_MEM_ACTIVE		(1050000)
+#define MAX_RS_NAME		(16)
+#define MAX_RS_SIZE		(4)
+#define IS_RPM_CTL(rs) \
+	(!strncmp(rs->name, "rpm_ctl", MAX_RS_NAME))
+
+static bool msm_lpm_beyond_limits_vdd_dig(struct msm_rpmrs_limits *limits);
+static void msm_lpm_aggregate_vdd_dig(struct msm_rpmrs_limits *limits);
+static void msm_lpm_flush_vdd_dig(int notify_rpm);
+static void msm_lpm_notify_vdd_dig(struct msm_rpm_notifier_data
+					*rpm_notifier_cb);
+
+static bool msm_lpm_beyond_limits_vdd_mem(struct msm_rpmrs_limits *limits);
+static void msm_lpm_aggregate_vdd_mem(struct msm_rpmrs_limits *limits);
+static void msm_lpm_flush_vdd_mem(int notify_rpm);
+static void msm_lpm_notify_vdd_mem(struct msm_rpm_notifier_data
+					*rpm_notifier_cb);
+
+static bool msm_lpm_beyond_limits_pxo(struct msm_rpmrs_limits *limits);
+static void msm_lpm_aggregate_pxo(struct msm_rpmrs_limits *limits);
+static void msm_lpm_flush_pxo(int notify_rpm);
+static void msm_lpm_notify_pxo(struct msm_rpm_notifier_data
+					*rpm_notifier_cb);
+
+
+static bool msm_lpm_beyond_limits_l2(struct msm_rpmrs_limits *limits);
+static void msm_lpm_flush_l2(int notify_rpm);
+static void msm_lpm_aggregate_l2(struct msm_rpmrs_limits *limits);
+
+static void msm_lpm_flush_rpm_ctl(int notify_rpm);
+
+static int msm_lpm_rpm_callback(struct notifier_block *rpm_nb,
+				unsigned long action, void *rpm_notif);
+
+static int msm_lpm_cpu_callback(struct notifier_block *cpu_nb,
+				unsigned long action, void *hcpu);
+
+static ssize_t msm_lpm_resource_attr_show(
+	struct kobject *kobj, struct kobj_attribute *attr, char *buf);
+static ssize_t msm_lpm_resource_attr_store(struct kobject *kobj,
+	struct kobj_attribute *attr, const char *buf, size_t count);
+
+
+#define RPMRS_ATTR(_name) \
+	__ATTR(_name, S_IRUGO|S_IWUSR, \
+		msm_lpm_resource_attr_show, msm_lpm_resource_attr_store)
+
+/* Data structures */
+struct msm_lpm_rs_data {
+	uint32_t type;
+	uint32_t id;
+	uint32_t key;
+	uint32_t value;
+	uint32_t default_value;
+	struct msm_rpm_request *handle;
+};
+
+struct msm_lpm_resource {
+	struct msm_lpm_rs_data rs_data;
+	uint32_t sleep_value;
+	char name[MAX_RS_NAME];
+
+	uint32_t  enable_low_power;
+	bool valid;
+
+	bool (*beyond_limits)(struct msm_rpmrs_limits *limits);
+	void (*aggregate)(struct msm_rpmrs_limits *limits);
+	void (*flush)(int notify_rpm);
+	void (*notify)(struct msm_rpm_notifier_data *rpm_notifier_cb);
+	struct kobj_attribute ko_attr;
+};
+
+
+static struct msm_lpm_resource msm_lpm_l2 = {
+	.name = "l2",
+	.beyond_limits = msm_lpm_beyond_limits_l2,
+	.aggregate = msm_lpm_aggregate_l2,
+	.flush = msm_lpm_flush_l2,
+	.notify = NULL,
+	.valid = true,
+	.rs_data = {
+		.value = MSM_LPM_L2_CACHE_ACTIVE,
+		.default_value = MSM_LPM_L2_CACHE_ACTIVE,
+	},
+	.ko_attr = RPMRS_ATTR(l2),
+};
+
+static struct msm_lpm_resource msm_lpm_vdd_dig = {
+	.name = "vdd-dig",
+	.beyond_limits = msm_lpm_beyond_limits_vdd_dig,
+	.aggregate = msm_lpm_aggregate_vdd_dig,
+	.flush = msm_lpm_flush_vdd_dig,
+	.notify = msm_lpm_notify_vdd_dig,
+	.valid = false,
+	.rs_data = {
+		.value = VDD_DIG_ACTIVE,
+		.default_value = VDD_DIG_ACTIVE,
+	},
+	.ko_attr = RPMRS_ATTR(vdd_dig),
+};
+
+static struct msm_lpm_resource msm_lpm_vdd_mem = {
+	.name = "vdd-mem",
+	.beyond_limits = msm_lpm_beyond_limits_vdd_mem,
+	.aggregate = msm_lpm_aggregate_vdd_mem,
+	.flush = msm_lpm_flush_vdd_mem,
+	.notify = msm_lpm_notify_vdd_mem,
+	.valid = false,
+	.rs_data = {
+		.value = VDD_MEM_ACTIVE,
+		.default_value = VDD_MEM_ACTIVE,
+	},
+	.ko_attr = RPMRS_ATTR(vdd_mem),
+};
+
+static struct msm_lpm_resource msm_lpm_pxo = {
+	.name = "pxo",
+	.beyond_limits = msm_lpm_beyond_limits_pxo,
+	.aggregate = msm_lpm_aggregate_pxo,
+	.flush = msm_lpm_flush_pxo,
+	.notify = msm_lpm_notify_pxo,
+	.valid = false,
+	.rs_data = {
+		.value = MSM_LPM_PXO_ON,
+		.default_value = MSM_LPM_PXO_ON,
+	},
+	.ko_attr = RPMRS_ATTR(pxo),
+};
+
+static struct msm_lpm_resource *msm_lpm_resources[] = {
+	&msm_lpm_vdd_dig,
+	&msm_lpm_vdd_mem,
+	&msm_lpm_pxo,
+	&msm_lpm_l2,
+};
+
+static struct msm_lpm_resource msm_lpm_rpm_ctl = {
+	.name = "rpm_ctl",
+	.beyond_limits = NULL,
+	.aggregate = NULL,
+	.flush = msm_lpm_flush_rpm_ctl,
+	.valid = true,
+	.ko_attr = RPMRS_ATTR(rpm_ctl),
+};
+
+static struct notifier_block msm_lpm_rpm_nblk = {
+	.notifier_call = msm_lpm_rpm_callback,
+};
+
+static struct notifier_block __refdata msm_lpm_cpu_nblk = {
+	.notifier_call = msm_lpm_cpu_callback,
+};
+
+static DEFINE_SPINLOCK(msm_lpm_sysfs_lock);
+
+/* Attribute Definitions */
+static struct attribute *msm_lpm_attributes[] = {
+	&msm_lpm_vdd_dig.ko_attr.attr,
+	&msm_lpm_vdd_mem.ko_attr.attr,
+	&msm_lpm_pxo.ko_attr.attr,
+	&msm_lpm_l2.ko_attr.attr,
+	NULL,
+};
+
+static struct attribute_group msm_lpm_attribute_group = {
+	.attrs = msm_lpm_attributes,
+};
+
+static struct attribute *msm_lpm_rpm_ctl_attribute[] = {
+	&msm_lpm_rpm_ctl.ko_attr.attr,
+	NULL,
+};
+
+static struct attribute_group msm_lpm_rpm_ctl_attr_group = {
+	.attrs = msm_lpm_rpm_ctl_attribute,
+};
+
+#define GET_RS_FROM_ATTR(attr) \
+	(container_of(attr, struct msm_lpm_resource, ko_attr))
+
+/* RPM */
+static struct msm_rpm_request *msm_lpm_create_rpm_request
+				(uint32_t rsc_type, uint32_t rsc_id)
+{
+	struct msm_rpm_request *handle = NULL;
+
+	handle = msm_rpm_create_request(MSM_RPM_CTX_SLEEP_SET,
+						rsc_type,
+						rsc_id, 1);
+	return handle;
+}
+
+static int msm_lpm_send_sleep_data(struct msm_rpm_request *handle,
+					uint32_t key, uint8_t *value)
+{
+	int ret = 0;
+
+	if (!handle)
+		return ret;
+
+	ret = msm_rpm_add_kvp_data_noirq(handle, key, value, MAX_RS_SIZE);
+
+	if (ret < 0) {
+		pr_err("%s: Error adding kvp data key %u, size %d\n",
+				__func__, key, MAX_RS_SIZE);
+		return ret;
+	}
+
+	ret = msm_rpm_send_request_noirq(handle);
+	if (ret < 0) {
+		pr_err("%s: Error sending RPM request key %u, handle 0x%x\n",
+				__func__, key, (unsigned int)handle);
+		return ret;
+	}
+	if (msm_lpm_debug_mask & MSM_LPMRS_DEBUG_RPM)
+		pr_info("Rs key %u, value %u, size %d\n", key,
+				*(unsigned int *)value, MAX_RS_SIZE);
+	return ret;
+}
+
+/* RPM Notifier */
+static int msm_lpm_rpm_callback(struct notifier_block *rpm_nb,
+					unsigned long action,
+					void *rpm_notif)
+{
+	int i;
+	struct msm_lpm_resource *rs = NULL;
+	struct msm_rpm_notifier_data *rpm_notifier_cb =
+			(struct msm_rpm_notifier_data *)rpm_notif;
+
+	if (!msm_lpm_get_rpm_notif)
+		return NOTIFY_DONE;
+
+	if (!(rpm_nb && rpm_notif))
+		return NOTIFY_BAD;
+
+	for (i = 0; i < ARRAY_SIZE(msm_lpm_resources); i++) {
+		rs = msm_lpm_resources[i];
+		if (rs && rs->valid && rs->notify)
+			rs->notify(rpm_notifier_cb);
+	}
+
+	return NOTIFY_OK;
+}
+
+/* SYSFS */
+static ssize_t msm_lpm_resource_attr_show(
+	struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+{
+	struct kernel_param kp;
+	unsigned long flags;
+	unsigned int temp;
+	int rc;
+
+	spin_lock_irqsave(&msm_lpm_sysfs_lock, flags);
+	temp = GET_RS_FROM_ATTR(attr)->enable_low_power;
+	spin_unlock_irqrestore(&msm_lpm_sysfs_lock, flags);
+
+	kp.arg = &temp;
+	rc = param_get_uint(buf, &kp);
+
+	if (rc > 0) {
+		strlcat(buf, "\n", PAGE_SIZE);
+		rc++;
+	}
+
+	return rc;
+}
+
+static ssize_t msm_lpm_resource_attr_store(struct kobject *kobj,
+	struct kobj_attribute *attr, const char *buf, size_t count)
+{
+	struct kernel_param kp;
+	unsigned long flags;
+	unsigned int temp;
+	int rc;
+
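+	/*
+	 * Parse the user-supplied value with param_set_uint; for the rpm_ctl
+	 * resource the new setting is flushed to the RPM immediately below.
+	 */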
+	kp.arg = &temp;
+	rc = param_set_uint(buf, &kp);
+	if (rc)
+		return rc;
+
+	spin_lock_irqsave(&msm_lpm_sysfs_lock, flags);
+	GET_RS_FROM_ATTR(attr)->enable_low_power = temp;
+
+	if (IS_RPM_CTL(GET_RS_FROM_ATTR(attr))) {
+		struct msm_lpm_resource *rs = GET_RS_FROM_ATTR(attr);
+		rs->flush(false);
+	}
+
+	spin_unlock_irqrestore(&msm_lpm_sysfs_lock, flags);
+
+	return count;
+}
+
+/* lpm resource handling functions */
+/* Common */
+static void msm_lpm_notify_common(struct msm_rpm_notifier_data *rpm_notifier_cb,
+				struct msm_lpm_resource *rs)
+{
+	if ((rpm_notifier_cb->rsc_type == rs->rs_data.type) &&
+			(rpm_notifier_cb->rsc_id == rs->rs_data.id) &&
+			(rpm_notifier_cb->key == rs->rs_data.key)) {
+		BUG_ON(rpm_notifier_cb->size > MAX_RS_SIZE);
+
+		if (rs->valid) {
+			if (rpm_notifier_cb->value)
+				memcpy(&rs->rs_data.value,
+				rpm_notifier_cb->value, rpm_notifier_cb->size);
+			else
+				rs->rs_data.value = rs->rs_data.default_value;
+
+			if (msm_lpm_debug_mask & MSM_LPMRS_DEBUG_RPM)
+				pr_info("Notification received Rs %s value %u\n",
+						rs->name, rs->rs_data.value);
+		}
+	}
+}
+
+/* L2 */
+static bool msm_lpm_beyond_limits_l2(struct msm_rpmrs_limits *limits)
+{
+	uint32_t l2;
+	bool ret = true;
+	struct msm_lpm_resource *rs = &msm_lpm_l2;
+
+	if (rs->valid) {
+		uint32_t l2_buf = rs->rs_data.value;
+
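+		/*
+		 * enable_low_power selects the deepest L2 mode allowed via
+		 * sysfs: 1 = GDHS, 2 = HSFS open, anything else keeps L2
+		 * active.
+		 */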
+		if (rs->enable_low_power == 1)
+			l2 = MSM_LPM_L2_CACHE_GDHS;
+		else if (rs->enable_low_power == 2)
+			l2 = MSM_LPM_L2_CACHE_HSFS_OPEN;
+		else
+			l2 = MSM_LPM_L2_CACHE_ACTIVE;
+
+		if (l2_buf > l2)
+			l2 = l2_buf;
+		ret = (l2 > limits->l2_cache);
+
+		if (msm_lpm_debug_mask & MSM_LPMRS_DEBUG_L2)
+			pr_info("%s: l2 buf %u, l2 %u, limits %u\n",
+				__func__, l2_buf, l2, limits->l2_cache);
+	}
+	return ret;
+}
+
+static void msm_lpm_aggregate_l2(struct msm_rpmrs_limits *limits)
+{
+	struct msm_lpm_resource *rs = &msm_lpm_l2;
+
+	if (rs->valid)
+		rs->sleep_value = limits->l2_cache;
+}
+
+static void msm_lpm_flush_l2(int notify_rpm)
+{
+	struct msm_lpm_resource *rs = &msm_lpm_l2;
+	int lpm;
+	int rc;
+
+	switch (rs->sleep_value) {
+	case MSM_LPM_L2_CACHE_HSFS_OPEN:
+		lpm = MSM_SPM_L2_MODE_POWER_COLLAPSE;
+		msm_pm_set_l2_flush_flag(1);
+		break;
+	case MSM_LPM_L2_CACHE_GDHS:
+		lpm = MSM_SPM_L2_MODE_GDHS;
+		break;
+	case MSM_LPM_L2_CACHE_RETENTION:
+		lpm = MSM_SPM_L2_MODE_RETENTION;
+		break;
+	default:
+	case MSM_LPM_L2_CACHE_ACTIVE:
+		lpm = MSM_SPM_L2_MODE_DISABLED;
+		break;
+	}
+
+	rc = msm_spm_l2_set_low_power_mode(lpm, notify_rpm);
+
+	if (rc < 0)
+		pr_err("%s: Failed to set L2 low power mode %d",
+			__func__, lpm);
+
+	if (msm_lpm_debug_mask & MSM_LPMRS_DEBUG_L2)
+		pr_info("%s: Requesting low power mode %d\n",
+				__func__, lpm);
+}
+
+/* RPM CTL */
+static void msm_lpm_flush_rpm_ctl(int notify_rpm)
+{
+	struct msm_lpm_resource *rs = &msm_lpm_rpm_ctl;
+	msm_lpm_send_sleep_data(rs->rs_data.handle,
+				rs->rs_data.key,
+				(uint8_t *)&rs->sleep_value);
+}
+
+/*VDD Dig*/
+static bool msm_lpm_beyond_limits_vdd_dig(struct msm_rpmrs_limits *limits)
+{
+	bool ret = true;
+	struct msm_lpm_resource *rs = &msm_lpm_vdd_dig;
+
+	if (rs->valid) {
+		uint32_t vdd_buf = rs->rs_data.value;
+		uint32_t vdd_dig = rs->enable_low_power ? rs->enable_low_power :
+					rs->rs_data.default_value;
+
+		if (vdd_buf > vdd_dig)
+			vdd_dig = vdd_buf;
+
+		ret = (vdd_dig > limits->vdd_dig_upper_bound);
+
+		if (msm_lpm_debug_mask & MSM_LPMRS_DEBUG_VDD_DIG)
+			pr_info("%s: buf %d vdd dig %d limits %d\n",
+				__func__, vdd_buf, vdd_dig,
+				limits->vdd_dig_upper_bound);
+	}
+	return ret;
+}
+
+static void msm_lpm_aggregate_vdd_dig(struct msm_rpmrs_limits *limits)
+{
+	struct msm_lpm_resource *rs = &msm_lpm_vdd_dig;
+
+	if (rs->valid) {
+		uint32_t vdd_buf = rs->rs_data.value;
+		if (limits->vdd_dig_lower_bound > vdd_buf)
+			rs->sleep_value = limits->vdd_dig_lower_bound;
+		else
+			rs->sleep_value = vdd_buf;
+	}
+}
+
+static void msm_lpm_flush_vdd_dig(int notify_rpm)
+{
+	if (notify_rpm) {
+		struct msm_lpm_resource *rs = &msm_lpm_vdd_dig;
+		msm_lpm_send_sleep_data(rs->rs_data.handle,
+					rs->rs_data.key,
+					(uint8_t *)&rs->sleep_value);
+	}
+}
+
+static void msm_lpm_notify_vdd_dig(struct msm_rpm_notifier_data
+					*rpm_notifier_cb)
+{
+	struct msm_lpm_resource *rs = &msm_lpm_vdd_dig;
+	msm_lpm_notify_common(rpm_notifier_cb, rs);
+}
+
+/*VDD Mem*/
+static bool msm_lpm_beyond_limits_vdd_mem(struct msm_rpmrs_limits *limits)
+{
+	bool ret = true;
+	struct msm_lpm_resource *rs = &msm_lpm_vdd_mem;
+
+	if (rs->valid) {
+		uint32_t vdd_buf = rs->rs_data.value;
+		uint32_t vdd_mem = rs->enable_low_power ? rs->enable_low_power :
+					rs->rs_data.default_value;
+
+		if (vdd_buf > vdd_mem)
+			vdd_mem = vdd_buf;
+
+		ret = (vdd_mem > limits->vdd_mem_upper_bound);
+
+		if (msm_lpm_debug_mask & MSM_LPMRS_DEBUG_VDD_MEM)
+			pr_info("%s: buf %d vdd mem %d limits %d\n",
+				__func__, vdd_buf, vdd_mem,
+				limits->vdd_mem_upper_bound);
+	}
+	return ret;
+}
+
+static void msm_lpm_aggregate_vdd_mem(struct msm_rpmrs_limits *limits)
+{
+	struct msm_lpm_resource *rs = &msm_lpm_vdd_mem;
+
+	if (rs->valid) {
+		uint32_t vdd_buf = rs->rs_data.value;
+		if (limits->vdd_mem_lower_bound > vdd_buf)
+			rs->sleep_value = limits->vdd_mem_lower_bound;
+		else
+			rs->sleep_value = vdd_buf;
+	}
+}
+
+static void msm_lpm_flush_vdd_mem(int notify_rpm)
+{
+	if (notify_rpm) {
+		struct msm_lpm_resource *rs = &msm_lpm_vdd_mem;
+		msm_lpm_send_sleep_data(rs->rs_data.handle,
+					rs->rs_data.key,
+					(uint8_t *)&rs->sleep_value);
+	}
+}
+
+static void msm_lpm_notify_vdd_mem(struct msm_rpm_notifier_data
+					*rpm_notifier_cb)
+{
+	struct msm_lpm_resource *rs = &msm_lpm_vdd_mem;
+	msm_lpm_notify_common(rpm_notifier_cb, rs);
+}
+
+/*PXO*/
+static bool msm_lpm_beyond_limits_pxo(struct msm_rpmrs_limits *limits)
+{
+	bool ret = true;
+	struct msm_lpm_resource *rs = &msm_lpm_pxo;
+
+	if (rs->valid) {
+		uint32_t pxo_buf = rs->rs_data.value;
+		uint32_t pxo = rs->enable_low_power ? MSM_LPM_PXO_OFF :
+					rs->rs_data.default_value;
+
+		if (pxo_buf > pxo)
+			pxo = pxo_buf;
+
+		ret = (pxo > limits->pxo);
+
+		if (msm_lpm_debug_mask & MSM_LPMRS_DEBUG_PXO)
+			pr_info("%s:pxo buf %d pxo %d limits pxo %d\n",
+					__func__, pxo_buf, pxo, limits->pxo);
+	}
+	return ret;
+}
+
+static void msm_lpm_aggregate_pxo(struct msm_rpmrs_limits *limits)
+{
+	struct msm_lpm_resource *rs = &msm_lpm_pxo;
+
+	if (rs->valid) {
+		uint32_t pxo_buf = rs->rs_data.value;
+		if (limits->pxo > pxo_buf)
+			rs->sleep_value = limits->pxo;
+		else
+			rs->sleep_value = pxo_buf;
+
+		if (msm_lpm_debug_mask & MSM_LPMRS_DEBUG_PXO)
+			pr_info("%s: pxo buf %d sleep value %d\n",
+					__func__, pxo_buf, rs->sleep_value);
+	}
+}
+
+static void msm_lpm_flush_pxo(int notify_rpm)
+{
+	if (notify_rpm) {
+		struct msm_lpm_resource *rs = &msm_lpm_pxo;
+		msm_lpm_send_sleep_data(rs->rs_data.handle,
+					rs->rs_data.key,
+					(uint8_t *)&rs->sleep_value);
+	}
+}
+
+static void msm_lpm_notify_pxo(struct msm_rpm_notifier_data
+					*rpm_notifier_cb)
+{
+	struct msm_lpm_resource *rs = &msm_lpm_pxo;
+	msm_lpm_notify_common(rpm_notifier_cb, rs);
+}
+
+/* MPM
+static bool msm_lpm_use_mpm(struct msm_rpmrs_limits *limits)
+{
+	return ((limits->pxo == MSM_LPM_PXO_OFF) ||
+		(limits->vdd_dig_lower_bound <= VDD_DIG_RET_HIGH));
+}*/
+
+/* LPM levels interface */
+bool msm_lpm_level_beyond_limit(struct msm_rpmrs_limits *limits)
+{
+	int i;
+	struct msm_lpm_resource *rs;
+	bool beyond_limit = false;
+
+	for (i = 0; i < ARRAY_SIZE(msm_lpm_resources); i++) {
+		rs = msm_lpm_resources[i];
+		if (rs->beyond_limits && rs->beyond_limits(limits)) {
+			beyond_limit = true;
+			if (msm_lpm_debug_mask & MSM_LPMRS_DEBUG_LVLS)
+				pr_info("%s: %s beyond limit", __func__,
+						rs->name);
+			break;
+		}
+	}
+
+	return beyond_limit;
+}
+
+int msm_lpmrs_enter_sleep(struct msm_rpmrs_limits *limits,
+				bool from_idle, bool notify_rpm)
+{
+	int ret = 0;
+	int i;
+	struct msm_lpm_resource *rs = NULL;
+
+	for (i = 0; i < ARRAY_SIZE(msm_lpm_resources); i++) {
+		rs = msm_lpm_resources[i];
+		if (rs->aggregate)
+			rs->aggregate(limits);
+	}
+
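+	/* Ignore RPM notifications while the aggregated sleep votes are flushed. */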
+	msm_lpm_get_rpm_notif = false;
+	for (i = 0; i < ARRAY_SIZE(msm_lpm_resources); i++) {
+		rs = msm_lpm_resources[i];
+		if (rs->flush)
+			rs->flush(notify_rpm);
+	}
+	msm_lpm_get_rpm_notif = true;
+
+	/* MPM Enter sleep
+	if (msm_lpm_use_mpm(limits))
+		msm_mpm_enter_sleep(from_idle);*/
+
+	return ret;
+}
+
+void msm_lpmrs_exit_sleep(uint32_t sclk_count, struct msm_rpmrs_limits *limits,
+		bool from_idle, bool notify_rpm)
+{
+	/* MPM exit sleep
+	if (msm_lpm_use_mpm(limits))
+		msm_mpm_exit_sleep(from_idle);*/
+}
+
+static int msm_lpm_cpu_callback(struct notifier_block *cpu_nb,
+		unsigned long action, void *hcpu)
+{
+	struct msm_lpm_resource *rs = &msm_lpm_l2;
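+	/*
+	 * Track CPU hotplug: keep the L2 vote active while more than one core
+	 * is online; once a single core remains, allow L2 GDHS.
+	 */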
+	switch (action) {
+	case CPU_ONLINE_FROZEN:
+	case CPU_ONLINE:
+		if (num_online_cpus() > 1)
+			rs->rs_data.value = MSM_LPM_L2_CACHE_ACTIVE;
+		break;
+	case CPU_DEAD_FROZEN:
+	case CPU_DEAD:
+		if (num_online_cpus() == 1)
+			rs->rs_data.value = MSM_LPM_L2_CACHE_GDHS;
+		break;
+	}
+	return NOTIFY_OK;
+}
+
+/* RPM CTL */
+static int __devinit msm_lpm_init_rpm_ctl(void)
+{
+	struct msm_lpm_resource *rs = &msm_lpm_rpm_ctl;
+
+	rs->rs_data.handle = msm_rpm_create_request(
+				MSM_RPM_CTX_ACTIVE_SET,
+				rs->rs_data.type,
+				rs->rs_data.id, 1);
+	if (!rs->rs_data.handle)
+		return -EIO;
+
+	rs->valid = true;
+	return 0;
+}
+
+static int __devinit msm_lpm_resource_sysfs_add(void)
+{
+	struct kobject *module_kobj = NULL;
+	struct kobject *low_power_kobj = NULL;
+	struct kobject *mode_kobj = NULL;
+	int rc = 0;
+
+	module_kobj = kset_find_obj(module_kset, KBUILD_MODNAME);
+	if (!module_kobj) {
+		pr_err("%s: cannot find kobject for module %s\n",
+			__func__, KBUILD_MODNAME);
+		rc = -ENOENT;
+		goto resource_sysfs_add_exit;
+	}
+
+	low_power_kobj = kobject_create_and_add(
+				"enable_low_power", module_kobj);
+	if (!low_power_kobj) {
+		pr_err("%s: cannot create kobject\n", __func__);
+		rc = -ENOMEM;
+		goto resource_sysfs_add_exit;
+	}
+
+	mode_kobj = kobject_create_and_add(
+				"mode", module_kobj);
+	if (!mode_kobj) {
+		pr_err("%s: cannot create kobject\n", __func__);
+		rc = -ENOMEM;
+		goto resource_sysfs_add_exit;
+	}
+
+	rc = sysfs_create_group(low_power_kobj, &msm_lpm_attribute_group);
+	if (rc) {
+		pr_err("%s: cannot create kobject attribute group\n", __func__);
+		goto resource_sysfs_add_exit;
+	}
+
+	rc = sysfs_create_group(mode_kobj, &msm_lpm_rpm_ctl_attr_group);
+	if (rc) {
+		pr_err("%s: cannot create kobject attribute group\n", __func__);
+		goto resource_sysfs_add_exit;
+	}
+
+resource_sysfs_add_exit:
+	if (rc) {
+		if (low_power_kobj)
+			sysfs_remove_group(low_power_kobj,
+					&msm_lpm_attribute_group);
+		kobject_del(low_power_kobj);
+		kobject_del(mode_kobj);
+	}
+
+	return rc;
+}
+
+late_initcall(msm_lpm_resource_sysfs_add);
+
+static int __devinit msm_lpmrs_probe(struct platform_device *pdev)
+{
+	struct device_node *node = NULL;
+	char *key = NULL;
+	int ret = 0;
+
+	for_each_child_of_node(pdev->dev.of_node, node) {
+		struct msm_lpm_resource *rs = NULL;
+		const char *val;
+		int i;
+
+		key = "qcom,name";
+		ret = of_property_read_string(node, key, &val);
+		if (ret) {
+			pr_err("Cannot read string\n");
+			goto fail;
+		}
+
+		for (i = 0; i < ARRAY_SIZE(msm_lpm_resources); i++) {
+			char *lpmrs_name = msm_lpm_resources[i]->name;
+			if (!msm_lpm_resources[i]->valid &&
+				!strncmp(val, lpmrs_name, strnlen(lpmrs_name,
+							MAX_RS_NAME))) {
+				rs = msm_lpm_resources[i];
+				break;
+			}
+		}
+
+		if (!rs) {
+			pr_err("LPM resource not found\n");
+			continue;
+		}
+
+		key = "qcom,type";
+		ret = of_property_read_u32(node, key, &rs->rs_data.type);
+		if (ret) {
+			pr_err("Failed to read type\n");
+			goto fail;
+		}
+
+		key = "qcom,id";
+		ret = of_property_read_u32(node, key, &rs->rs_data.id);
+		if (ret) {
+			pr_err("Failed to read id\n");
+			goto fail;
+		}
+
+		key = "qcom,key";
+		ret = of_property_read_u32(node, key, &rs->rs_data.key);
+		if (ret) {
+			pr_err("Failed to read key\n");
+			goto fail;
+		}
+
+		rs->rs_data.handle = msm_lpm_create_rpm_request(
+					rs->rs_data.type, rs->rs_data.id);
+
+		if (!rs->rs_data.handle) {
+			pr_err("%s: Failed to allocate handle for %s\n",
+					__func__, rs->name);
+			ret = -1;
+			goto fail;
+		}
+
+		rs->valid = true;
+	}
+	msm_rpm_register_notifier(&msm_lpm_rpm_nblk);
+	msm_lpm_init_rpm_ctl();
+	register_hotcpu_notifier(&msm_lpm_cpu_nblk);
+	/* For UP mode, set the default to HSFS OPEN */
+	if (num_possible_cpus() == 1) {
+		msm_lpm_l2.rs_data.default_value = MSM_LPM_L2_CACHE_HSFS_OPEN;
+		msm_lpm_l2.rs_data.value = MSM_LPM_L2_CACHE_HSFS_OPEN;
+	}
+	return 0;
+fail:
+	return ret;
+}
+
+static struct of_device_id msm_lpmrs_match_table[] = {
+	{.compatible = "qcom,lpm-resources"},
+	{},
+};
+
+static struct platform_driver msm_lpmrs_driver = {
+	.probe = msm_lpmrs_probe,
+	.driver = {
+		.name = "lpm-resources",
+		.owner = THIS_MODULE,
+		.of_match_table = msm_lpmrs_match_table,
+	},
+};
+
+int __init msm_lpmrs_module_init(void)
+{
+	return platform_driver_register(&msm_lpmrs_driver);
+}
diff --git a/arch/arm/mach-msm/lpm_resources.h b/arch/arm/mach-msm/lpm_resources.h
new file mode 100644
index 0000000..9973fbf
--- /dev/null
+++ b/arch/arm/mach-msm/lpm_resources.h
@@ -0,0 +1,128 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __ARCH_ARM_MACH_MSM_LPM_RESOURCES_H
+#define __ARCH_ARM_MACH_MSM_LPM_RESOURCES_H
+
+#include "pm.h"
+
+enum {
+	MSM_LPM_PXO_OFF = 0,
+	MSM_LPM_PXO_ON = 1,
+};
+
+enum {
+	MSM_LPM_L2_CACHE_HSFS_OPEN = 0,
+	MSM_LPM_L2_CACHE_GDHS = 1,
+	MSM_LPM_L2_CACHE_RETENTION = 2,
+	MSM_LPM_L2_CACHE_ACTIVE = 3,
+};
+
+struct msm_rpmrs_limits {
+	uint32_t pxo;
+	uint32_t l2_cache;
+	uint32_t vdd_mem_upper_bound;
+	uint32_t vdd_mem_lower_bound;
+	uint32_t vdd_dig_upper_bound;
+	uint32_t vdd_dig_lower_bound;
+
+	uint32_t latency_us[NR_CPUS];
+	uint32_t power[NR_CPUS];
+};
+
+struct msm_rpmrs_level {
+	enum msm_pm_sleep_mode sleep_mode;
+	struct msm_rpmrs_limits rs_limits;
+	bool available;
+	uint32_t latency_us;
+	uint32_t steady_state_power;
+	uint32_t energy_overhead;
+	uint32_t time_overhead_us;
+};
+
+#ifdef CONFIG_MSM_RPM_SMD
+
+/**
+ * msm_lpm_level_beyond_limit() - Check if the resources in a low power level
+ * are beyond the limits of the driver votes received for those resources. This
+ * function is used by lpm_levels to eliminate any low power level that cannot
+ * be entered.
+ *
+ * @limits: pointer to the resource limits of a low power level.
+ *
+ * Returns true if the resource limits are beyond the driver resource votes,
+ * false otherwise.
+ */
+bool msm_lpm_level_beyond_limit(struct msm_rpmrs_limits *limits);
+
+/**
+ * msm_lpmrs_enter_sleep() - Flush the sleep votes of the low power resources
+ * to the RPM driver and, if needed, configure the MPM depending on the low
+ * power mode being entered. The L2 low power mode is also set here.
+ *
+ * @limits: pointer to the resource limits of the low power mode being entered.
+ * @from_idle: bool indicating whether this call is being made as part of
+ *             idle power collapse.
+ * @notify_rpm: bool that informs if this is an RPM notified power collapse.
+ *
+ * Returns 0 on success.
+ */
+int msm_lpmrs_enter_sleep(struct msm_rpmrs_limits *limits,
+	bool from_idle, bool notify_rpm);
+
+/**
+ * msm_lpmrs_exit_sleep() - Exit sleep, reset the MPM and L2 mode.
+ * @sclk_count: sleep clock count.
+ * @limits: pointer to the resource limits of the most recent low power mode.
+ * @from_idle: bool indicating whether this call is being made as part of
+ *             idle power collapse.
+ * @notify_rpm: bool that informs if this is an RPM notified power collapse.
+ */
+void msm_lpmrs_exit_sleep(uint32_t sclk_count, struct msm_rpmrs_limits *limits,
+	bool from_idle, bool notify_rpm);
+
+/**
+ * msm_lpmrs_module_init() - Init function that parses the device tree to
+ * get the low power resource attributes and registers with the RPM driver
+ * for callback notifications.
+ *
+ * Returns 0 on success.
+ */
+int __init msm_lpmrs_module_init(void);
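+
+/*
+ * Illustrative usage from a low power level driver (a sketch only; 'level',
+ * 'from_idle', 'notify_rpm' and 'sclk_count' are hypothetical locals):
+ *
+ *	if (!msm_lpm_level_beyond_limit(&level->rs_limits)) {
+ *		msm_lpmrs_enter_sleep(&level->rs_limits, from_idle, notify_rpm);
+ *		... enter the chosen low power mode ...
+ *		msm_lpmrs_exit_sleep(sclk_count, &level->rs_limits,
+ *					from_idle, notify_rpm);
+ *	}
+ */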
+
+#else
+static inline bool msm_lpm_level_beyond_limit(struct msm_rpmrs_limits *limits)
+{
+	return true;
+}
+
+static inline int msm_lpmrs_enter_sleep(struct msm_rpmrs_limits *limits,
+	bool from_idle, bool notify_rpm)
+{
+	return 0;
+}
+
+static inline void msm_lpmrs_exit_sleep(uint32_t sclk_count,
+		struct msm_rpmrs_limits *limits, bool from_idle,
+		bool notify_rpm)
+{
+	return;
+}
+
+static inline int __init msm_lpmrs_module_init(void)
+{
+	return 0;
+}
+#endif /* CONFIG_MSM_RPM_SMD */
+
+#endif
diff --git a/arch/arm/mach-msm/mdm2.c b/arch/arm/mach-msm/mdm2.c
index bd7bd9e..4791955 100644
--- a/arch/arm/mach-msm/mdm2.c
+++ b/arch/arm/mach-msm/mdm2.c
@@ -45,6 +45,7 @@
 #define MDM_MODEM_TIMEOUT	6000
 #define MDM_HOLD_TIME		4000
 #define MDM_MODEM_DELTA		100
+#define MDM_PBLRDY_CNT		20
 
 static int mdm_debug_on;
 static int power_on_count;
@@ -93,6 +94,8 @@
 {
 	int soft_reset_direction =
 		mdm_drv->pdata->soft_reset_inverted ? 0 : 1;
+	int i;
+	int pblrdy;
 
 	if (power_on_count != 1) {
 		pr_err("%s: Calling fn when power_on_count != 1\n",
@@ -118,7 +121,19 @@
 	pr_debug("%s: De-asserting soft reset gpio\n", __func__);
 	gpio_direction_output(mdm_drv->ap2mdm_soft_reset_gpio,
 						  soft_reset_direction);
+	if (!mdm_drv->mdm2ap_pblrdy)
+		goto start_mdm_peripheral;
 
+	for (i = 0; i < MDM_PBLRDY_CNT; i++) {
+		pblrdy = gpio_get_value(mdm_drv->mdm2ap_pblrdy);
+		if (pblrdy)
+			break;
+		usleep_range(5000, 5000);
+	}
+
+	pr_debug("%s: i:%d\n", __func__, i);
+
+start_mdm_peripheral:
 	mdm_peripheral_connect(mdm_drv);
 	msleep(200);
 }
@@ -127,6 +142,8 @@
 {
 	int soft_reset_direction =
 		mdm_drv->pdata->soft_reset_inverted ? 0 : 1;
+	int i;
+	int pblrdy;
 
 	/* De-assert the soft reset line. */
 	pr_err("%s: soft resetting mdm modem\n", __func__);
@@ -139,6 +156,19 @@
 	gpio_direction_output(mdm_drv->ap2mdm_soft_reset_gpio,
 		soft_reset_direction == 1 ? 1 : 0);
 
+	if (!mdm_drv->mdm2ap_pblrdy)
+		goto start_mdm_peripheral;
+
+	for (i = 0; i < MDM_PBLRDY_CNT; i++) {
+		pblrdy = gpio_get_value(mdm_drv->mdm2ap_pblrdy);
+		if (pblrdy)
+			break;
+		usleep_range(5000, 5000);
+	}
+
+	pr_debug("%s: i:%d\n", __func__, i);
+
+start_mdm_peripheral:
 	mdm_peripheral_connect(mdm_drv);
 	msleep(200);
 }
diff --git a/arch/arm/mach-msm/mdm_common.c b/arch/arm/mach-msm/mdm_common.c
index 74bf25d..c961731 100644
--- a/arch/arm/mach-msm/mdm_common.c
+++ b/arch/arm/mach-msm/mdm_common.c
@@ -36,6 +36,7 @@
 #include <mach/restart.h>
 #include <mach/subsystem_notif.h>
 #include <mach/subsystem_restart.h>
+#include <mach/rpm.h>
 #include <linux/msm_charm.h>
 #include "msm_watchdog.h"
 #include "mdm_private.h"
@@ -65,6 +66,57 @@
 #define SFR_MAX_RETRIES		10
 #define SFR_RETRY_INTERVAL	1000
 
+static irqreturn_t mdm_vddmin_change(int irq, void *dev_id)
+{
+	int value = gpio_get_value(
+		mdm_drv->pdata->vddmin_resource->mdm2ap_vddmin_gpio);
+
+	if (value == 0)
+		pr_info("External Modem entered Vddmin\n");
+	else
+		pr_info("External Modem exited Vddmin\n");
+
+	return IRQ_HANDLED;
+}
+
+static void mdm_setup_vddmin_gpios(void)
+{
+	struct msm_rpm_iv_pair req;
+	struct mdm_vddmin_resource *vddmin_res;
+	int irq, ret;
+
+	/* This resource may not be supported by some platforms. */
+	vddmin_res = mdm_drv->pdata->vddmin_resource;
+	if (!vddmin_res)
+		return;
+
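+	/*
+	 * Pack the vddmin RPM request value: AP2MDM gpio number in bits 31:16,
+	 * monitoring modes in bits 15:8, drive strength in bits 7:0.
+	 */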
+	req.id = vddmin_res->rpm_id;
+	req.value = ((uint32_t)vddmin_res->ap2mdm_vddmin_gpio & 0x0000FFFF)
+							<< 16;
+	req.value |= ((uint32_t)vddmin_res->modes & 0x000000FF) << 8;
+	req.value |= (uint32_t)vddmin_res->drive_strength & 0x000000FF;
+
+	msm_rpm_set(MSM_RPM_CTX_SET_0, &req, 1);
+
+	/* Monitor low power gpio from mdm */
+	irq = MSM_GPIO_TO_INT(vddmin_res->mdm2ap_vddmin_gpio);
+	if (irq < 0) {
+		pr_err("%s: could not get LPM POWER IRQ resource.\n",
+			__func__);
+		goto error_end;
+	}
+
+	ret = request_threaded_irq(irq, NULL, mdm_vddmin_change,
+		IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
+		"mdm lpm", NULL);
+
+	if (ret < 0)
+		pr_err("%s: MDM LPM IRQ#%d request failed with error=%d",
+			__func__, irq, ret);
+error_end:
+	return;
+}
+
 static void mdm_restart_reason_fn(struct work_struct *work)
 {
 	int ret, ntries = 0;
@@ -288,6 +340,14 @@
 	return IRQ_HANDLED;
 }
 
+static irqreturn_t mdm_pblrdy_change(int irq, void *dev_id)
+{
+	pr_info("%s: pbl ready:%d\n", __func__,
+			gpio_get_value(mdm_drv->mdm2ap_pblrdy));
+
+	return IRQ_HANDLED;
+}
+
 static int mdm_subsys_shutdown(const struct subsys_data *crashed_subsys)
 {
 	gpio_direction_output(mdm_drv->ap2mdm_errfatal_gpio, 1);
@@ -445,6 +505,12 @@
 	if (pres)
 		mdm_drv->ap2mdm_pmic_pwr_en_gpio = pres->start;
 
+	/* MDM2AP_PBLRDY */
+	pres = platform_get_resource_byname(pdev, IORESOURCE_IO,
+							"MDM2AP_PBLRDY");
+	if (pres)
+		mdm_drv->mdm2ap_pblrdy = pres->start;
+
 	mdm_drv->boot_type                  = CHARM_NORMAL_BOOT;
 
 	mdm_drv->ops      = mdm_ops;
@@ -472,6 +538,8 @@
 		gpio_request(mdm_drv->ap2mdm_kpdpwr_n_gpio, "AP2MDM_KPDPWR_N");
 	gpio_request(mdm_drv->mdm2ap_status_gpio, "MDM2AP_STATUS");
 	gpio_request(mdm_drv->mdm2ap_errfatal_gpio, "MDM2AP_ERRFATAL");
+	if (mdm_drv->mdm2ap_pblrdy > 0)
+		gpio_request(mdm_drv->mdm2ap_pblrdy, "MDM2AP_PBLRDY");
 
 	if (mdm_drv->ap2mdm_pmic_pwr_en_gpio > 0)
 		gpio_request(mdm_drv->ap2mdm_pmic_pwr_en_gpio,
@@ -560,12 +628,35 @@
 	mdm_drv->mdm_status_irq = irq;
 
 status_err:
+	if (mdm_drv->mdm2ap_pblrdy > 0) {
+		irq = MSM_GPIO_TO_INT(mdm_drv->mdm2ap_pblrdy);
+		if (irq < 0) {
+			pr_err("%s: could not get MDM2AP_PBLRDY IRQ resource",
+				__func__);
+			goto pblrdy_err;
+		}
+
+		ret = request_threaded_irq(irq, NULL, mdm_pblrdy_change,
+			IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING |
+			IRQF_SHARED,
+			"mdm pbl ready", mdm_drv);
+
+		if (ret < 0) {
+			pr_err("%s: MDM2AP_PBL IRQ#%d request failed error=%d",
+				__func__, irq, ret);
+			goto pblrdy_err;
+		}
+	}
+
+pblrdy_err:
 	/*
 	 * If AP2MDM_PMIC_PWR_EN gpio is used, pull it high. It remains
 	 * high until the whole phone is shut down.
 	 */
 	if (mdm_drv->ap2mdm_pmic_pwr_en_gpio > 0)
 		gpio_direction_output(mdm_drv->ap2mdm_pmic_pwr_en_gpio, 1);
+	/* Register VDDmin gpios with RPM */
+	mdm_setup_vddmin_gpios();
 
 	/* Perform early powerup of the external modem in order to
 	 * allow tabla devices to be found.
diff --git a/arch/arm/mach-msm/mdm_private.h b/arch/arm/mach-msm/mdm_private.h
index f157d88..53bfaf0 100644
--- a/arch/arm/mach-msm/mdm_private.h
+++ b/arch/arm/mach-msm/mdm_private.h
@@ -35,6 +35,7 @@
 	unsigned ap2mdm_kpdpwr_n_gpio;
 	unsigned ap2mdm_soft_reset_gpio;
 	unsigned ap2mdm_pmic_pwr_en_gpio;
+	unsigned mdm2ap_pblrdy;
 
 	int mdm_errfatal_irq;
 	int mdm_status_irq;
diff --git a/arch/arm/mach-msm/mpm-8625.c b/arch/arm/mach-msm/mpm-8625.c
index fa966d2..954e5cc 100644
--- a/arch/arm/mach-msm/mpm-8625.c
+++ b/arch/arm/mach-msm/mpm-8625.c
@@ -152,7 +152,7 @@
 	return 0;
 }
 
-void __init msm_gic_irq_extn_init(void __iomem *db, void __iomem *cb)
+void __init msm_gic_irq_extn_init(void)
 {
 	gic_arch_extn.irq_mask	= msm_gic_mask_irq;
 	gic_arch_extn.irq_unmask = msm_gic_unmask_irq;
diff --git a/arch/arm/mach-msm/mpm-8625.h b/arch/arm/mach-msm/mpm-8625.h
index 4ada9e2..1c28390 100644
--- a/arch/arm/mach-msm/mpm-8625.h
+++ b/arch/arm/mach-msm/mpm-8625.h
@@ -14,7 +14,7 @@
 #ifndef _ARCH_ARM_MACH_MSM_MPM_8625_H_
 #define _ARCH_ARM_MACH_MSM_MPM_8625_H_
 
-void msm_gic_irq_extn_init(void __iomem *, void __iomem *);
+void msm_gic_irq_extn_init(void);
 
 unsigned int msm_gic_spi_ppi_pending(void);
 int msm_gic_irq_idle_sleep_allowed(void);
diff --git a/arch/arm/mach-msm/msm_bus/Makefile b/arch/arm/mach-msm/msm_bus/Makefile
index 766856c..98e1250 100644
--- a/arch/arm/mach-msm/msm_bus/Makefile
+++ b/arch/arm/mach-msm/msm_bus/Makefile
@@ -8,4 +8,5 @@
 obj-$(CONFIG_ARCH_MSM9615) += msm_bus_board_9615.o
 obj-$(CONFIG_ARCH_APQ8064) += msm_bus_board_8064.o
 obj-$(CONFIG_ARCH_MSM8930) += msm_bus_board_8930.o
+obj-$(CONFIG_ARCH_MSMCOPPER) += msm_bus_board_copper.o
 obj-$(CONFIG_DEBUG_FS) += msm_bus_dbg.o
diff --git a/arch/arm/mach-msm/msm_bus/msm_bus_board_copper.c b/arch/arm/mach-msm/msm_bus/msm_bus_board_copper.c
new file mode 100644
index 0000000..9858a73
--- /dev/null
+++ b/arch/arm/mach-msm/msm_bus/msm_bus_board_copper.c
@@ -0,0 +1,2002 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <mach/msm_bus.h>
+#include <mach/msm_bus_board.h>
+#include <mach/board.h>
+#include <mach/rpm.h>
+#include "msm_bus_core.h"
+#include "msm_bus_noc.h"
+#include "msm_bus_bimc.h"
+
+#define NMASTERS 120
+#define NSLAVES 150
+#define NFAB_COPPER 7
+
+enum msm_bus_copper_master_ports_type {
+	/* System NOC Masters */
+	MASTER_PORT_LPASS_AHB = 0,
+	MASTER_PORT_QDSS_BAM,
+	MASTER_PORT_SNOC_CFG,
+	MASTER_PORT_GW_BIMC_SNOC,
+	MASTER_PORT_GW_CNOC_SNOC,
+	MASTER_PORT_CRYPTO_CORE0,
+	MASTER_PORT_CRYPTO_CORE1,
+	MASTER_PORT_LPASS_PROC,
+	MASTER_PORT_MSS,
+	MASTER_PORT_MSS_NAV,
+	MASTER_PORT_OCMEM_DMA,
+	MASTER_PORT_GW_PNOC_SNOC,
+	MASTER_PORT_WCSS,
+	MASTER_PORT_QDSS_ETR,
+	MASTER_PORT_USB3,
+
+	/* MMSS NOC Masters */
+	MASTER_PORT_GW_CNOC_MNOC_MMSS_CFG = 0,
+	MASTER_PORT_GW_CNOC_MNOC_CFG,
+	MASTER_PORT_GFX3D_PORT0,
+	MASTER_PORT_GFX3D_PORT1,
+	MASTER_PORT_JPEG,
+	MASTER_PORT_MDP,
+	/* Venus video core */
+	MASTER_PORT_VIDEO_PORT0,
+	MASTER_PORT_VIDEO_PORT1,
+	MASTER_PORT_VFE = 16,
+
+	/* BIMC Masters */
+	MASTER_PORT_KMPSS_M0 = 0,
+	MASTER_PORT_KMPSS_M1,
+	MASTER_PORT_MSS_PROC,
+	MASTER_PORT_GW_MNOC_BIMC_0,
+	MASTER_PORT_GW_MNOC_BIMC_1,
+	MASTER_PORT_GW_SNOC_BIMC_0,
+	MASTER_PORT_GW_SNOC_BIMC_1,
+
+	/* OCMEM NOC Masters */
+	MASTER_PORT_CNOC_ONOC_CFG = 0,
+	MASTER_PORT_JPEG_OCMEM,
+	MASTER_PORT_MDP_OCMEM,
+	MASTER_PORT_VIDEO_P0_OCMEM,
+	MASTER_PORT_VIDEO_P1_OCMEM,
+	MASTER_PORT_VFE_OCMEM,
+
+	/* Peripheral NOC Masters */
+	MASTER_PORT_SDCC_1 = 0,
+	MASTER_PORT_SDCC_3,
+	MASTER_PORT_SDCC_2,
+	MASTER_PORT_SDCC_4,
+	MASTER_PORT_TSIF,
+	MASTER_PORT_BAM_DMA,
+	MASTER_PORT_BLSP_2,
+	MASTER_PORT_USB_HSIC,
+	MASTER_PORT_BLSP_1,
+	MASTER_PORT_USB_HS,
+	MASTER_PORT_PNOC_CFG,
+	MASTER_PORT_GW_SNOC_PNOC,
+
+	/* Config NOC Masters */
+	MASTER_PORT_RPM_INST = 0,
+	MASTER_PORT_RPM_DATA,
+	MASTER_PORT_RPM_SYS,
+	MASTER_PORT_DEHR,
+	MASTER_PORT_QDSS_DAP,
+	MASTER_PORT_SPDM,
+	MASTER_PORT_TIC,
+	MASTER_PORT_GW_SNOC_CNOC,
+};
+
+enum msm_bus_copper_slave_ports_type {
+	/* System NOC Slaves */
+	SLAVE_PORT_KMPSS = 1,
+	SLAVE_PORT_LPASS,
+	SLAVE_PORT_USB3 = 4,
+	SLAVE_PORT_WCSS = 6,
+	SLAVE_PORT_GW_SNOC_BIMC_P0,
+	SLAVE_PORT_GW_SNOC_BIMC_P1,
+	SLAVE_PORT_GW_SNOC_CNOC,
+	SLAVE_PORT_OCIMEM,
+	SLAVE_PORT_SNOC_OCMEM,
+	SLAVE_PORT_GW_SNOC_PNOC,
+	SLAVE_PORT_SERVICE_SNOC,
+	SLAVE_PORT_QDSS_STM,
+
+	/* MMSS NOC Slaves */
+	SLAVE_PORT_CAMERA_CFG = 0,
+	SLAVE_PORT_DISPLAY_CFG,
+	SLAVE_PORT_OCMEM_CFG,
+	SLAVE_PORT_CPR_CFG,
+	SLAVE_PORT_CPR_XPU_CFG,
+	SLAVE_PORT_MISC_CFG = 6,
+	SLAVE_PORT_MISC_XPU_CFG,
+	SLAVE_PORT_VENUS_CFG,
+	SLAVE_PORT_GFX3D_CFG,
+	SLAVE_PORT_MMSS_CLK_CFG = 11,
+	SLAVE_PORT_MMSS_CLK_XPU_CFG,
+	SLAVE_PORT_MNOC_MPU_CFG,
+	SLAVE_PORT_ONOC_MPU_CFG,
+	SLAVE_PORT_GW_MMSS_BIMC_P0 = 16,
+	SLAVE_PORT_GW_MMSS_BIMC_P1,
+	SLAVE_PORT_SERVICE_MNOC,
+
+	/* BIMC Slaves */
+	SLAVE_PORT_EBI1_CH0 = 0,
+	SLAVE_PORT_EBI1_CH1,
+	SLAVE_PORT_KMPSS_L2,
+	SLAVE_PORT_GW_BIMC_SNOC,
+
+	/* OCMEM NOC Slaves */
+	SLAVE_PORT_OCMEM_P0 = 0,
+	SLAVE_PORT_OCMEM_P1,
+	SLAVE_PORT_SERVICE_ONOC,
+
+	/* Peripheral NOC Slaves */
+	SLAVE_PORT_SDCC_1 = 0,
+	SLAVE_PORT_SDCC_3,
+	SLAVE_PORT_SDCC_2,
+	SLAVE_PORT_SDCC_4,
+	SLAVE_PORT_TSIF,
+	SLAVE_PORT_BAM_DMA,
+	SLAVE_PORT_BLSP_2,
+	SLAVE_PORT_USB_HSIC,
+	SLAVE_PORT_BLSP_1,
+	SLAVE_PORT_USB_HS,
+	SLAVE_PORT_PDM,
+	SLAVE_PORT_PERIPH_APU_CFG,
+	SLAVE_PORT_PNOC_MPU_CFG,
+	SLAVE_PORT_PRNG,
+	SLAVE_PORT_GW_PNOC_SNOC,
+	SLAVE_PORT_SERVICE_PNOC,
+
+	/* Config NOC slaves */
+	SLAVE_PORT_CLK_CTL = 1,
+	SLAVE_PORT_CNOC_MSS,
+	SLAVE_PORT_SECURITY,
+	SLAVE_PORT_TCSR,
+	SLAVE_PORT_TLMM,
+	SLAVE_PORT_CRYPTO_0_CFG,
+	SLAVE_PORT_CRYPTO_1_CFG,
+	SLAVE_PORT_IMEM_CFG,
+	SLAVE_PORT_MESSAGE_RAM,
+	SLAVE_PORT_BIMC_CFG,
+	SLAVE_PORT_BOOT_ROM,
+	SLAVE_PORT_CNOC_MNOC_MMSS_CFG,
+	SLAVE_PORT_PMIC_ARB,
+	SLAVE_PORT_SPDM_WRAPPER,
+	SLAVE_PORT_DEHR_CFG,
+	SLAVE_PORT_MPM,
+	SLAVE_PORT_QDSS_CFG,
+	SLAVE_PORT_RBCPR_CFG,
+	SLAVE_PORT_RBCPR_QDSS_APU_CFG,
+	SLAVE_PORT_CNOC_MNOC_CFG,
+	SLAVE_PORT_SNOC_MPU_CFG,
+	SLAVE_PORT_CNOC_ONOC_CFG,
+	SLAVE_PORT_PNOC_CFG,
+	SLAVE_PORT_SNOC_CFG,
+	SLAVE_PORT_EBI1_DLL_CFG,
+	SLAVE_PORT_PHY_APU_CFG,
+	SLAVE_PORT_EBI1_PHY_CFG,
+	SLAVE_PORT_RPM,
+	SLAVE_PORT_GW_CNOC_SNOC,
+	SLAVE_PORT_SERVICE_CNOC,
+};
+
+/* Hardware IDs for RPM */
+enum msm_bus_copper_mas_hw_id {
+	MAS_APPSS_PROC = 0,
+	MAS_AMSS_PROC,
+	MAS_MNOC_BIMC,
+	MAS_SNOC_BIMC,
+	MAS_CNOC_MNOC_MMSS_CFG,
+	MAS_CNOC_MNOC_CFG,
+	MAS_GFX3D,
+	MAS_JPEG,
+	MAS_MDP,
+	MAS_VIDEO_P0,
+	MAS_VIDEO_P1,
+	MAS_VFE,
+	MAS_CNOC_ONOC_CFG,
+	MAS_JPEG_OCMEM,
+	MAS_MDP_OCMEM,
+	MAS_VIDEO_P0_OCMEM,
+	MAS_VIDEO_P1_OCMEM,
+	MAS_VFE_OCMEM,
+	MAS_LPASS_AHB,
+	MAS_QDSS_BAM,
+	MAS_SNOC_CFG,
+	MAS_BIMC_SNOC,
+	MAS_CNOC_SNOC,
+	MAS_CRYPTO_CORE0,
+	MAS_CRYPTO_CORE1,
+	MAS_LPASS_PROC,
+	MAS_MSS,
+	MAS_MSS_NAV,
+	MAS_OCMEM_DMA,
+	MAS_PNOC_SNOC,
+	MAS_WCSS,
+	MAS_QDSS_ETR,
+	MAS_USB3,
+	MAS_SDCC_1,
+	MAS_SDCC_3,
+	MAS_SDCC_2,
+	MAS_SDCC_4,
+	MAS_TSIF,
+	MAS_BAM_DMA,
+	MAS_BLSP_2,
+	MAS_USB_HSIC,
+	MAS_BLSP_1,
+	MAS_USB_HS,
+	MAS_PNOC_CFG,
+	MAS_SNOC_PNOC,
+	MAS_RPM_INST,
+	MAS_RPM_DATA,
+	MAS_RPM_SYS,
+	MAS_DEHR,
+	MAS_QDSS_DAP,
+	MAS_SPDM,
+	MAS_TIC,
+	MAS_SNOC_CNOC,
+	MAS_OVNOC_SNOC,
+	MAS_OVNOC_ONOC,
+	MAS_V_OCMEM_GFX3D,
+	MAS_ONOC_OVNOC,
+	MAS_SNOC_OVNOC,
+};
+
+enum msm_bus_copper_slv_hw_id {
+	SLV_EBI = 0,
+	SLV_APSS_L2,
+	SLV_BIMC_SNOC,
+	SLV_CAMERA_CFG,
+	SLV_DISPLAY_CFG,
+	SLV_OCMEM_CFG,
+	SLV_CPR_CFG,
+	SLV_CPR_XPU_CFG,
+	SLV_MISC_CFG,
+	SLV_MISC_XPU_CFG,
+	SLV_VENUS_CFG,
+	SLV_GFX3D_CFG,
+	SLV_MMSS_CLK_CFG,
+	SLV_MMSS_CLK_XPU_CFG,
+	SLV_MNOC_MPU_CFG,
+	SLV_ONOC_MPU_CFG,
+	SLV_MMSS_BIMC,
+	SLV_SERVICE_MNOC,
+	SLV_OCMEM,
+	SLV_SERVICE_ONOC,
+	SLV_APPSS,
+	SLV_LPASS,
+	SLV_USB3,
+	SLV_WCSS,
+	SLV_SNOC_BIMC,
+	SLV_SNOC_CNOC,
+	SLV_OCIMEM,
+	SLV_SNOC_OCMEM,
+	SLV_SNOC_PNOC,
+	SLV_SERVICE_SNOC,
+	SLV_QDSS_STM,
+	SLV_SDCC_1,
+	SLV_SDCC_3,
+	SLV_SDCC_2,
+	SLV_SDCC_4,
+	SLV_TSIF,
+	SLV_BAM_DMA,
+	SLV_BLSP_2,
+	SLV_USB_HSIC,
+	SLV_BLSP_1,
+	SLV_USB_HS,
+	SLV_PDM,
+	SLV_PERIPH_APU_CFG,
+	SLV_MPU_CFG,
+	SLV_PRNG,
+	SLV_PNOC_SNOC,
+	SLV_SERVICE_PNOC,
+	SLV_CLK_CTL,
+	SLV_CNOC_MSS,
+	SLV_SECURITY,
+	SLV_TCSR,
+	SLV_TLMM,
+	SLV_CRYPTO_0_CFG,
+	SLV_CRYPTO_1_CFG,
+	SLV_IMEM_CFG,
+	SLV_MESSAGE_RAM,
+	SLV_BIMC_CFG,
+	SLV_BOOT_ROM,
+	SLV_CNOC_MNOC_MMSS_CFG,
+	SLV_PMIC_ARB,
+	SLV_SPDM_WRAPPER,
+	SLV_DEHR_CFG,
+	SLV_MPM,
+	SLV_QDSS_CFG,
+	SLV_RBCPR_CFG,
+	SLV_RBCPR_QDSS_APU_CFG,
+	SLV_CNOC_MNOC_CFG,
+	SLV_SNOC_MPU_CFG,
+	SLV_CNOC_ONOC_CFG,
+	SLV_PNOC_CFG,
+	SLV_SNOC_CFG,
+	SLV_EBI1_DLL_CFG,
+	SLV_PHY_APU_CFG,
+	SLV_EBI1_PHY_CFG,
+	SLV_RPM,
+	SLV_CNOC_SNOC,
+	SLV_SERVICE_CNOC,
+	SLV_SNOC_OVNOC,
+	SLV_ONOC_OVNOC,
+	SLV_OVNOC_ONOC,
+	SLV_OVNOC_SNOC,
+};
+
+static uint32_t master_iids[NMASTERS];
+static uint32_t slave_iids[NSLAVES];
+
+/* System NOC nodes */
+static int mport_lpass_ahb[] = {MASTER_PORT_LPASS_AHB,};
+static int mport_qdss_bam[] = {MASTER_PORT_QDSS_BAM,};
+static int mport_snoc_cfg[] = {MASTER_PORT_SNOC_CFG,};
+static int mport_gw_bimc_snoc[] = {MASTER_PORT_GW_BIMC_SNOC,};
+static int mport_gw_cnoc_snoc[] = {MASTER_PORT_GW_CNOC_SNOC,};
+static int mport_crypto_core0[] = {MASTER_PORT_CRYPTO_CORE0,};
+static int mport_crypto_core1[] = {MASTER_PORT_CRYPTO_CORE1};
+static int mport_lpass_proc[] = {MASTER_PORT_LPASS_PROC};
+static int mport_mss[] = {MASTER_PORT_MSS};
+static int mport_mss_nav[] = {MASTER_PORT_MSS_NAV};
+static int mport_ocmem_dma[] = {MASTER_PORT_OCMEM_DMA};
+static int mport_gw_pnoc_snoc[] = {MASTER_PORT_GW_PNOC_SNOC};
+static int mport_wcss[] = {MASTER_PORT_WCSS};
+static int mport_qdss_etr[] = {MASTER_PORT_QDSS_ETR};
+static int mport_usb3[] = {MASTER_PORT_USB3};
+
+static int sport_kmpss[] = {SLAVE_PORT_KMPSS};
+static int sport_lpass[] = {SLAVE_PORT_LPASS};
+static int sport_usb3[] = {SLAVE_PORT_USB3};
+static int sport_wcss[] = {SLAVE_PORT_WCSS};
+static int sport_gw_snoc_bimc[] = {
+	SLAVE_PORT_GW_SNOC_BIMC_P0,
+	SLAVE_PORT_GW_SNOC_BIMC_P1,
+	};
+static int sport_gw_snoc_cnoc[] = {SLAVE_PORT_GW_SNOC_CNOC};
+static int sport_ocimem[] = {SLAVE_PORT_OCIMEM};
+static int sport_snoc_ocmem[] = {SLAVE_PORT_SNOC_OCMEM};
+static int sport_gw_snoc_pnoc[] = {SLAVE_PORT_GW_SNOC_PNOC};
+static int sport_service_snoc[] = {SLAVE_PORT_SERVICE_SNOC};
+static int sport_qdss_stm[] = {SLAVE_PORT_QDSS_STM};
+
+
+/* MMSS NOC nodes */
+static int mport_gw_cnoc_mnoc_cfg[] = {
+	MASTER_PORT_GW_CNOC_MNOC_MMSS_CFG,
+	MASTER_PORT_GW_CNOC_MNOC_CFG,
+};
+static int mport_gfx3d[] = {
+	MASTER_PORT_GFX3D_PORT0,
+	MASTER_PORT_GFX3D_PORT1,
+};
+static int mport_jpeg[] = {MASTER_PORT_JPEG};
+static int mport_mdp[] = {MASTER_PORT_MDP};
+static int mport_video_port0[] = {MASTER_PORT_VIDEO_PORT0};
+static int mport_video_port1[] = {MASTER_PORT_VIDEO_PORT1};
+static int mport_vfe[] = {MASTER_PORT_VFE};
+
+static int sport_camera_cfg[] = {SLAVE_PORT_CAMERA_CFG};
+static int sport_display_cfg[] = {SLAVE_PORT_DISPLAY_CFG};
+static int sport_ocmem_cfg[] = {SLAVE_PORT_OCMEM_CFG};
+static int sport_cpr_cfg[] = {SLAVE_PORT_CPR_CFG};
+static int sport_cpr_xpu_cfg[] = {SLAVE_PORT_CPR_XPU_CFG,};
+static int sport_misc_cfg[] = {SLAVE_PORT_MISC_CFG};
+static int sport_misc_xpu_cfg[] = {SLAVE_PORT_MISC_XPU_CFG};
+static int sport_venus_cfg[] = {SLAVE_PORT_VENUS_CFG};
+static int sport_gfx3d_cfg[] = {SLAVE_PORT_GFX3D_CFG};
+static int sport_mmss_clk_cfg[] = {SLAVE_PORT_MMSS_CLK_CFG};
+static int sport_mmss_clk_xpu_cfg[] = {
+	SLAVE_PORT_MMSS_CLK_XPU_CFG
+};
+static int sport_mnoc_mpu_cfg[] = {SLAVE_PORT_MNOC_MPU_CFG};
+static int sport_onoc_mpu_cfg[] = {SLAVE_PORT_ONOC_MPU_CFG};
+static int sport_gw_mmss_bimc[] = {
+	SLAVE_PORT_GW_MMSS_BIMC_P0,
+	SLAVE_PORT_GW_MMSS_BIMC_P1,
+};
+static int sport_service_mnoc[] = {SLAVE_PORT_SERVICE_MNOC};
+
+/* BIMC Nodes */
+
+static int mport_kmpss_m0[] = {MASTER_PORT_KMPSS_M0,};
+static int mport_kmpss_m1[] = {MASTER_PORT_KMPSS_M1};
+static int mport_mss_proc[] = {MASTER_PORT_MSS_PROC};
+static int mport_gw_mnoc_bimc[] = {
+	MASTER_PORT_GW_MNOC_BIMC_0,
+	MASTER_PORT_GW_MNOC_BIMC_1,
+};
+static int mport_gw_snoc_bimc[] = {
+	MASTER_PORT_GW_SNOC_BIMC_0,
+	MASTER_PORT_GW_SNOC_BIMC_1,
+};
+
+static int sport_ebi1[] = {
+	SLAVE_PORT_EBI1_CH0,
+	SLAVE_PORT_EBI1_CH1,
+};
+static int sport_kmpss_l2[] = {SLAVE_PORT_KMPSS_L2,};
+static int sport_gw_bimc_snoc[] = {SLAVE_PORT_GW_BIMC_SNOC,};
+
+/* OCMEM NOC Nodes */
+static int mport_cnoc_onoc_cfg[] = {
+	MASTER_PORT_CNOC_ONOC_CFG,
+};
+static int mport_jpeg_ocmem[] = {MASTER_PORT_JPEG_OCMEM,};
+static int mport_mdp_ocmem[] = {MASTER_PORT_MDP_OCMEM,};
+static int mport_video_p0_ocmem[] = {
+	MASTER_PORT_VIDEO_P0_OCMEM,
+};
+static int mport_video_p1_ocmem[] = {
+	MASTER_PORT_VIDEO_P1_OCMEM,
+};
+static int mport_vfe_ocmem[] = {MASTER_PORT_VFE_OCMEM,};
+static int sport_ocmem[] = {
+	SLAVE_PORT_OCMEM_P0,
+	SLAVE_PORT_OCMEM_P1,
+};
+
+static int sport_service_onoc[] = {SLAVE_PORT_SERVICE_ONOC,};
+
+/* Peripheral NOC Nodes */
+static int mport_sdcc_1[] = {MASTER_PORT_SDCC_1,};
+static int mport_sdcc_3[] = {MASTER_PORT_SDCC_3,};
+static int mport_sdcc_2[] = {MASTER_PORT_SDCC_2,};
+static int mport_sdcc_4[] = {MASTER_PORT_SDCC_4,};
+static int mport_tsif[] = {MASTER_PORT_TSIF,};
+static int mport_bam_dma[] = {MASTER_PORT_BAM_DMA,};
+static int mport_blsp_2[] = {MASTER_PORT_BLSP_2,};
+static int mport_usb_hsic[] = {MASTER_PORT_USB_HSIC,};
+static int mport_blsp_1[] = {MASTER_PORT_BLSP_1,};
+static int mport_usb_hs[] = {MASTER_PORT_USB_HS,};
+static int mport_pnoc_cfg[] = {MASTER_PORT_PNOC_CFG,};
+static int mport_gw_snoc_pnoc[] = {MASTER_PORT_GW_SNOC_PNOC,};
+
+static int sport_sdcc_1[] = {SLAVE_PORT_SDCC_1,};
+static int sport_sdcc_3[] = {SLAVE_PORT_SDCC_3,};
+static int sport_sdcc_2[] = {SLAVE_PORT_SDCC_2,};
+static int sport_sdcc_4[] = {SLAVE_PORT_SDCC_4,};
+static int sport_tsif[] = {SLAVE_PORT_TSIF,};
+static int sport_bam_dma[] = {SLAVE_PORT_BAM_DMA,};
+static int sport_blsp_2[] = {SLAVE_PORT_BLSP_2,};
+static int sport_usb_hsic[] = {SLAVE_PORT_USB_HSIC,};
+static int sport_blsp_1[] = {SLAVE_PORT_BLSP_1,};
+static int sport_usb_hs[] = {SLAVE_PORT_USB_HS,};
+static int sport_pdm[] = {SLAVE_PORT_PDM,};
+static int sport_periph_apu_cfg[] = {
+	SLAVE_PORT_PERIPH_APU_CFG,
+};
+static int sport_pnoc_mpu_cfg[] = {SLAVE_PORT_PNOC_MPU_CFG,};
+static int sport_prng[] = {SLAVE_PORT_PRNG,};
+static int sport_gw_pnoc_snoc[] = {SLAVE_PORT_GW_PNOC_SNOC,};
+static int sport_service_pnoc[] = {SLAVE_PORT_SERVICE_PNOC,};
+
+/* Config NOC Nodes */
+static int mport_rpm_inst[] = {MASTER_PORT_RPM_INST,};
+static int mport_rpm_data[] = {MASTER_PORT_RPM_DATA,};
+static int mport_rpm_sys[] = {MASTER_PORT_RPM_SYS,};
+static int mport_dehr[] = {MASTER_PORT_DEHR,};
+static int mport_qdss_dap[] = {MASTER_PORT_QDSS_DAP,};
+static int mport_spdm[] = {MASTER_PORT_SPDM,};
+static int mport_tic[] = {MASTER_PORT_TIC,};
+static int mport_gw_snoc_cnoc[] = {MASTER_PORT_GW_SNOC_CNOC,};
+
+static int sport_clk_ctl[] = {SLAVE_PORT_CLK_CTL,};
+static int sport_cnoc_mss[] = {SLAVE_PORT_CNOC_MSS,};
+static int sport_security[] = {SLAVE_PORT_SECURITY,};
+static int sport_tcsr[] = {SLAVE_PORT_TCSR,};
+static int sport_tlmm[] = {SLAVE_PORT_TLMM,};
+static int sport_crypto_0_cfg[] = {SLAVE_PORT_CRYPTO_0_CFG,};
+static int sport_crypto_1_cfg[] = {SLAVE_PORT_CRYPTO_1_CFG,};
+static int sport_imem_cfg[] = {SLAVE_PORT_IMEM_CFG,};
+static int sport_message_ram[] = {SLAVE_PORT_MESSAGE_RAM,};
+static int sport_bimc_cfg[] = {SLAVE_PORT_BIMC_CFG,};
+static int sport_boot_rom[] = {SLAVE_PORT_BOOT_ROM,};
+static int sport_cnoc_mnoc_mmss_cfg[] = {SLAVE_PORT_CNOC_MNOC_MMSS_CFG,};
+static int sport_cnoc_mnoc_cfg[] = {SLAVE_PORT_CNOC_MNOC_CFG,};
+static int sport_pmic_arb[] = {SLAVE_PORT_PMIC_ARB,};
+static int sport_spdm_wrapper[] = {SLAVE_PORT_SPDM_WRAPPER,};
+static int sport_dehr_cfg[] = {SLAVE_PORT_DEHR_CFG,};
+static int sport_mpm[] = {SLAVE_PORT_MPM,};
+static int sport_qdss_cfg[] = {SLAVE_PORT_QDSS_CFG,};
+static int sport_rbcpr_cfg[] = {SLAVE_PORT_RBCPR_CFG,};
+static int sport_rbcpr_qdss_apu_cfg[] = {SLAVE_PORT_RBCPR_QDSS_APU_CFG,};
+static int sport_snoc_mpu_cfg[] = {SLAVE_PORT_SNOC_MPU_CFG,};
+static int sport_cnoc_onoc_cfg[] = {SLAVE_PORT_CNOC_ONOC_CFG,};
+static int sport_pnoc_cfg[] = {SLAVE_PORT_PNOC_CFG,};
+static int sport_snoc_cfg[] = {SLAVE_PORT_SNOC_CFG,};
+static int sport_ebi1_dll_cfg[] = {SLAVE_PORT_EBI1_DLL_CFG,};
+static int sport_phy_apu_cfg[] = {SLAVE_PORT_PHY_APU_CFG,};
+static int sport_ebi1_phy_cfg[] = {SLAVE_PORT_EBI1_PHY_CFG,};
+static int sport_rpm[] = {SLAVE_PORT_RPM,};
+static int sport_gw_cnoc_snoc[] = {SLAVE_PORT_GW_CNOC_SNOC,};
+static int sport_service_cnoc[] = {SLAVE_PORT_SERVICE_CNOC,};
+
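+/* Bandwidth tier shared by the node tables below. */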
+static int tier2[] = {MSM_BUS_BW_TIER2,};
+
+/*
+ * QoS ports are defined only when they differ from the
+ * master ports.
+ */
+static int qports_gemini[] = {0};
+static int qports_mdp[] = {1};
+static int qports_venus_p0[] = {4};
+static int qports_venus_p1[] = {5};
+static int qports_vfe[] = {6};
+static int qports_gemini_ocmem[] = {0};
+static int qports_mdp_ocmem[] = {1};
+static int qports_venus_p0_ocmem[] = {2};
+static int qports_venus_p1_ocmem[] = {3};
+static int qports_vfe_ocmem[] = {4};
+static int qports_crypto_c0[] = {2};
+static int qports_crypto_c1[] = {3};
+static int qports_lpass_proc[] = {4};
+static int qports_ocmem_dma[] = {7};
+static int qports_gw_snoc_bimc[] = {5, 6};
+static int qports_kmpss[] = {0, 1};
+static int qports_lpass_ahb[] = {0};
+static int qports_qdss_bam[] = {1};
+static int qports_gw_pnoc_snoc[] = {8};
+static int qports_qdss_etr[] = {10};
+static int qports_usb3[] = {11};
+static int qports_oxili[] = {2, 3};
+static int qports_gw_mnoc_bimc[] = {3, 4};
+
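+/*
+ * Fabric node tables: each entry ties a bus master/slave id to its physical
+ * ports, optional QoS port and mode, tier, bus width and RPM hardware id.
+ */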
+static struct msm_bus_node_info sys_noc_info[] = {
+	{
+		.id = MSM_BUS_MASTER_LPASS_AHB,
+		.masterp = mport_lpass_ahb,
+		.num_mports = ARRAY_SIZE(mport_lpass_ahb),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.qport = qports_lpass_ahb,
+		.mas_hw_id = MAS_LPASS_AHB,
+		.mode = NOC_QOS_MODE_FIXED,
+		.prio_rd = 2,
+		.prio_wr = 2,
+	},
+	{
+		.id = MSM_BUS_MASTER_QDSS_BAM,
+		.masterp = mport_qdss_bam,
+		.num_mports = ARRAY_SIZE(mport_qdss_bam),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.mode = NOC_QOS_MODE_FIXED,
+		.qport = qports_qdss_bam,
+		.mas_hw_id = MAS_QDSS_BAM,
+	},
+	{
+		.id = MSM_BUS_MASTER_SNOC_CFG,
+		.masterp = mport_snoc_cfg,
+		.num_mports = ARRAY_SIZE(mport_snoc_cfg),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.mas_hw_id = MAS_SNOC_CFG,
+	},
+	{
+		.id = MSM_BUS_FAB_BIMC,
+		.gateway = 1,
+		.slavep = sport_gw_snoc_bimc,
+		.num_sports = ARRAY_SIZE(sport_gw_snoc_bimc),
+		.masterp = mport_gw_bimc_snoc,
+		.num_mports = ARRAY_SIZE(mport_gw_bimc_snoc),
+		.buswidth = 8,
+		.mas_hw_id = MAS_BIMC_SNOC,
+		.slv_hw_id = SLV_SNOC_BIMC,
+	},
+	{
+		.id = MSM_BUS_FAB_CONFIG_NOC,
+		.gateway = 1,
+		.slavep = sport_gw_snoc_cnoc,
+		.num_sports = ARRAY_SIZE(sport_gw_snoc_cnoc),
+		.masterp = mport_gw_cnoc_snoc,
+		.num_mports = ARRAY_SIZE(mport_gw_cnoc_snoc),
+		.buswidth = 8,
+		.mas_hw_id = MAS_CNOC_SNOC,
+		.slv_hw_id = SLV_SNOC_CNOC,
+	},
+	{
+		.id = MSM_BUS_FAB_PERIPH_NOC,
+		.gateway = 1,
+		.slavep = sport_gw_snoc_pnoc,
+		.num_sports = ARRAY_SIZE(sport_gw_snoc_pnoc),
+		.masterp = mport_gw_pnoc_snoc,
+		.num_mports = ARRAY_SIZE(mport_gw_pnoc_snoc),
+		.buswidth = 8,
+		.qport = qports_gw_pnoc_snoc,
+		.mas_hw_id = MAS_PNOC_SNOC,
+		.slv_hw_id = SLV_SNOC_PNOC,
+		.mode = NOC_QOS_MODE_FIXED,
+		.prio_rd = 2,
+		.prio_wr = 2,
+	},
+	{
+		.id = MSM_BUS_FAB_OCMEM_VNOC,
+		.gateway = 1,
+		.buswidth = 8,
+		.mas_hw_id = MAS_OVNOC_SNOC,
+		.slv_hw_id = SLV_SNOC_OVNOC,
+	},
+	{
+		.id = MSM_BUS_MASTER_CRYPTO_CORE0,
+		.masterp = mport_crypto_core0,
+		.num_mports = ARRAY_SIZE(mport_crypto_core0),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.mode = NOC_QOS_MODE_FIXED,
+		.qport = qports_crypto_c0,
+		.mas_hw_id = MAS_CRYPTO_CORE0,
+	},
+	{
+		.id = MSM_BUS_MASTER_CRYPTO_CORE1,
+		.masterp = mport_crypto_core1,
+		.num_mports = ARRAY_SIZE(mport_crypto_core1),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.mode = NOC_QOS_MODE_FIXED,
+		.qport = qports_crypto_c1,
+		.mas_hw_id = MAS_CRYPTO_CORE1,
+	},
+	{
+		.id = MSM_BUS_MASTER_LPASS_PROC,
+		.masterp = mport_lpass_proc,
+		.num_mports = ARRAY_SIZE(mport_lpass_proc),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.qport = qports_lpass_proc,
+		.mas_hw_id = MAS_LPASS_PROC,
+		.mode = NOC_QOS_MODE_FIXED,
+		.prio_rd = 2,
+		.prio_wr = 2,
+	},
+	{
+		.id = MSM_BUS_MASTER_MSS,
+		.masterp = mport_mss,
+		.num_mports = ARRAY_SIZE(mport_mss),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.mas_hw_id = MAS_MSS,
+	},
+	{
+		.id = MSM_BUS_MASTER_MSS_NAV,
+		.masterp = mport_mss_nav,
+		.num_mports = ARRAY_SIZE(mport_mss_nav),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.mas_hw_id = MAS_MSS_NAV,
+	},
+	{
+		.id = MSM_BUS_MASTER_OCMEM_DMA,
+		.masterp = mport_ocmem_dma,
+		.num_mports = ARRAY_SIZE(mport_ocmem_dma),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.mode = NOC_QOS_MODE_FIXED,
+		.qport = qports_ocmem_dma,
+		.mas_hw_id = MAS_OCMEM_DMA,
+	},
+	{
+		.id = MSM_BUS_MASTER_WCSS,
+		.masterp = mport_wcss,
+		.num_mports = ARRAY_SIZE(mport_wcss),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.mas_hw_id = MAS_WCSS,
+	},
+	{
+		.id = MSM_BUS_MASTER_QDSS_ETR,
+		.masterp = mport_qdss_etr,
+		.num_mports = ARRAY_SIZE(mport_qdss_etr),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.qport = qports_qdss_etr,
+		.mode = NOC_QOS_MODE_FIXED,
+		.mas_hw_id = MAS_QDSS_ETR,
+	},
+	{
+		.id = MSM_BUS_MASTER_USB3,
+		.masterp = mport_usb3,
+		.num_mports = ARRAY_SIZE(mport_usb3),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.mode = NOC_QOS_MODE_FIXED,
+		.qport = qports_usb3,
+		.mas_hw_id = MAS_USB3,
+		.prio_rd = 2,
+		.prio_wr = 2,
+	},
+	{
+		.id = MSM_BUS_SLAVE_AMPSS,
+		.slavep = sport_kmpss,
+		.num_sports = ARRAY_SIZE(sport_kmpss),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.buswidth = 8,
+		.slv_hw_id = SLV_APPSS,
+	},
+	{
+		.id = MSM_BUS_SLAVE_LPASS,
+		.slavep = sport_lpass,
+		.num_sports = ARRAY_SIZE(sport_lpass),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.buswidth = 8,
+		.slv_hw_id = SLV_LPASS,
+	},
+	{
+		.id = MSM_BUS_SLAVE_USB3,
+		.slavep = sport_usb3,
+		.num_sports = ARRAY_SIZE(sport_usb3),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.buswidth = 8,
+		.slv_hw_id = SLV_USB3,
+	},
+	{
+		.id = MSM_BUS_SLAVE_WCSS,
+		.slavep = sport_wcss,
+		.num_sports = ARRAY_SIZE(sport_wcss),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.buswidth = 8,
+		.slv_hw_id = SLV_WCSS,
+	},
+	{
+		.id = MSM_BUS_SLAVE_OCIMEM,
+		.slavep = sport_ocimem,
+		.num_sports = ARRAY_SIZE(sport_ocimem),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.buswidth = 8,
+		.slv_hw_id = SLV_OCIMEM,
+	},
+	{
+		.id = MSM_BUS_SLAVE_SNOC_OCMEM,
+		.slavep = sport_snoc_ocmem,
+		.num_sports = ARRAY_SIZE(sport_snoc_ocmem),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.buswidth = 8,
+		.slv_hw_id = SLV_SNOC_OCMEM,
+	},
+	{
+		.id = MSM_BUS_SLAVE_SERVICE_SNOC,
+		.slavep = sport_service_snoc,
+		.num_sports = ARRAY_SIZE(sport_service_snoc),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.buswidth = 8,
+		.slv_hw_id = SLV_SERVICE_SNOC,
+	},
+	{
+		.id = MSM_BUS_SLAVE_QDSS_STM,
+		.slavep = sport_qdss_stm,
+		.num_sports = ARRAY_SIZE(sport_qdss_stm),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.buswidth = 8,
+		.slv_hw_id = SLV_QDSS_STM,
+	},
+};
+
+
+static struct msm_bus_node_info mmss_noc_info[]  = {
+	{
+		.id = MSM_BUS_MASTER_GRAPHICS_3D,
+		.masterp = mport_gfx3d,
+		.num_mports = ARRAY_SIZE(mport_gfx3d),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.hw_sel = MSM_BUS_NOC,
+		.perm_mode = NOC_QOS_MODES_ALL_PERM,
+		.mode = NOC_QOS_MODE_FIXED,
+		.ws = 10000,
+		.qport = qports_oxili,
+		.mas_hw_id = MAS_GFX3D,
+	},
+	{
+		.id = MSM_BUS_MASTER_JPEG,
+		.masterp = mport_jpeg,
+		.num_mports = ARRAY_SIZE(mport_jpeg),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.hw_sel = MSM_BUS_NOC,
+		.perm_mode = NOC_QOS_MODES_ALL_PERM,
+		.mode = NOC_QOS_MODE_BYPASS,
+		.qport = qports_gemini,
+		.ws = 10000,
+		.mas_hw_id = MAS_JPEG,
+	},
+	{
+		.id = MSM_BUS_MASTER_MDP_PORT0,
+		.masterp = mport_mdp,
+		.num_mports = ARRAY_SIZE(mport_mdp),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.hw_sel = MSM_BUS_NOC,
+		.perm_mode = NOC_QOS_MODES_ALL_PERM,
+		.mode = NOC_QOS_MODE_BYPASS,
+		.qport = qports_mdp,
+		.ws = 10000,
+		.mas_hw_id = MAS_MDP,
+	},
+	{
+		.id = MSM_BUS_MASTER_VIDEO_P0,
+		.masterp = mport_video_port0,
+		.num_mports = ARRAY_SIZE(mport_video_port0),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.hw_sel = MSM_BUS_NOC,
+		.perm_mode = NOC_QOS_MODES_ALL_PERM,
+		.mode = NOC_QOS_MODE_BYPASS,
+		.ws = 10000,
+		.qport = qports_venus_p0,
+		.mas_hw_id = MAS_VIDEO_P0,
+	},
+	{
+		.id = MSM_BUS_MASTER_VIDEO_P1,
+		.masterp = mport_video_port1,
+		.num_mports = ARRAY_SIZE(mport_video_port1),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.hw_sel = MSM_BUS_NOC,
+		.perm_mode = NOC_QOS_MODES_ALL_PERM,
+		.mode = NOC_QOS_MODE_BYPASS,
+		.ws = 10000,
+		.qport = qports_venus_p1,
+		.mas_hw_id = MAS_VIDEO_P1,
+	},
+	{
+		.id = MSM_BUS_MASTER_VFE,
+		.masterp = mport_vfe,
+		.num_mports = ARRAY_SIZE(mport_vfe),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.hw_sel = MSM_BUS_NOC,
+		.perm_mode = NOC_QOS_MODES_ALL_PERM,
+		.mode = NOC_QOS_MODE_BYPASS,
+		.ws = 10000,
+		.qport = qports_vfe,
+		.mas_hw_id = MAS_VFE,
+	},
+	{
+		.id = MSM_BUS_FAB_CONFIG_NOC,
+		.gateway = 1,
+		.masterp = mport_gw_cnoc_mnoc_cfg,
+		.num_mports = ARRAY_SIZE(mport_gw_cnoc_mnoc_cfg),
+		.buswidth = 16,
+		.hw_sel = MSM_BUS_RPM,
+		.mas_hw_id = MAS_CNOC_MNOC_MMSS_CFG,
+	},
+	{
+		.id = MSM_BUS_FAB_BIMC,
+		.gateway = 1,
+		.slavep = sport_gw_mmss_bimc,
+		.num_sports = ARRAY_SIZE(sport_gw_mmss_bimc),
+		.buswidth = 16,
+		.hw_sel = MSM_BUS_NOC,
+		.slv_hw_id = SLV_MMSS_BIMC,
+	},
+	{
+		.id = MSM_BUS_SLAVE_CAMERA_CFG,
+		.slavep = sport_camera_cfg,
+		.num_sports = ARRAY_SIZE(sport_camera_cfg),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.buswidth = 16,
+		.hw_sel = MSM_BUS_NOC,
+		.slv_hw_id = SLV_CAMERA_CFG,
+	},
+	{
+		.id = MSM_BUS_SLAVE_DISPLAY_CFG,
+		.slavep = sport_display_cfg,
+		.num_sports = ARRAY_SIZE(sport_display_cfg),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.buswidth = 16,
+		.hw_sel = MSM_BUS_NOC,
+		.slv_hw_id = SLV_DISPLAY_CFG,
+	},
+	{
+		.id = MSM_BUS_SLAVE_OCMEM_CFG,
+		.slavep = sport_ocmem_cfg,
+		.num_sports = ARRAY_SIZE(sport_ocmem_cfg),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.buswidth = 16,
+		.hw_sel = MSM_BUS_NOC,
+		.slv_hw_id = SLV_OCMEM_CFG,
+	},
+	{
+		.id = MSM_BUS_SLAVE_CPR_CFG,
+		.slavep = sport_cpr_cfg,
+		.num_sports = ARRAY_SIZE(sport_cpr_cfg),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.buswidth = 16,
+		.hw_sel = MSM_BUS_NOC,
+		.slv_hw_id = SLV_CPR_CFG,
+	},
+	{
+		.id = MSM_BUS_SLAVE_CPR_XPU_CFG,
+		.slavep = sport_cpr_xpu_cfg,
+		.num_sports = ARRAY_SIZE(sport_cpr_xpu_cfg),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.buswidth = 16,
+		.hw_sel = MSM_BUS_NOC,
+		.slv_hw_id = SLV_CPR_XPU_CFG,
+	},
+	{
+		.id = MSM_BUS_SLAVE_MISC_CFG,
+		.slavep = sport_misc_cfg,
+		.num_sports = ARRAY_SIZE(sport_misc_cfg),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.buswidth = 16,
+		.hw_sel = MSM_BUS_NOC,
+		.slv_hw_id = SLV_MISC_CFG,
+	},
+	{
+		.id = MSM_BUS_SLAVE_MISC_XPU_CFG,
+		.slavep = sport_misc_xpu_cfg,
+		.num_sports = ARRAY_SIZE(sport_misc_xpu_cfg),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.buswidth = 16,
+		.hw_sel = MSM_BUS_NOC,
+		.slv_hw_id = SLV_MISC_XPU_CFG,
+	},
+	{
+		.id = MSM_BUS_SLAVE_VENUS_CFG,
+		.slavep = sport_venus_cfg,
+		.num_sports = ARRAY_SIZE(sport_venus_cfg),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.buswidth = 16,
+		.hw_sel = MSM_BUS_NOC,
+		.slv_hw_id = SLV_VENUS_CFG,
+	},
+	{
+		.id = MSM_BUS_SLAVE_GRAPHICS_3D_CFG,
+		.slavep = sport_gfx3d_cfg,
+		.num_sports = ARRAY_SIZE(sport_gfx3d_cfg),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.buswidth = 16,
+		.hw_sel = MSM_BUS_NOC,
+		.slv_hw_id = SLV_GFX3D_CFG,
+	},
+	{
+		.id = MSM_BUS_SLAVE_MMSS_CLK_CFG,
+		.slavep = sport_mmss_clk_cfg,
+		.num_sports = ARRAY_SIZE(sport_mmss_clk_cfg),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.buswidth = 16,
+		.hw_sel = MSM_BUS_NOC,
+		.slv_hw_id = SLV_MMSS_CLK_CFG,
+	},
+	{
+		.id = MSM_BUS_SLAVE_MMSS_CLK_XPU_CFG,
+		.slavep = sport_mmss_clk_xpu_cfg,
+		.num_sports = ARRAY_SIZE(sport_mmss_clk_xpu_cfg),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.buswidth = 16,
+		.hw_sel = MSM_BUS_NOC,
+		.slv_hw_id = SLV_MMSS_CLK_XPU_CFG,
+	},
+	{
+		.id = MSM_BUS_SLAVE_MNOC_MPU_CFG,
+		.slavep = sport_mnoc_mpu_cfg,
+		.num_sports = ARRAY_SIZE(sport_mnoc_mpu_cfg),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.buswidth = 16,
+		.hw_sel = MSM_BUS_NOC,
+		.slv_hw_id = SLV_MNOC_MPU_CFG,
+	},
+	{
+		.id = MSM_BUS_SLAVE_ONOC_MPU_CFG,
+		.slavep = sport_onoc_mpu_cfg,
+		.num_sports = ARRAY_SIZE(sport_onoc_mpu_cfg),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.buswidth = 16,
+		.hw_sel = MSM_BUS_NOC,
+		.slv_hw_id = SLV_ONOC_MPU_CFG,
+	},
+	{
+		.id = MSM_BUS_SLAVE_SERVICE_MNOC,
+		.slavep = sport_service_mnoc,
+		.num_sports = ARRAY_SIZE(sport_service_mnoc),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.buswidth = 16,
+		.hw_sel = MSM_BUS_NOC,
+		.slv_hw_id = SLV_SERVICE_MNOC,
+	},
+};
+
+static struct msm_bus_node_info bimc_info[]  = {
+	{
+		.id = MSM_BUS_MASTER_AMPSS_M0,
+		.masterp = mport_kmpss_m0,
+		.num_mports = ARRAY_SIZE(mport_kmpss_m0),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.hw_sel = MSM_BUS_BIMC,
+		.mode = NOC_QOS_MODE_FIXED,
+		.qport = qports_kmpss,
+		.ws = 10000,
+		.mas_hw_id = MAS_APPSS_PROC,
+		.prio_lvl = 0,
+		.prio_rd = 2,
+		.prio_wr = 2,
+	},
+	{
+		.id = MSM_BUS_MASTER_AMPSS_M1,
+		.masterp = mport_kmpss_m1,
+		.num_mports = ARRAY_SIZE(mport_kmpss_m1),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.hw_sel = MSM_BUS_BIMC,
+		.mode = NOC_QOS_MODE_FIXED,
+		.qport = qports_kmpss,
+		.ws = 10000,
+		.mas_hw_id = MAS_APPSS_PROC,
+	},
+	{
+		.id = MSM_BUS_MASTER_MSS_PROC,
+		.masterp = mport_mss_proc,
+		.num_mports = ARRAY_SIZE(mport_mss_proc),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.hw_sel = MSM_BUS_RPM,
+		.mas_hw_id = MAS_AMSS_PROC,
+	},
+	{
+		.id = MSM_BUS_FAB_MMSS_NOC,
+		.gateway = 1,
+		.masterp = mport_gw_mnoc_bimc,
+		.num_mports = ARRAY_SIZE(mport_gw_mnoc_bimc),
+		.qport = qports_gw_mnoc_bimc,
+		.buswidth = 8,
+		.ws = 10000,
+		.mas_hw_id = MAS_MNOC_BIMC,
+		.hw_sel = MSM_BUS_BIMC,
+		.mode = NOC_QOS_MODE_BYPASS,
+	},
+	{
+		.id = MSM_BUS_FAB_SYS_NOC,
+		.gateway = 1,
+		.slavep = sport_gw_bimc_snoc,
+		.num_sports = ARRAY_SIZE(sport_gw_bimc_snoc),
+		.masterp = mport_gw_snoc_bimc,
+		.num_mports = ARRAY_SIZE(mport_gw_snoc_bimc),
+		.qport = qports_gw_snoc_bimc,
+		.buswidth = 8,
+		.ws = 10000,
+		.mas_hw_id = MAS_SNOC_BIMC,
+		.slv_hw_id = SLV_BIMC_SNOC,
+	},
+	{
+		.id = MSM_BUS_SLAVE_EBI_CH0,
+		.slavep = sport_ebi1,
+		.num_sports = ARRAY_SIZE(sport_ebi1),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.buswidth = 8,
+		.slv_hw_id = SLV_EBI,
+		.mode = NOC_QOS_MODE_BYPASS,
+	},
+	{
+		.id = MSM_BUS_SLAVE_AMPSS_L2,
+		.slavep = sport_kmpss_l2,
+		.num_sports = ARRAY_SIZE(sport_kmpss_l2),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.buswidth = 8,
+		.slv_hw_id = SLV_APSS_L2,
+	},
+};
+
+static struct msm_bus_node_info ocmem_noc_info[]  = {
+	{
+		.id = MSM_BUS_FAB_OCMEM_VNOC,
+		.gateway = 1,
+		.buswidth = 16,
+		.mas_hw_id = MAS_OVNOC_ONOC,
+		.slv_hw_id = SLV_ONOC_OVNOC,
+	},
+	{
+		.id = MSM_BUS_MASTER_JPEG_OCMEM,
+		.masterp = mport_jpeg_ocmem,
+		.num_mports = ARRAY_SIZE(mport_jpeg_ocmem),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.perm_mode = NOC_QOS_PERM_MODE_FIXED,
+		.mode = NOC_QOS_MODE_FIXED,
+		.qport = qports_gemini_ocmem,
+		.mas_hw_id = MAS_JPEG_OCMEM,
+		.hw_sel = MSM_BUS_NOC,
+	},
+	{
+		.id = MSM_BUS_MASTER_MDP_OCMEM,
+		.masterp = mport_mdp_ocmem,
+		.num_mports = ARRAY_SIZE(mport_mdp_ocmem),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.perm_mode = NOC_QOS_PERM_MODE_FIXED,
+		.mode = NOC_QOS_MODE_FIXED,
+		.qport = qports_mdp_ocmem,
+		.mas_hw_id = MAS_MDP_OCMEM,
+		.hw_sel = MSM_BUS_NOC,
+	},
+	{
+		.id = MSM_BUS_MASTER_VIDEO_P0_OCMEM,
+		.masterp = mport_video_p0_ocmem,
+		.num_mports = ARRAY_SIZE(mport_video_p0_ocmem),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.perm_mode = NOC_QOS_PERM_MODE_FIXED,
+		.mode = NOC_QOS_MODE_FIXED,
+		.qport = qports_venus_p0_ocmem,
+		.mas_hw_id = MAS_VIDEO_P0_OCMEM,
+		.hw_sel = MSM_BUS_NOC,
+	},
+	{
+		.id = MSM_BUS_MASTER_VIDEO_P1_OCMEM,
+		.masterp = mport_video_p1_ocmem,
+		.num_mports = ARRAY_SIZE(mport_video_p1_ocmem),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.perm_mode = NOC_QOS_PERM_MODE_FIXED,
+		.mode = NOC_QOS_MODE_FIXED,
+		.qport = qports_venus_p1_ocmem,
+		.mas_hw_id = MAS_VIDEO_P1_OCMEM,
+		.hw_sel = MSM_BUS_NOC,
+	},
+	{
+		.id = MSM_BUS_MASTER_VFE_OCMEM,
+		.masterp = mport_vfe_ocmem,
+		.num_mports = ARRAY_SIZE(mport_vfe_ocmem),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.perm_mode = NOC_QOS_PERM_MODE_FIXED,
+		.mode = NOC_QOS_MODE_FIXED,
+		.qport = qports_vfe_ocmem,
+		.mas_hw_id = MAS_VFE_OCMEM,
+		.hw_sel = MSM_BUS_NOC,
+		.prio_rd = 1,
+		.prio_wr = 1,
+	},
+	{
+		.id = MSM_BUS_MASTER_CNOC_ONOC_CFG,
+		.masterp = mport_cnoc_onoc_cfg,
+		.num_mports = ARRAY_SIZE(mport_cnoc_onoc_cfg),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.buswidth = 16,
+		.mas_hw_id = MAS_CNOC_ONOC_CFG,
+		.hw_sel = MSM_BUS_NOC,
+	},
+	{
+		.id = MSM_BUS_SLAVE_SERVICE_ONOC,
+		.slavep = sport_service_onoc,
+		.num_sports = ARRAY_SIZE(sport_service_onoc),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.buswidth = 16,
+		.slv_hw_id = SLV_SERVICE_ONOC,
+	},
+};
+
+static struct msm_bus_node_info periph_noc_info[] = {
+	{
+		.id = MSM_BUS_MASTER_PNOC_CFG,
+		.masterp = mport_pnoc_cfg,
+		.num_mports = ARRAY_SIZE(mport_pnoc_cfg),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.buswidth = 8,
+		.mas_hw_id = MAS_PNOC_CFG,
+	},
+	{
+		.id = MSM_BUS_MASTER_SDCC_1,
+		.masterp = mport_sdcc_1,
+		.num_mports = ARRAY_SIZE(mport_sdcc_1),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.buswidth = 8,
+		.mas_hw_id = MAS_SDCC_1,
+	},
+	{
+		.id = MSM_BUS_MASTER_SDCC_3,
+		.masterp = mport_sdcc_3,
+		.num_mports = ARRAY_SIZE(mport_sdcc_3),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.buswidth = 8,
+		.mas_hw_id = MAS_SDCC_3,
+	},
+	{
+		.id = MSM_BUS_MASTER_SDCC_4,
+		.masterp = mport_sdcc_4,
+		.num_mports = ARRAY_SIZE(mport_sdcc_4),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.buswidth = 8,
+		.mas_hw_id = MAS_SDCC_4,
+	},
+	{
+		.id = MSM_BUS_MASTER_SDCC_2,
+		.masterp = mport_sdcc_2,
+		.num_mports = ARRAY_SIZE(mport_sdcc_2),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.buswidth = 8,
+		.mas_hw_id = MAS_SDCC_2,
+	},
+	{
+		.id = MSM_BUS_MASTER_TSIF,
+		.masterp = mport_tsif,
+		.num_mports = ARRAY_SIZE(mport_tsif),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.buswidth = 8,
+		.mas_hw_id = MAS_TSIF,
+	},
+	{
+		.id = MSM_BUS_MASTER_BAM_DMA,
+		.masterp = mport_bam_dma,
+		.num_mports = ARRAY_SIZE(mport_bam_dma),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.buswidth = 8,
+		.mas_hw_id = MAS_BAM_DMA,
+	},
+	{
+		.id = MSM_BUS_MASTER_BLSP_2,
+		.masterp = mport_blsp_2,
+		.num_mports = ARRAY_SIZE(mport_blsp_2),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.buswidth = 8,
+		.mas_hw_id = MAS_BLSP_2,
+	},
+	{
+		.id = MSM_BUS_MASTER_USB_HSIC,
+		.masterp = mport_usb_hsic,
+		.num_mports = ARRAY_SIZE(mport_usb_hsic),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.buswidth = 8,
+		.mas_hw_id = MAS_USB_HSIC,
+	},
+	{
+		.id = MSM_BUS_MASTER_BLSP_1,
+		.masterp = mport_blsp_1,
+		.num_mports = ARRAY_SIZE(mport_blsp_1),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.buswidth = 8,
+		.mas_hw_id = MAS_BLSP_1,
+	},
+	{
+		.id = MSM_BUS_MASTER_USB_HS,
+		.masterp = mport_usb_hs,
+		.num_mports = ARRAY_SIZE(mport_usb_hs),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.buswidth = 8,
+		.mas_hw_id = MAS_USB_HS,
+	},
+	{
+		.id = MSM_BUS_FAB_SYS_NOC,
+		.gateway = 1,
+		.slavep = sport_gw_pnoc_snoc,
+		.num_sports = ARRAY_SIZE(sport_gw_pnoc_snoc),
+		.masterp = mport_gw_snoc_pnoc,
+		.num_mports = ARRAY_SIZE(mport_gw_snoc_pnoc),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.buswidth = 8,
+		.slv_hw_id = SLV_PNOC_SNOC,
+		.mas_hw_id = MAS_SNOC_PNOC,
+	},
+	{
+		.id = MSM_BUS_SLAVE_SDCC_1,
+		.slavep = sport_sdcc_1,
+		.num_sports = ARRAY_SIZE(sport_sdcc_1),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.buswidth = 8,
+		.slv_hw_id = SLV_SDCC_1,
+	},
+	{
+		.id = MSM_BUS_SLAVE_SDCC_3,
+		.slavep = sport_sdcc_3,
+		.num_sports = ARRAY_SIZE(sport_sdcc_3),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.buswidth = 8,
+		.slv_hw_id = SLV_SDCC_3,
+	},
+	{
+		.id = MSM_BUS_SLAVE_SDCC_2,
+		.slavep = sport_sdcc_2,
+		.num_sports = ARRAY_SIZE(sport_sdcc_2),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.buswidth = 8,
+		.slv_hw_id = SLV_SDCC_2,
+	},
+	{
+		.id = MSM_BUS_SLAVE_SDCC_4,
+		.slavep = sport_sdcc_4,
+		.num_sports = ARRAY_SIZE(sport_sdcc_4),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.buswidth = 8,
+		.slv_hw_id = SLV_SDCC_4,
+	},
+	{
+		.id = MSM_BUS_SLAVE_TSIF,
+		.slavep = sport_tsif,
+		.num_sports = ARRAY_SIZE(sport_tsif),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.buswidth = 8,
+		.slv_hw_id = SLV_TSIF,
+	},
+	{
+		.id = MSM_BUS_SLAVE_BAM_DMA,
+		.slavep = sport_bam_dma,
+		.num_sports = ARRAY_SIZE(sport_bam_dma),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.buswidth = 8,
+		.slv_hw_id = SLV_BAM_DMA,
+	},
+	{
+		.id = MSM_BUS_SLAVE_BLSP_2,
+		.slavep = sport_blsp_2,
+		.num_sports = ARRAY_SIZE(sport_blsp_2),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.buswidth = 8,
+		.slv_hw_id = SLV_BLSP_2,
+	},
+	{
+		.id = MSM_BUS_SLAVE_USB_HSIC,
+		.slavep = sport_usb_hsic,
+		.num_sports = ARRAY_SIZE(sport_usb_hsic),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.buswidth = 8,
+		.slv_hw_id = SLV_USB_HSIC,
+	},
+	{
+		.id = MSM_BUS_SLAVE_BLSP_1,
+		.slavep = sport_blsp_1,
+		.num_sports = ARRAY_SIZE(sport_blsp_1),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.buswidth = 8,
+		.slv_hw_id = SLV_BLSP_1,
+	},
+	{
+		.id = MSM_BUS_SLAVE_USB_HS,
+		.slavep = sport_usb_hs,
+		.num_sports = ARRAY_SIZE(sport_usb_hs),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.buswidth = 8,
+		.slv_hw_id = SLV_USB_HS,
+	},
+	{
+		.id = MSM_BUS_SLAVE_PDM,
+		.slavep = sport_pdm,
+		.num_sports = ARRAY_SIZE(sport_pdm),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.buswidth = 8,
+		.slv_hw_id = SLV_PDM,
+	},
+	{
+		.id = MSM_BUS_SLAVE_PERIPH_APU_CFG,
+		.slavep = sport_periph_apu_cfg,
+		.num_sports = ARRAY_SIZE(sport_periph_apu_cfg),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.buswidth = 8,
+		.slv_hw_id = SLV_PERIPH_APU_CFG,
+	},
+	{
+		.id = MSM_BUS_SLAVE_PNOC_MPU_CFG,
+		.slavep = sport_pnoc_mpu_cfg,
+		.num_sports = ARRAY_SIZE(sport_pnoc_mpu_cfg),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.buswidth = 8,
+		.slv_hw_id = SLV_MPU_CFG,
+	},
+	{
+		.id = MSM_BUS_SLAVE_PRNG,
+		.slavep = sport_prng,
+		.num_sports = ARRAY_SIZE(sport_prng),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.buswidth = 8,
+		.slv_hw_id = SLV_PRNG,
+	},
+	{
+		.id = MSM_BUS_SLAVE_SERVICE_PNOC,
+		.slavep = sport_service_pnoc,
+		.num_sports = ARRAY_SIZE(sport_service_pnoc),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.buswidth = 8,
+		.slv_hw_id = SLV_SERVICE_PNOC,
+	},
+};
+
+static struct msm_bus_node_info config_noc_info[] = {
+	{
+		.id = MSM_BUS_MASTER_RPM_INST,
+		.masterp = mport_rpm_inst,
+		.num_mports = ARRAY_SIZE(mport_rpm_inst),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.buswidth = 8,
+		.mas_hw_id = MAS_RPM_INST,
+	},
+	{
+		.id = MSM_BUS_MASTER_RPM_DATA,
+		.masterp = mport_rpm_data,
+		.num_mports = ARRAY_SIZE(mport_rpm_data),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.buswidth = 8,
+		.mas_hw_id = MAS_RPM_DATA,
+	},
+	{
+		.id = MSM_BUS_MASTER_RPM_SYS,
+		.masterp = mport_rpm_sys,
+		.num_mports = ARRAY_SIZE(mport_rpm_sys),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.buswidth = 8,
+		.mas_hw_id = MAS_RPM_SYS,
+	},
+	{
+		.id = MSM_BUS_MASTER_DEHR,
+		.masterp = mport_dehr,
+		.num_mports = ARRAY_SIZE(mport_dehr),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.buswidth = 8,
+		.mas_hw_id = MAS_DEHR,
+	},
+	{
+		.id = MSM_BUS_MASTER_QDSS_DAP,
+		.masterp = mport_qdss_dap,
+		.num_mports = ARRAY_SIZE(mport_qdss_dap),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.buswidth = 8,
+		.mas_hw_id = MAS_QDSS_DAP,
+	},
+	{
+		.id = MSM_BUS_MASTER_SPDM,
+		.masterp = mport_spdm,
+		.num_mports = ARRAY_SIZE(mport_spdm),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.buswidth = 8,
+		.mas_hw_id = MAS_SPDM,
+	},
+	{
+		.id = MSM_BUS_MASTER_TIC,
+		.masterp = mport_tic,
+		.num_mports = ARRAY_SIZE(mport_tic),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.buswidth = 8,
+		.mas_hw_id = MAS_TIC,
+	},
+	{
+		.id = MSM_BUS_SLAVE_CLK_CTL,
+		.slavep = sport_clk_ctl,
+		.num_sports = ARRAY_SIZE(sport_clk_ctl),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.buswidth = 8,
+		.slv_hw_id = SLV_CLK_CTL,
+	},
+	{
+		.id = MSM_BUS_SLAVE_CNOC_MSS,
+		.slavep = sport_cnoc_mss,
+		.num_sports = ARRAY_SIZE(sport_cnoc_mss),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.buswidth = 8,
+		.slv_hw_id = SLV_CNOC_MSS,
+	},
+	{
+		.id = MSM_BUS_SLAVE_SECURITY,
+		.slavep = sport_security,
+		.num_sports = ARRAY_SIZE(sport_security),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.buswidth = 8,
+		.slv_hw_id = SLV_SECURITY,
+	},
+	{
+		.id = MSM_BUS_SLAVE_TCSR,
+		.slavep = sport_tcsr,
+		.num_sports = ARRAY_SIZE(sport_tcsr),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.buswidth = 8,
+		.slv_hw_id = SLV_TCSR,
+	},
+	{
+		.id = MSM_BUS_SLAVE_TLMM,
+		.slavep = sport_tlmm,
+		.num_sports = ARRAY_SIZE(sport_tlmm),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.buswidth = 8,
+		.slv_hw_id = SLV_TLMM,
+	},
+	{
+		.id = MSM_BUS_SLAVE_CRYPTO_0_CFG,
+		.slavep = sport_crypto_0_cfg,
+		.num_sports = ARRAY_SIZE(sport_crypto_0_cfg),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.buswidth = 8,
+		.slv_hw_id = SLV_CRYPTO_0_CFG,
+	},
+	{
+		.id = MSM_BUS_SLAVE_CRYPTO_1_CFG,
+		.slavep = sport_crypto_1_cfg,
+		.num_sports = ARRAY_SIZE(sport_crypto_1_cfg),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.buswidth = 8,
+		.slv_hw_id = SLV_CRYPTO_1_CFG,
+	},
+	{
+		.id = MSM_BUS_SLAVE_IMEM_CFG,
+		.slavep = sport_imem_cfg,
+		.num_sports = ARRAY_SIZE(sport_imem_cfg),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.buswidth = 8,
+		.slv_hw_id = SLV_IMEM_CFG,
+	},
+	{
+		.id = MSM_BUS_SLAVE_MESSAGE_RAM,
+		.slavep = sport_message_ram,
+		.num_sports = ARRAY_SIZE(sport_message_ram),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.buswidth = 8,
+		.slv_hw_id = SLV_MESSAGE_RAM,
+	},
+	{
+		.id = MSM_BUS_SLAVE_BIMC_CFG,
+		.slavep = sport_bimc_cfg,
+		.num_sports = ARRAY_SIZE(sport_bimc_cfg),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.buswidth = 8,
+		.slv_hw_id = SLV_BIMC_CFG,
+	},
+	{
+		.id = MSM_BUS_SLAVE_BOOT_ROM,
+		.slavep = sport_boot_rom,
+		.num_sports = ARRAY_SIZE(sport_boot_rom),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.buswidth = 8,
+		.slv_hw_id = SLV_BOOT_ROM,
+	},
+	{
+		.id = MSM_BUS_SLAVE_PMIC_ARB,
+		.slavep = sport_pmic_arb,
+		.num_sports = ARRAY_SIZE(sport_pmic_arb),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.buswidth = 8,
+		.slv_hw_id = SLV_PMIC_ARB,
+	},
+	{
+		.id = MSM_BUS_SLAVE_SPDM_WRAPPER,
+		.slavep = sport_spdm_wrapper,
+		.num_sports = ARRAY_SIZE(sport_spdm_wrapper),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.buswidth = 8,
+		.slv_hw_id = SLV_SPDM_WRAPPER,
+	},
+	{
+		.id = MSM_BUS_SLAVE_DEHR_CFG,
+		.slavep = sport_dehr_cfg,
+		.num_sports = ARRAY_SIZE(sport_dehr_cfg),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.buswidth = 8,
+		.slv_hw_id = SLV_DEHR_CFG,
+	},
+	{
+		.id = MSM_BUS_SLAVE_MPM,
+		.slavep = sport_mpm,
+		.num_sports = ARRAY_SIZE(sport_mpm),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.buswidth = 8,
+		.slv_hw_id = SLV_MPM,
+	},
+	{
+		.id = MSM_BUS_SLAVE_QDSS_CFG,
+		.slavep = sport_qdss_cfg,
+		.num_sports = ARRAY_SIZE(sport_qdss_cfg),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.buswidth = 8,
+		.slv_hw_id = SLV_QDSS_CFG,
+	},
+	{
+		.id = MSM_BUS_SLAVE_RBCPR_CFG,
+		.slavep = sport_rbcpr_cfg,
+		.num_sports = ARRAY_SIZE(sport_rbcpr_cfg),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.buswidth = 8,
+		.slv_hw_id = SLV_RBCPR_CFG,
+	},
+	{
+		.id = MSM_BUS_SLAVE_RBCPR_QDSS_APU_CFG,
+		.slavep = sport_rbcpr_qdss_apu_cfg,
+		.num_sports = ARRAY_SIZE(sport_rbcpr_qdss_apu_cfg),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.buswidth = 8,
+		.slv_hw_id = SLV_RBCPR_QDSS_APU_CFG,
+	},
+	{
+		.id = MSM_BUS_FAB_SYS_NOC,
+		.gateway = 1,
+		.slavep = sport_gw_cnoc_snoc,
+		.num_sports = ARRAY_SIZE(sport_gw_cnoc_snoc),
+		.masterp = mport_gw_snoc_cnoc,
+		.num_mports = ARRAY_SIZE(mport_gw_snoc_cnoc),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.buswidth = 8,
+		.mas_hw_id = MAS_SNOC_CNOC,
+		.slv_hw_id = SLV_CNOC_SNOC,
+	},
+	{
+		.id = MSM_BUS_SLAVE_CNOC_ONOC_CFG,
+		.slavep = sport_cnoc_onoc_cfg,
+		.num_sports = ARRAY_SIZE(sport_cnoc_onoc_cfg),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.buswidth = 8,
+		.slv_hw_id = SLV_CNOC_ONOC_CFG,
+	},
+	{
+		.id = MSM_BUS_SLAVE_CNOC_MNOC_MMSS_CFG,
+		.slavep = sport_cnoc_mnoc_mmss_cfg,
+		.num_sports = ARRAY_SIZE(sport_cnoc_mnoc_mmss_cfg),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.buswidth = 8,
+		.slv_hw_id = SLV_CNOC_MNOC_MMSS_CFG,
+	},
+	{
+		.id = MSM_BUS_SLAVE_CNOC_MNOC_CFG,
+		.slavep = sport_cnoc_mnoc_cfg,
+		.num_sports = ARRAY_SIZE(sport_cnoc_mnoc_cfg),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.buswidth = 8,
+		.slv_hw_id = SLV_CNOC_MNOC_CFG,
+	},
+	{
+		.id = MSM_BUS_SLAVE_PNOC_CFG,
+		.slavep = sport_pnoc_cfg,
+		.num_sports = ARRAY_SIZE(sport_pnoc_cfg),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.buswidth = 8,
+		.slv_hw_id = SLV_PNOC_CFG,
+	},
+	{
+		.id = MSM_BUS_SLAVE_SNOC_MPU_CFG,
+		.slavep = sport_snoc_mpu_cfg,
+		.num_sports = ARRAY_SIZE(sport_snoc_mpu_cfg),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.buswidth = 8,
+		.slv_hw_id = SLV_SNOC_MPU_CFG,
+	},
+	{
+		.id = MSM_BUS_SLAVE_SNOC_CFG,
+		.slavep = sport_snoc_cfg,
+		.num_sports = ARRAY_SIZE(sport_snoc_cfg),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.buswidth = 8,
+		.slv_hw_id = SLV_SNOC_CFG,
+	},
+	{
+		.id = MSM_BUS_SLAVE_EBI1_DLL_CFG,
+		.slavep = sport_ebi1_dll_cfg,
+		.num_sports = ARRAY_SIZE(sport_ebi1_dll_cfg),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.buswidth = 8,
+		.slv_hw_id = SLV_EBI1_DLL_CFG,
+	},
+	{
+		.id = MSM_BUS_SLAVE_PHY_APU_CFG,
+		.slavep = sport_phy_apu_cfg,
+		.num_sports = ARRAY_SIZE(sport_phy_apu_cfg),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.buswidth = 8,
+		.slv_hw_id = SLV_PHY_APU_CFG,
+	},
+	{
+		.id = MSM_BUS_SLAVE_EBI1_PHY_CFG,
+		.slavep = sport_ebi1_phy_cfg,
+		.num_sports = ARRAY_SIZE(sport_ebi1_phy_cfg),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.buswidth = 8,
+		.slv_hw_id = SLV_EBI1_PHY_CFG,
+	},
+	{
+		.id = MSM_BUS_SLAVE_RPM,
+		.slavep = sport_rpm,
+		.num_sports = ARRAY_SIZE(sport_rpm),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.buswidth = 8,
+		.slv_hw_id = SLV_RPM,
+	},
+	{
+		.id = MSM_BUS_SLAVE_SERVICE_CNOC,
+		.slavep = sport_service_cnoc,
+		.num_sports = ARRAY_SIZE(sport_service_cnoc),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.buswidth = 8,
+		.slv_hw_id = SLV_SERVICE_CNOC,
+	},
+};
+
+/* A virtual NoC is needed for connection to OCMEM */
+static struct msm_bus_node_info ocmem_vnoc_info[] = {
+	{
+		.id = MSM_BUS_MASTER_V_OCMEM_GFX3D,
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.buswidth = 8,
+		.mas_hw_id = MAS_V_OCMEM_GFX3D,
+	},
+	{
+		.id = MSM_BUS_SLAVE_OCMEM,
+		.slavep = sport_ocmem,
+		.num_sports = ARRAY_SIZE(sport_ocmem),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.buswidth = 16,
+		.slv_hw_id = SLV_OCMEM,
+		.slaveclk[DUAL_CTX] = "ocmem_clk",
+		.slaveclk[ACTIVE_CTX] = "ocmem_a_clk",
+	},
+	{
+		.id = MSM_BUS_FAB_SYS_NOC,
+		.gateway = 1,
+		.buswidth = 8,
+		.ws = 10000,
+		.mas_hw_id = MAS_SNOC_OVNOC,
+		.slv_hw_id = SLV_OVNOC_SNOC,
+	},
+	{
+		.id = MSM_BUS_FAB_OCMEM_NOC,
+		.gateway = 1,
+		.buswidth = 16,
+		.ws = 10000,
+		.mas_hw_id = MAS_ONOC_OVNOC,
+		.slv_hw_id = SLV_OVNOC_ONOC,
+	},
+};
+
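+/*
+ * Non-gateway nodes get a private id of fabric id + node id, cached in
+ * master_iids[]/slave_iids[] (slaves are keyed by id - SLAVE_ID_KEY) for
+ * later lookup through the board algorithm's get_iid().  Gateway nodes
+ * keep their id unchanged.
+ */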
+static void msm_bus_board_assign_iids(struct msm_bus_fabric_registration
+	*fabreg, int fabid)
+{
+	int i;
+	for (i = 0; i < fabreg->len; i++) {
+		if (!fabreg->info[i].gateway) {
+			fabreg->info[i].priv_id = fabid + fabreg->info[i].id;
+			if (fabreg->info[i].id < SLAVE_ID_KEY) {
+				WARN(fabreg->info[i].id >= NMASTERS,
+					"id %d exceeds array size!\n",
+					fabreg->info[i].id);
+				master_iids[fabreg->info[i].id] =
+					fabreg->info[i].priv_id;
+			} else {
+				WARN((fabreg->info[i].id - SLAVE_ID_KEY) >=
+					NSLAVES, "id %d exceeds array size!\n",
+					fabreg->info[i].id);
+				slave_iids[fabreg->info[i].id - (SLAVE_ID_KEY)]
+					= fabreg->info[i].priv_id;
+			}
+		} else {
+			fabreg->info[i].priv_id = fabreg->info[i].id;
+		}
+	}
+}
+
+static int msm_bus_board_copper_get_iid(int id)
+{
+	if ((id < SLAVE_ID_KEY && id >= NMASTERS) ||
+		id >= (SLAVE_ID_KEY + NSLAVES)) {
+		MSM_BUS_ERR("Cannot get iid. Invalid id %d passed\n", id);
+		return -EINVAL;
+	}
+
+	return CHECK_ID(((id < SLAVE_ID_KEY) ? master_iids[id] :
+		slave_iids[id - SLAVE_ID_KEY]), id);
+}
+
+int msm_bus_board_rpm_get_il_ids(uint16_t *id)
+{
+	return -ENXIO;
+}
+
+static struct msm_bus_board_algorithm msm_bus_board_algo = {
+	.board_nfab = NFAB_COPPER,
+	.get_iid = msm_bus_board_copper_get_iid,
+	.assign_iids = msm_bus_board_assign_iids,
+};
+
+struct msm_bus_fabric_registration msm_bus_copper_sys_noc_pdata = {
+	.id = MSM_BUS_FAB_SYS_NOC,
+	.name = "msm_sys_noc",
+	.info = sys_noc_info,
+	.len = ARRAY_SIZE(sys_noc_info),
+	.ahb = 0,
+	.fabclk[DUAL_CTX] = "bus_clk",
+	.fabclk[ACTIVE_CTX] = "bus_a_clk",
+	.nmasters = 15,
+	.nslaves = 12,
+	.ntieredslaves = 0,
+	.board_algo = &msm_bus_board_algo,
+	.qos_freq = 4800,
+	.hw_sel = MSM_BUS_NOC,
+	.rpm_enabled = 1,
+};
+
+struct msm_bus_fabric_registration msm_bus_copper_mmss_noc_pdata = {
+	.id = MSM_BUS_FAB_MMSS_NOC,
+	.name = "msm_mmss_noc",
+	.info = mmss_noc_info,
+	.len = ARRAY_SIZE(mmss_noc_info),
+	.ahb = 0,
+	.fabclk[DUAL_CTX] = "bus_clk",
+	.fabclk[ACTIVE_CTX] = "bus_a_clk",
+	.nmasters = 9,
+	.nslaves = 16,
+	.ntieredslaves = 0,
+	.board_algo = &msm_bus_board_algo,
+	.qos_freq = 4800,
+	.hw_sel = MSM_BUS_NOC,
+	.rpm_enabled = 1,
+};
+
+struct msm_bus_fabric_registration msm_bus_copper_bimc_pdata = {
+	.id = MSM_BUS_FAB_BIMC,
+	.name = "msm_bimc",
+	.info = bimc_info,
+	.len = ARRAY_SIZE(bimc_info),
+	.ahb = 0,
+	.fabclk[DUAL_CTX] = "mem_clk",
+	.fabclk[ACTIVE_CTX] = "mem_a_clk",
+	.nmasters = 7,
+	.nslaves = 4,
+	.ntieredslaves = 0,
+	.board_algo = &msm_bus_board_algo,
+	.qos_freq = 4800,
+	.hw_sel = MSM_BUS_BIMC,
+	.rpm_enabled = 1,
+};
+
+struct msm_bus_fabric_registration msm_bus_copper_ocmem_noc_pdata = {
+	.id = MSM_BUS_FAB_OCMEM_NOC,
+	.name = "msm_ocmem_noc",
+	.info = ocmem_noc_info,
+	.len = ARRAY_SIZE(ocmem_noc_info),
+	.ahb = 0,
+	.fabclk[DUAL_CTX] = "bus_clk",
+	.fabclk[ACTIVE_CTX] = "bus_a_clk",
+	.nmasters = 6,
+	.nslaves = 3,
+	.ntieredslaves = 0,
+	.board_algo = &msm_bus_board_algo,
+	.qos_freq = 4800,
+	.hw_sel = MSM_BUS_NOC,
+	.rpm_enabled = 1,
+};
+
+struct msm_bus_fabric_registration msm_bus_copper_periph_noc_pdata = {
+	.id = MSM_BUS_FAB_PERIPH_NOC,
+	.name = "msm_periph_noc",
+	.info = periph_noc_info,
+	.len = ARRAY_SIZE(periph_noc_info),
+	.ahb = 0,
+	.fabclk[DUAL_CTX] = "bus_clk",
+	.fabclk[ACTIVE_CTX] = "bus_a_clk",
+	.nmasters = 12,
+	.nslaves = 16,
+	.ntieredslaves = 0,
+	.board_algo = &msm_bus_board_algo,
+	.hw_sel = MSM_BUS_NOC,
+	.rpm_enabled = 1,
+};
+
+struct msm_bus_fabric_registration msm_bus_copper_config_noc_pdata = {
+	.id = MSM_BUS_FAB_CONFIG_NOC,
+	.name = "msm_config_noc",
+	.info = config_noc_info,
+	.len = ARRAY_SIZE(config_noc_info),
+	.ahb = 0,
+	.fabclk[DUAL_CTX] = "bus_clk",
+	.fabclk[ACTIVE_CTX] = "bus_a_clk",
+	.nmasters = 8,
+	.nslaves = 30,
+	.ntieredslaves = 0,
+	.board_algo = &msm_bus_board_algo,
+	.hw_sel = MSM_BUS_NOC,
+	.rpm_enabled = 1,
+};
+
+struct msm_bus_fabric_registration msm_bus_copper_ocmem_vnoc_pdata = {
+	.id = MSM_BUS_FAB_OCMEM_VNOC,
+	.name = "msm_ocmem_vnoc",
+	.info = ocmem_vnoc_info,
+	.len = ARRAY_SIZE(ocmem_vnoc_info),
+	.ahb = 0,
+	.nmasters = 5,
+	.nslaves = 4,
+	.ntieredslaves = 0,
+	.board_algo = &msm_bus_board_algo,
+	.hw_sel = MSM_BUS_NOC,
+	.virt = 1,
+	.rpm_enabled = 1,
+};
diff --git a/arch/arm/mach-msm/msm_bus/msm_bus_fabric.c b/arch/arm/mach-msm/msm_bus/msm_bus_fabric.c
index 3671916..a4b9b51 100644
--- a/arch/arm/mach-msm/msm_bus/msm_bus_fabric.c
+++ b/arch/arm/mach-msm/msm_bus/msm_bus_fabric.c
@@ -131,7 +131,7 @@
 		fabric->pdata->id, fabric->pdata->len);
 	fabric->hw_data = fabric->fabdev.hw_algo.allocate_hw_data(pdev,
 		fabric->pdata);
-	if (ZERO_OR_NULL_PTR(fabric->hw_data)) {
+	if (ZERO_OR_NULL_PTR(fabric->hw_data) && fabric->pdata->ahb == 0) {
 		MSM_BUS_ERR("Couldn't allocate hw_data for fab: %d\n",
 			fabric->fabdev.id);
 		goto error;
diff --git a/arch/arm/mach-msm/msm_watchdog_v2.c b/arch/arm/mach-msm/msm_watchdog_v2.c
new file mode 100644
index 0000000..a5f8bcc
--- /dev/null
+++ b/arch/arm/mach-msm/msm_watchdog_v2.c
@@ -0,0 +1,400 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/workqueue.h>
+#include <linux/slab.h>
+#include <linux/jiffies.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+#include <linux/of.h>
+#include <linux/cpu.h>
+#include <linux/platform_device.h>
+
+#define MODULE_NAME "msm_watchdog"
+#define WDT0_ACCSCSSNBARK_INT 0
+#define TCSR_WDT_CFG	0x30
+#define WDT0_RST	0x04
+#define WDT0_EN		0x08
+#define WDT0_STS	0x0C
+#define WDT0_BARK_TIME	0x10
+#define WDT0_BITE_TIME	0x14
+
+#define MASK_SIZE	32
+
+struct msm_watchdog_data {
+	phys_addr_t phys_base;
+	size_t size;
+	void __iomem *base;
+	struct device *dev;
+	unsigned int pet_time;
+	unsigned int bark_time;
+	unsigned int bark_irq;
+	unsigned int bite_irq;
+	unsigned int do_ipi_ping;
+	unsigned long long last_pet;
+	unsigned min_slack_ticks;
+	unsigned long long min_slack_ns;
+	cpumask_t alive_mask;
+	struct work_struct init_dogwork_struct;
+	struct delayed_work dogwork_struct;
+	struct notifier_block panic_blk;
+};
+
+/*
+ * On the kernel command line specify
+ * msm_watchdog.enable=1 to enable the watchdog.
+ * By default the watchdog is turned on.
+ */
+static int enable = 1;
+module_param(enable, int, 0);
+
+/*
+ * On the kernel command line specify
+ * msm_watchdog.WDT_HZ=<clock val in HZ> to set Watchdog
+ * ticks. By default it is set to 32765.
+ */
+static long WDT_HZ = 32765;
+module_param(WDT_HZ, long, 0);
+
+/*
+ * If the watchdog is enabled at bootup (enable=1),
+ * the runtime_disable sysfs node at
+ * /sys/module/msm_watchdog/parameters/runtime_disable
+ * can be used to deactivate the watchdog.
+ * This is a one-time setting. The watchdog
+ * cannot be re-enabled once it is disabled.
+ */
+static int runtime_disable;
+static int wdog_enable_set(const char *val, struct kernel_param *kp);
+module_param_call(runtime_disable, wdog_enable_set, param_get_int,
+			&runtime_disable, 0644);
+
+static void pet_watchdog_work(struct work_struct *work);
+static void init_watchdog_work(struct work_struct *work);
+
+static void dump_cpu_alive_mask(struct msm_watchdog_data *wdog_dd)
+{
+	static char alive_mask_buf[MASK_SIZE];
+	size_t count = cpulist_scnprintf(alive_mask_buf, MASK_SIZE,
+						&wdog_dd->alive_mask);
+	alive_mask_buf[count++] = '\n';
+	alive_mask_buf[count] = '\0';
+	printk(KERN_INFO "cpu alive mask from last pet\n%s", alive_mask_buf);
+}
+
+static int msm_watchdog_suspend(struct device *dev)
+{
+	struct msm_watchdog_data *wdog_dd =
+			(struct msm_watchdog_data *)dev_get_drvdata(dev);
+	if (!enable)
+		return 0;
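+	/* Pet once more, then stop the timer while suspended. */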
+	__raw_writel(1, wdog_dd->base + WDT0_RST);
+	__raw_writel(0, wdog_dd->base + WDT0_EN);
+	mb();
+	return 0;
+}
+
+static int msm_watchdog_resume(struct device *dev)
+{
+	struct msm_watchdog_data *wdog_dd =
+			(struct msm_watchdog_data *)dev_get_drvdata(dev);
+	if (!enable)
+		return 0;
+	__raw_writel(1, wdog_dd->base + WDT0_EN);
+	__raw_writel(1, wdog_dd->base + WDT0_RST);
+	mb();
+	return 0;
+}
+
+static int panic_wdog_handler(struct notifier_block *this,
+			      unsigned long event, void *ptr)
+{
+	struct msm_watchdog_data *wdog_dd = container_of(this,
+				struct msm_watchdog_data, panic_blk);
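+	/*
+	 * If no reboot-on-panic is requested, stop the watchdog.  Otherwise
+	 * push the bark/bite times out past the panic timeout so a stuck
+	 * panic path still ends in a watchdog reset.
+	 */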
+	if (panic_timeout == 0) {
+		__raw_writel(0, wdog_dd->base + WDT0_EN);
+		mb();
+	} else {
+		__raw_writel(WDT_HZ * (panic_timeout + 4),
+				wdog_dd->base + WDT0_BARK_TIME);
+		__raw_writel(WDT_HZ * (panic_timeout + 4),
+				wdog_dd->base + WDT0_BITE_TIME);
+		__raw_writel(1, wdog_dd->base + WDT0_RST);
+	}
+	return NOTIFY_DONE;
+}
+/*
+ * TODO: implement enable/disable.
+ */
+static int wdog_enable_set(const char *val, struct kernel_param *kp)
+{
+	return 0;
+}
+
+
+static void pet_watchdog(struct msm_watchdog_data *wdog_dd)
+{
+	int slack;
+	unsigned long long time_ns;
+	unsigned long long slack_ns;
+	unsigned long long bark_time_ns = wdog_dd->bark_time * 1000000ULL;
+
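+	/*
+	 * Track the smallest slack seen so far (time left before the bark
+	 * would have fired), in hardware ticks and in sched_clock() ns.
+	 */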
+	slack = __raw_readl(wdog_dd->base + WDT0_STS) >> 3;
+	slack = ((wdog_dd->bark_time*WDT_HZ)/1000) - slack;
+	if (slack < wdog_dd->min_slack_ticks)
+		wdog_dd->min_slack_ticks = slack;
+	__raw_writel(1, wdog_dd->base + WDT0_RST);
+	time_ns = sched_clock();
+	slack_ns = (wdog_dd->last_pet + bark_time_ns) - time_ns;
+	if (slack_ns < wdog_dd->min_slack_ns)
+		wdog_dd->min_slack_ns = slack_ns;
+	wdog_dd->last_pet = time_ns;
+}
+
+static void keep_alive_response(void *info)
+{
+	int cpu = smp_processor_id();
+	struct msm_watchdog_data *wdog_dd = (struct msm_watchdog_data *)info;
+	cpumask_set_cpu(cpu, &wdog_dd->alive_mask);
+	smp_mb();
+}
+
+/*
+ * If this function does not return, it implies one of the
+ * other CPUs is not responsive.
+ */
+static void ping_other_cpus(struct msm_watchdog_data *wdog_dd)
+{
+	int cpu;
+	cpumask_clear(&wdog_dd->alive_mask);
+	smp_mb();
+	for_each_cpu(cpu, cpu_online_mask)
+		smp_call_function_single(cpu, keep_alive_response, wdog_dd, 1);
+}
+
+static void pet_watchdog_work(struct work_struct *work)
+{
+	unsigned long delay_time;
+	struct delayed_work *delayed_work = to_delayed_work(work);
+	struct msm_watchdog_data *wdog_dd = container_of(delayed_work,
+						struct msm_watchdog_data,
+							dogwork_struct);
+	delay_time = msecs_to_jiffies(wdog_dd->pet_time);
+	if (wdog_dd->do_ipi_ping)
+		ping_other_cpus(wdog_dd);
+	pet_watchdog(wdog_dd);
+	if (wdog_dd->do_ipi_ping)
+		dump_cpu_alive_mask(wdog_dd);
+	if (enable)
+		schedule_delayed_work(&wdog_dd->dogwork_struct,
+							delay_time);
+}
+
+static int msm_watchdog_remove(struct platform_device *pdev)
+{
+	struct msm_watchdog_data *wdog_dd =
+			(struct msm_watchdog_data *)platform_get_drvdata(pdev);
+	if (enable) {
+		__raw_writel(0, wdog_dd->base + WDT0_EN);
+		mb();
+		enable = 0;
+		/*
+		 * TODO: Not sure if we need to call into TZ to disable
+		 * secure wdog.
+		 */
+		/* In case we got suspended mid-exit */
+		__raw_writel(0, wdog_dd->base + WDT0_EN);
+	}
+	printk(KERN_INFO "MSM Watchdog Exit - Deactivated\n");
+	kzfree(wdog_dd);
+	return 0;
+}
+
+static irqreturn_t wdog_bark_handler(int irq, void *dev_id)
+{
+	struct msm_watchdog_data *wdog_dd = (struct msm_watchdog_data *)dev_id;
+	unsigned long nanosec_rem;
+	unsigned long long t = sched_clock();
+
+	nanosec_rem = do_div(t, 1000000000);
+	printk(KERN_INFO "Watchdog bark! Now = %lu.%06lu\n", (unsigned long) t,
+		nanosec_rem / 1000);
+
+	nanosec_rem = do_div(wdog_dd->last_pet, 1000000000);
+	printk(KERN_INFO "Watchdog last pet at %lu.%06lu\n", (unsigned long)
+		wdog_dd->last_pet, nanosec_rem / 1000);
+	if (wdog_dd->do_ipi_ping)
+		dump_cpu_alive_mask(wdog_dd);
+	panic("Apps watchdog bark received!");
+	return IRQ_HANDLED;
+}
+
+static void init_watchdog_work(struct work_struct *work)
+{
+	struct msm_watchdog_data *wdog_dd = container_of(work,
+						struct msm_watchdog_data,
+							init_dogwork_struct);
+	unsigned long delay_time;
+	u64 timeout;
+	delay_time = msecs_to_jiffies(wdog_dd->pet_time);
+	wdog_dd->min_slack_ticks = UINT_MAX;
+	wdog_dd->min_slack_ns = ULLONG_MAX;
+	timeout = (wdog_dd->bark_time * WDT_HZ)/1000;
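+	/* Bark at the configured bark_time; bite 3 seconds after the bark. */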
+	__raw_writel(timeout, wdog_dd->base + WDT0_BARK_TIME);
+	__raw_writel(timeout + 3*WDT_HZ, wdog_dd->base + WDT0_BITE_TIME);
+
+	wdog_dd->panic_blk.notifier_call = panic_wdog_handler;
+	atomic_notifier_chain_register(&panic_notifier_list,
+				       &wdog_dd->panic_blk);
+	schedule_delayed_work(&wdog_dd->dogwork_struct, delay_time);
+
+	__raw_writel(1, wdog_dd->base + WDT0_EN);
+	__raw_writel(1, wdog_dd->base + WDT0_RST);
+	wdog_dd->last_pet = sched_clock();
+	printk(KERN_INFO "MSM Watchdog Initialized\n");
+	return;
+}
+
+static struct of_device_id msm_wdog_match_table[] = {
+	{ .compatible = "qcom,msm-watchdog" },
+	{}
+};
+
+static void __devinit dump_pdata(struct msm_watchdog_data *pdata)
+{
+	dev_dbg(pdata->dev, "wdog bark_time %d", pdata->bark_time);
+	dev_dbg(pdata->dev, "wdog pet_time %d", pdata->pet_time);
+	dev_dbg(pdata->dev, "wdog perform ipi ping %d", pdata->do_ipi_ping);
+	dev_dbg(pdata->dev, "wdog base address is 0x%x\n", (unsigned int)
+								pdata->base);
+}
+
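+/*
+ * Illustrative device tree node consumed by msm_wdog_dt_to_pdata() below.
+ * Only the compatible string and property names are taken from this
+ * driver; addresses and values are placeholders:
+ *
+ *	watchdog@f9017000 {
+ *		compatible = "qcom,msm-watchdog";
+ *		reg = <0xf9017000 0x1000>;
+ *		interrupts = <0 3 0>, <0 4 0>;
+ *		qcom,bark-time = <11000>;
+ *		qcom,pet-time = <10000>;
+ *		qcom,ipi-ping = <1>;
+ *	};
+ */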
+static int __devinit msm_wdog_dt_to_pdata(struct platform_device *pdev,
+					struct msm_watchdog_data *pdata)
+{
+	struct device_node *node = pdev->dev.of_node;
+	struct resource *wdog_resource;
+	int ret;
+
+	wdog_resource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!wdog_resource)
+		return -ENXIO;
+	pdata->size = resource_size(wdog_resource);
+	pdata->phys_base = wdog_resource->start;
+	if (unlikely(!(devm_request_region(&pdev->dev, pdata->phys_base,
+					pdata->size, "msm-watchdog")))) {
+		dev_err(&pdev->dev, "%s cannot reserve watchdog region\n",
+								__func__);
+		return -ENXIO;
+	}
+	pdata->base  = devm_ioremap(&pdev->dev, pdata->phys_base,
+							pdata->size);
+	if (!pdata->base) {
+		dev_err(&pdev->dev, "%s cannot map wdog register space\n",
+				__func__);
+		return -ENXIO;
+	}
+
+	pdata->bark_irq = platform_get_irq(pdev, 0);
+	pdata->bite_irq = platform_get_irq(pdev, 1);
+	ret = of_property_read_u32(node, "qcom,bark-time", &pdata->bark_time);
+	if (ret) {
+		dev_err(&pdev->dev, "reading bark time failed\n");
+		return -ENXIO;
+	}
+	ret = of_property_read_u32(node, "qcom,pet-time", &pdata->pet_time);
+	if (ret) {
+		dev_err(&pdev->dev, "reading pet time failed\n");
+		return -ENXIO;
+	}
+	ret = of_property_read_u32(node, "qcom,ipi-ping", &pdata->do_ipi_ping);
+	if (ret) {
+		dev_err(&pdev->dev, "reading do ipi failed\n");
+		return -ENXIO;
+	}
+	if (!pdata->bark_time) {
+		dev_err(&pdev->dev, "%s watchdog bark time not setup\n",
+								__func__);
+		return -ENXIO;
+	}
+	if (!pdata->pet_time) {
+		dev_err(&pdev->dev, "%s watchdog pet time not setup\n",
+								__func__);
+		return -ENXIO;
+	}
+	if (pdata->do_ipi_ping > 1) {
+		dev_err(&pdev->dev, "%s invalid watchdog ipi value\n",
+								__func__);
+		return -ENXIO;
+	}
+	dump_pdata(pdata);
+	return 0;
+}
+
+static int __devinit msm_watchdog_probe(struct platform_device *pdev)
+{
+	int ret;
+	struct msm_watchdog_data *wdog_dd;
+
+	if (!pdev->dev.of_node || !enable)
+		return -ENODEV;
+	wdog_dd = kzalloc(sizeof(struct msm_watchdog_data), GFP_KERNEL);
+	if (!wdog_dd)
+		return -ENOMEM;
+	ret = msm_wdog_dt_to_pdata(pdev, wdog_dd);
+	if (ret)
+		goto err;
+	wdog_dd->dev = &pdev->dev;
+	platform_set_drvdata(pdev, wdog_dd);
+	ret = devm_request_irq(&pdev->dev, wdog_dd->bark_irq, wdog_bark_handler,
+				IRQF_TRIGGER_RISING, "apps_wdog_bark", wdog_dd);
+	if (ret) {
+		dev_err(&pdev->dev, "failed to request bark irq\n");
+		ret = -ENXIO;
+		goto err;
+	}
+	cpumask_clear(&wdog_dd->alive_mask);
+	INIT_WORK(&wdog_dd->init_dogwork_struct, init_watchdog_work);
+	INIT_DELAYED_WORK(&wdog_dd->dogwork_struct, pet_watchdog_work);
+	schedule_work_on(0, &wdog_dd->init_dogwork_struct);
+	return 0;
+err:
+	kzfree(wdog_dd);
+	return ret;
+}
+
+static const struct dev_pm_ops msm_watchdog_dev_pm_ops = {
+	.suspend_noirq = msm_watchdog_suspend,
+	.resume_noirq = msm_watchdog_resume,
+};
+
+static struct platform_driver msm_watchdog_driver = {
+	.probe = msm_watchdog_probe,
+	.remove = msm_watchdog_remove,
+	.driver = {
+		.name = MODULE_NAME,
+		.owner = THIS_MODULE,
+		.pm = &msm_watchdog_dev_pm_ops,
+		.of_match_table = msm_wdog_match_table,
+	},
+};
+
+static int __init init_watchdog(void)
+{
+	return platform_driver_register(&msm_watchdog_driver);
+}
+
+late_initcall(init_watchdog);
+MODULE_DESCRIPTION("MSM Watchdog Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/arch/arm/mach-msm/perf_event_msm_krait_l2.c b/arch/arm/mach-msm/perf_event_msm_krait_l2.c
index d82f4dd..22dbff3 100644
--- a/arch/arm/mach-msm/perf_event_msm_krait_l2.c
+++ b/arch/arm/mach-msm/perf_event_msm_krait_l2.c
@@ -14,6 +14,7 @@
 #include <linux/irq.h>
 #include <asm/pmu.h>
 #include <linux/platform_device.h>
+#include <linux/spinlock.h>
 
 #include <mach/msm-krait-l2-accessors.h>
 
@@ -56,6 +57,44 @@
 
 #define	RESRX_VALUE_EN	0x80000000
 
+/*
+ * The L2 PMU is shared between all CPUs, so protect
+ * its bitmap access.
+ */
+struct pmu_constraints {
+	u64 pmu_bitmap;
+	raw_spinlock_t lock;
+} l2_pmu_constraints = {
+	.pmu_bitmap = 0,
+	.lock = __RAW_SPIN_LOCK_UNLOCKED(l2_pmu_constraints.lock),
+};
+
+/* NRCCG format for perf RAW codes. */
+PMU_FORMAT_ATTR(l2_reg,	"config:12-15");
+PMU_FORMAT_ATTR(l2_code, "config:4-11");
+PMU_FORMAT_ATTR(l2_grp,	"config:0-3");
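+/*
+ * Illustrative usage (field values are placeholders): once this PMU and
+ * its "format" group are registered, a raw L2 event can be requested
+ * from userspace as, e.g.,
+ *	perf stat -e kraitl2/l2_reg=0x0,l2_code=0x2,l2_grp=0x1/
+ */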
+
+static struct attribute *msm_l2_ev_formats[] = {
+	&format_attr_l2_reg.attr,
+	&format_attr_l2_code.attr,
+	&format_attr_l2_grp.attr,
+	NULL,
+};
+
+/*
+ * Format group is essential to access PMUs from userspace
+ * via their .name field.
+ */
+static struct attribute_group msm_l2_pmu_format_group = {
+	.name = "format",
+	.attrs = msm_l2_ev_formats,
+};
+
+static const struct attribute_group *msm_l2_pmu_attr_grps[] = {
+	&msm_l2_pmu_format_group,
+	NULL,
+};
+
 static u32 l2_orig_filter_prefix = 0x000f0030;
 
 static u32 pmu_type;
@@ -358,7 +397,7 @@
 static int krait_l2_map_event(struct perf_event *event)
 {
 	if (pmu_type > 0 && pmu_type == event->attr.type)
-		return event->attr.config & 0xfffff;
+		return event->attr.config & 0xffff;
 	else
 		return -ENOENT;
 }
@@ -378,6 +417,50 @@
 		free_irq(irq, NULL);
 }
 
+static int msm_l2_test_set_ev_constraint(struct perf_event *event)
+{
+	u32 evt_type = event->attr.config & 0xffff;
+	u8 reg   = (evt_type & 0x0F000) >> 12;
+	u8 group =  evt_type & 0x0000F;
+	unsigned long flags;
+	int err = 0;
+	u64 bitmap_t;
+
+	raw_spin_lock_irqsave(&l2_pmu_constraints.lock, flags);
+
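+	/*
+	 * One bit per (reg, group) pair: a second event selecting the same
+	 * group of the same counter-selection register is rejected.
+	 */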
+	bitmap_t = 1ULL << ((reg * 4) + group);
+
+	if (!(l2_pmu_constraints.pmu_bitmap & bitmap_t)) {
+		l2_pmu_constraints.pmu_bitmap |= bitmap_t;
+		goto out;
+	}
+
+	/* Bit is already set. Constraint failed. */
+	err = -EPERM;
+out:
+	raw_spin_unlock_irqrestore(&l2_pmu_constraints.lock, flags);
+	return err;
+}
+
+static int msm_l2_clear_ev_constraint(struct perf_event *event)
+{
+	u32 evt_type = event->attr.config & 0xffff;
+	u8 reg   = (evt_type & 0x0F000) >> 12;
+	u8 group =  evt_type & 0x0000F;
+	unsigned long flags;
+	u64 bitmap_t;
+
+	raw_spin_lock_irqsave(&l2_pmu_constraints.lock, flags);
+
+	bitmap_t = 1ULL << ((reg * 4) + group);
+
+	/* Clear constraint bit. */
+	l2_pmu_constraints.pmu_bitmap &= ~bitmap_t;
+
+	raw_spin_unlock_irqrestore(&l2_pmu_constraints.lock, flags);
+	return 1;
+}
+
 static struct arm_pmu krait_l2_pmu = {
 	.id		=	ARM_PERF_PMU_ID_KRAIT_L2,
 	.type		=	ARM_PMU_DEVICE_L2CC,
@@ -396,13 +479,16 @@
 	.max_period	=	(1LLU << 32) - 1,
 	.get_hw_events	=	krait_l2_get_hw_events,
 	.num_events	=	MAX_KRAIT_L2_CTRS,
+	.test_set_event_constraints	= msm_l2_test_set_ev_constraint,
+	.clear_event_constraints	= msm_l2_clear_ev_constraint,
+	.pmu.attr_groups		= msm_l2_pmu_attr_grps,
 };
 
 static int __devinit krait_l2_pmu_device_probe(struct platform_device *pdev)
 {
 	krait_l2_pmu.plat_device = pdev;
 
-	if (!armpmu_register(&krait_l2_pmu, "krait-l2", -1))
+	if (!armpmu_register(&krait_l2_pmu, "kraitl2", -1))
 		pmu_type = krait_l2_pmu.pmu.type;
 
 	return 0;
diff --git a/arch/arm/mach-msm/perf_event_msm_l2.c b/arch/arm/mach-msm/perf_event_msm_l2.c
index 3310d92..e3ab64a 100644
--- a/arch/arm/mach-msm/perf_event_msm_l2.c
+++ b/arch/arm/mach-msm/perf_event_msm_l2.c
@@ -13,6 +13,7 @@
 #include <linux/irq.h>
 #include <asm/pmu.h>
 #include <linux/platform_device.h>
+#include <linux/spinlock.h>
 
 
 #define MAX_SCORPION_L2_CTRS 5
@@ -23,6 +24,47 @@
 #define SCORPION_L2_EVT_PREFIX 3
 #define SCORPION_MAX_L2_REG 4
 
+
+/*
+ * The L2 PMU is shared between all CPUs, so protect
+ * its bitmap access.
+ */
+struct pmu_constraints {
+	u64 pmu_bitmap;
+	raw_spinlock_t lock;
+} l2_pmu_constraints = {
+	.pmu_bitmap = 0,
+	.lock = __RAW_SPIN_LOCK_UNLOCKED(l2_pmu_constraints.lock),
+};
+
+/* NRCCG format for perf RAW codes. */
+PMU_FORMAT_ATTR(l2_prefix,	"config:16-19");
+PMU_FORMAT_ATTR(l2_reg,		"config:12-15");
+PMU_FORMAT_ATTR(l2_code,	"config:4-11");
+PMU_FORMAT_ATTR(l2_grp,		"config:0-3");
+
+static struct attribute *msm_l2_ev_formats[] = {
+	&format_attr_l2_prefix.attr,
+	&format_attr_l2_reg.attr,
+	&format_attr_l2_code.attr,
+	&format_attr_l2_grp.attr,
+	NULL,
+};
+
+/*
+ * Format group is essential to access PMUs from userspace
+ * via their .name field.
+ */
+static struct attribute_group msm_l2_pmu_format_group = {
+	.name = "format",
+	.attrs = msm_l2_ev_formats,
+};
+
+static const struct attribute_group *msm_l2_pmu_attr_grps[] = {
+	&msm_l2_pmu_format_group,
+	NULL,
+};
+
 static u32 pmu_type;
 
 static struct arm_pmu scorpion_l2_pmu;
@@ -713,6 +755,59 @@
 		free_irq(irq, NULL);
 }
 
+static int msm_l2_test_set_ev_constraint(struct perf_event *event)
+{
+	u32 evt_type = event->attr.config & 0xfffff;
+	u8 prefix = (evt_type & 0xF0000) >> 16;
+	u8 reg   = (evt_type & 0x0F000) >> 12;
+	u8 group =  evt_type & 0x0000F;
+	unsigned long flags;
+	int err = 0;
+	u64 bitmap_t;
+
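+	/* Events without an NRCCG prefix are not tracked in the bitmap. */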
+	if (!prefix)
+		return 0;
+
+	raw_spin_lock_irqsave(&l2_pmu_constraints.lock, flags);
+
+	bitmap_t = 1ULL << ((reg * 4) + group);
+
+	if (!(l2_pmu_constraints.pmu_bitmap & bitmap_t)) {
+		l2_pmu_constraints.pmu_bitmap |= bitmap_t;
+		goto out;
+	}
+
+	/* Bit is already set. Constraint failed. */
+	err = -EPERM;
+
+out:
+	raw_spin_unlock_irqrestore(&l2_pmu_constraints.lock, flags);
+	return err;
+}
+
+static int msm_l2_clear_ev_constraint(struct perf_event *event)
+{
+	u32 evt_type = event->attr.config & 0xfffff;
+	u8 prefix = (evt_type & 0xF0000) >> 16;
+	u8 reg   = (evt_type & 0x0F000) >> 12;
+	u8 group =  evt_type & 0x0000F;
+	unsigned long flags;
+	u64 bitmap_t;
+
+	if (!prefix)
+		return 0;
+
+	raw_spin_lock_irqsave(&l2_pmu_constraints.lock, flags);
+
+	bitmap_t = 1ULL << ((reg * 4) + group);
+
+	/* Clear constraint bit. */
+	l2_pmu_constraints.pmu_bitmap &= ~bitmap_t;
+
+	raw_spin_unlock_irqrestore(&l2_pmu_constraints.lock, flags);
+	return 1;
+}
+
 static struct arm_pmu scorpion_l2_pmu = {
 	.id		=	ARM_PERF_PMU_ID_SCORPIONMP_L2,
 	.type		=	ARM_PMU_DEVICE_L2CC,
@@ -731,13 +826,16 @@
 	.max_period	=	(1LLU << 32) - 1,
 	.get_hw_events	=	scorpion_l2_get_hw_events,
 	.num_events	=	MAX_SCORPION_L2_CTRS,
+	.test_set_event_constraints	= msm_l2_test_set_ev_constraint,
+	.clear_event_constraints	= msm_l2_clear_ev_constraint,
+	.pmu.attr_groups		= msm_l2_pmu_attr_grps,
 };
 
 static int __devinit scorpion_l2_pmu_device_probe(struct platform_device *pdev)
 {
 	scorpion_l2_pmu.plat_device = pdev;
 
-	if (!armpmu_register(&scorpion_l2_pmu, "scorpion-l2", -1))
+	if (!armpmu_register(&scorpion_l2_pmu, "scorpionl2", -1))
 		pmu_type = scorpion_l2_pmu.pmu.type;
 
 	return 0;
diff --git a/arch/arm/mach-msm/platsmp-8625.c b/arch/arm/mach-msm/platsmp-8625.c
index b31d94b..700f966 100644
--- a/arch/arm/mach-msm/platsmp-8625.c
+++ b/arch/arm/mach-msm/platsmp-8625.c
@@ -18,6 +18,7 @@
 #include <linux/smp.h>
 #include <linux/io.h>
 #include <linux/interrupt.h>
+#include <linux/irq.h>
 
 #include <asm/cacheflush.h>
 #include <asm/hardware/gic.h>
diff --git a/arch/arm/mach-msm/qdsp6v2/audio_acdb.c b/arch/arm/mach-msm/qdsp6v2/audio_acdb.c
index e7a81d3..22779b4 100644
--- a/arch/arm/mach-msm/qdsp6v2/audio_acdb.c
+++ b/arch/arm/mach-msm/qdsp6v2/audio_acdb.c
@@ -617,11 +617,14 @@
 {
 	if (atomic64_read(&acdb_data.mem_len)) {
 		mutex_lock(&acdb_data.acdb_mutex);
+		atomic_set(&acdb_data.vocstrm_total_cal_size, 0);
+		atomic_set(&acdb_data.vocproc_total_cal_size, 0);
+		atomic_set(&acdb_data.vocvol_total_cal_size, 0);
+		atomic64_set(&acdb_data.mem_len, 0);
 		ion_unmap_kernel(acdb_data.ion_client, acdb_data.ion_handle);
 		ion_free(acdb_data.ion_client, acdb_data.ion_handle);
 		ion_client_destroy(acdb_data.ion_client);
 		mutex_unlock(&acdb_data.acdb_mutex);
-		atomic64_set(&acdb_data.mem_len, 0);
 	}
 	return 0;
 }
@@ -666,11 +669,11 @@
 		goto err_ion_handle;
 	}
 	kvaddr = (unsigned long)kvptr;
-	mutex_unlock(&acdb_data.acdb_mutex);
-
 	atomic64_set(&acdb_data.paddr, paddr);
 	atomic64_set(&acdb_data.kvaddr, kvaddr);
 	atomic64_set(&acdb_data.mem_len, mem_len);
+	mutex_unlock(&acdb_data.acdb_mutex);
+
 	pr_debug("%s done! paddr = 0x%lx, "
 		"kvaddr = 0x%lx, len = x%lx\n",
 		 __func__,
diff --git a/arch/arm/mach-msm/qdsp6v2/audio_multi_aac.c b/arch/arm/mach-msm/qdsp6v2/audio_multi_aac.c
index 9253056..fbd94c5 100644
--- a/arch/arm/mach-msm/qdsp6v2/audio_multi_aac.c
+++ b/arch/arm/mach-msm/qdsp6v2/audio_multi_aac.c
@@ -42,13 +42,16 @@
 		struct asm_aac_cfg aac_cfg;
 		struct msm_audio_aac_config *aac_config;
 		uint32_t sbr_ps = 0x00;
+		aac_config = (struct msm_audio_aac_config *)audio->codec_cfg;
+		aac_cfg.ch_cfg = aac_config->channel_configuration;
+		aac_cfg.sample_rate =  audio->pcm_cfg.sample_rate;
 		pr_debug("%s: AUDIO_START session_id[%d]\n", __func__,
 						audio->ac->session);
 		if (audio->feedback == NON_TUNNEL_MODE) {
 			/* Configure PCM output block */
-			rc = q6asm_enc_cfg_blk_pcm(audio->ac,
-				0, /*native sampling rate*/
-				0 /*native channel count*/);
+			rc = q6asm_enc_cfg_blk_pcm_native(audio->ac,
+				aac_cfg.sample_rate,
+				aac_cfg.ch_cfg);
 			if (rc < 0) {
 				pr_err("pcm output block config failed\n");
 				break;
@@ -58,7 +61,6 @@
 		rc = q6asm_enable_sbrps(audio->ac, sbr_ps);
 		if (rc < 0)
 			pr_err("sbr-ps enable failed\n");
-		aac_config = (struct msm_audio_aac_config *)audio->codec_cfg;
 		if (aac_config->sbr_ps_on_flag)
 			aac_cfg.aot = AAC_ENC_MODE_EAAC_P;
 		else if (aac_config->sbr_on_flag)
@@ -87,8 +89,6 @@
 			aac_config->aac_scalefactor_data_resilience_flag;
 		aac_cfg.spectral_data_resilience =
 			aac_config->aac_spectral_data_resilience_flag;
-		aac_cfg.ch_cfg = aac_config->channel_configuration;
-		aac_cfg.sample_rate =  audio->pcm_cfg.sample_rate;
 
 		pr_debug("%s:format=%x aot=%d  ch=%d sr=%d\n",
 			__func__, aac_cfg.format,
@@ -146,16 +146,14 @@
 				AUDIO_AAC_DUAL_MONO_PL_PR) ||
 				(aac_config->dual_mono_mode >
 				AUDIO_AAC_DUAL_MONO_PL_SR)) {
-				pr_err("%s:AUDIO_SET_AAC_CONFIG: Invalid"
-					"dual_mono mode =%d\n", __func__,
-					aac_config->dual_mono_mode);
+				pr_err("%s:AUDIO_SET_AAC_CONFIG: Invalid dual_mono mode =%d\n",
+					 __func__, aac_config->dual_mono_mode);
 			} else {
 				/* convert the data from user into sce_left
 				 * and sce_right based on the definitions
 				 */
-				pr_debug("%s: AUDIO_SET_AAC_CONFIG: modify"
-					 "dual_mono mode =%d\n", __func__,
-					 aac_config->dual_mono_mode);
+				pr_debug("%s: AUDIO_SET_AAC_CONFIG: modify dual_mono mode =%d\n",
+					 __func__, aac_config->dual_mono_mode);
 				switch (aac_config->dual_mono_mode) {
 				case AUDIO_AAC_DUAL_MONO_PL_PR:
 					sce_left = 1;
@@ -178,8 +176,8 @@
 				rc = q6asm_cfg_dual_mono_aac(audio->ac,
 							sce_left, sce_right);
 				if (rc < 0)
-					pr_err("%s: asm cmd dualmono failed"
-						" rc=%d\n", __func__, rc);
+					pr_err("%s: asm cmd dualmono failed rc=%d\n",
+								 __func__, rc);
 			}			break;
 		}
 		break;
@@ -212,8 +210,8 @@
 	audio->codec_cfg = kzalloc(sizeof(struct msm_audio_aac_config),
 					GFP_KERNEL);
 	if (audio->codec_cfg == NULL) {
-		pr_err("%s: Could not allocate memory for aac"
-			"config\n", __func__);
+		pr_err("%s: Could not allocate memory for aac config\n",
+							 __func__);
 		kfree(audio);
 		return -ENOMEM;
 	}
diff --git a/arch/arm/mach-msm/qdsp6v2/audio_utils_aio.c b/arch/arm/mach-msm/qdsp6v2/audio_utils_aio.c
index 6a99be2..fdc596d 100644
--- a/arch/arm/mach-msm/qdsp6v2/audio_utils_aio.c
+++ b/arch/arm/mach-msm/qdsp6v2/audio_utils_aio.c
@@ -33,7 +33,7 @@
 	return 0;
 }
 
-ssize_t audio_aio_debug_read(struct file *file, char __user * buf,
+ssize_t audio_aio_debug_read(struct file *file, char __user *buf,
 				size_t count, loff_t *ppos)
 {
 	const int debug_bufmax = 4096;
@@ -67,7 +67,7 @@
 }
 #endif
 
-static int insert_eos_buf(struct q6audio_aio *audio,
+int insert_eos_buf(struct q6audio_aio *audio,
 		struct audio_aio_buffer_node *buf_node)
 {
 	struct dec_meta_out *eos_buf = buf_node->kvaddr;
@@ -93,7 +93,7 @@
 		sizeof(meta_data->meta_out_dsp[0]);
 }
 
-static void extract_meta_out_info(struct q6audio_aio *audio,
+void extract_meta_out_info(struct q6audio_aio *audio,
 		struct audio_aio_buffer_node *buf_node, int dir)
 {
 	struct dec_meta_out *meta_data = buf_node->kvaddr;
@@ -114,8 +114,7 @@
 			&buf_node->meta_info.meta_out,
 			sizeof(struct dec_meta_out));
 		meta_data->meta_out_dsp[0].nflags = 0x00000000;
-		pr_debug("%s[%p]:o/p: msw_ts 0x%8x lsw_ts 0x%8x nflags 0x%8x,"
-				"num_frames = %d\n",
+		pr_debug("%s[%p]:o/p: msw_ts 0x%8x lsw_ts 0x%8x nflags 0x%8x, num_frames = %d\n",
 		__func__, audio,
 		((struct dec_meta_out *)buf_node->kvaddr)->\
 			meta_out_dsp[0].msw_ts,
@@ -293,8 +292,8 @@
 		kfree(used_buf);
 		if (list_empty(&audio->out_queue) &&
 			(audio->drv_status & ADRV_STATUS_FSYNC)) {
-			pr_debug("%s[%p]: list is empty, reached EOS in"
-				"Tunnel\n", __func__, audio);
+			pr_debug("%s[%p]: list is empty, reached EOS in Tunnel\n",
+				 __func__, audio);
 			wake_up(&audio->write_wait);
 		}
 	} else {
@@ -304,60 +303,6 @@
 	}
 }
 
-/* Read buffer from DSP / Handle Ack from DSP */
-void audio_aio_async_read_ack(struct q6audio_aio *audio, uint32_t token,
-			uint32_t *payload)
-{
-	unsigned long flags;
-	union msm_audio_event_payload event_payload;
-	struct audio_aio_buffer_node *filled_buf;
-
-	/* No active flush in progress */
-	if (audio->rflush)
-		return;
-
-	/* Statistics of read */
-	atomic_add(payload[2], &audio->in_bytes);
-	atomic_add(payload[7], &audio->in_samples);
-
-	spin_lock_irqsave(&audio->dsp_lock, flags);
-	BUG_ON(list_empty(&audio->in_queue));
-	filled_buf = list_first_entry(&audio->in_queue,
-					struct audio_aio_buffer_node, list);
-	if (token == (filled_buf->token)) {
-		list_del(&filled_buf->list);
-		spin_unlock_irqrestore(&audio->dsp_lock, flags);
-		event_payload.aio_buf = filled_buf->buf;
-		/* Read done Buffer due to flush/normal condition
-		after EOS event, so append EOS buffer */
-		if (audio->eos_rsp == 0x1) {
-			event_payload.aio_buf.data_len =
-			insert_eos_buf(audio, filled_buf);
-			/* Reset flag back to indicate eos intimated */
-			audio->eos_rsp = 0;
-		} else {
-			filled_buf->meta_info.meta_out.num_of_frames =
-			payload[7];
-			event_payload.aio_buf.data_len = payload[2] + \
-						payload[3] + \
-						sizeof(struct dec_meta_out);
-			pr_debug("%s[%p]:nr of frames 0x%8x len=%d\n",
-				__func__, audio,
-				filled_buf->meta_info.meta_out.num_of_frames,
-				event_payload.aio_buf.data_len);
-			extract_meta_out_info(audio, filled_buf, 0);
-			audio->eos_rsp = 0;
-		}
-		audio_aio_post_event(audio, AUDIO_EVENT_READ_DONE,
-					event_payload);
-		kfree(filled_buf);
-	} else {
-		pr_err("%s[%p]:expected=%lx ret=%x\n",
-			__func__, audio, filled_buf->token, token);
-		spin_unlock_irqrestore(&audio->dsp_lock, flags);
-	}
-}
-
 /* ------------------- device --------------------- */
 void audio_aio_async_out_flush(struct q6audio_aio *audio)
 {
@@ -404,8 +349,8 @@
 		/* Forcefull send o/p eos buffer after flush, if no eos response
 		 * received by dsp even after sending eos command */
 		if ((audio->eos_rsp != 1) && audio->eos_flag) {
-			pr_debug("%s[%p]: send eos on o/p buffer during"
-				"flush\n", __func__, audio);
+			pr_debug("%s[%p]: send eos on o/p buffer during flush\n",
+				 __func__, audio);
 			payload.aio_buf = buf_node->buf;
 			payload.aio_buf.data_len =
 					insert_eos_buf(audio, buf_node);
@@ -716,9 +661,7 @@
 	list_for_each_entry(region_elt, &audio->ion_region_queue, list) {
 		if (CONTAINS(region_elt, &t) || CONTAINS(&t, region_elt) ||
 			OVERLAPS(region_elt, &t)) {
-			pr_err("%s[%p]:region (vaddr %p len %ld)"
-				" clashes with registered region"
-				" (vaddr %p paddr %p len %ld)\n",
+			pr_err("%s[%p]:region (vaddr %p len %ld) clashes with registered region (vaddr %p paddr %p len %ld)\n",
 				__func__, audio, vaddr, len,
 				region_elt->vaddr,
 				(void *)region_elt->paddr, region_elt->len);
@@ -870,8 +813,7 @@
 	struct audio_client *ac;
 	struct audio_aio_write_param param;
 
-	pr_debug("%s[%p]: Send write buff %p phy %lx len %d"
-		"meta_enable = %d\n",
+	pr_debug("%s[%p]: Send write buff %p phy %lx len %d meta_enable = %d\n",
 		__func__, audio, buf_node, buf_node->paddr,
 		buf_node->buf.data_len,
 		audio->buf_cfg.meta_info_enable);
@@ -973,8 +915,8 @@
 		return -EFAULT;
 	}
 
-	pr_debug("%s[%p]:node %p dir %x buf_addr %p buf_len %d data_len"
-		"%d\n", __func__, audio, buf_node, dir, buf_node->buf.buf_addr,
+	pr_debug("%s[%p]:node %p dir %x buf_addr %p buf_len %d data_len %d\n",
+		 __func__, audio, buf_node, dir, buf_node->buf.buf_addr,
 		buf_node->buf.buf_len, buf_node->buf.data_len);
 	buf_node->paddr = audio_aio_ion_fixup(audio, buf_node->buf.buf_addr,
 						buf_node->buf.buf_len, 1,
@@ -1335,8 +1277,8 @@
 			break;
 		}
 		if (audio->feedback != NON_TUNNEL_MODE) {
-			pr_err("%s[%p]:Not sufficient permission to"
-				"change the playback mode\n", __func__, audio);
+			pr_err("%s[%p]:Not sufficient permission to change the playback mode\n",
+				 __func__, audio);
 			rc = -EACCES;
 			mutex_unlock(&audio->lock);
 			break;
@@ -1379,8 +1321,8 @@
 		break;
 	}
 	case AUDIO_GET_BUF_CFG: {
-		pr_debug("%s[%p]:session id %d: Get-buf-cfg: meta[%d]"
-			"framesperbuf[%d]\n", __func__, audio,
+		pr_debug("%s[%p]:session id %d: Get-buf-cfg: meta[%d] framesperbuf[%d]\n",
+			 __func__, audio,
 			audio->ac->session, audio->buf_cfg.meta_info_enable,
 			audio->buf_cfg.frames_per_buf);
 
diff --git a/arch/arm/mach-msm/qdsp6v2/audio_utils_aio.h b/arch/arm/mach-msm/qdsp6v2/audio_utils_aio.h
index 77288da..4a65304 100644
--- a/arch/arm/mach-msm/qdsp6v2/audio_utils_aio.h
+++ b/arch/arm/mach-msm/qdsp6v2/audio_utils_aio.h
@@ -195,6 +195,12 @@
 void audio_aio_async_read_ack(struct q6audio_aio *audio, uint32_t token,
 			uint32_t *payload);
 
+int insert_eos_buf(struct q6audio_aio *audio,
+		struct audio_aio_buffer_node *buf_node);
+
+void extract_meta_out_info(struct q6audio_aio *audio,
+		struct audio_aio_buffer_node *buf_node, int dir);
+
 int audio_aio_open(struct q6audio_aio *audio, struct file *file);
 int audio_aio_enable(struct q6audio_aio  *audio);
 void audio_aio_post_event(struct q6audio_aio *audio, int type,
@@ -206,6 +212,6 @@
 void audio_aio_async_in_flush(struct q6audio_aio *audio);
 #ifdef CONFIG_DEBUG_FS
 ssize_t audio_aio_debug_open(struct inode *inode, struct file *file);
-ssize_t audio_aio_debug_read(struct file *file, char __user * buf,
+ssize_t audio_aio_debug_read(struct file *file, char __user *buf,
 			size_t count, loff_t *ppos);
 #endif
diff --git a/arch/arm/mach-msm/qdsp6v2/q6audio_v1_aio.c b/arch/arm/mach-msm/qdsp6v2/q6audio_v1_aio.c
index 112de62..078eea8 100644
--- a/arch/arm/mach-msm/qdsp6v2/q6audio_v1_aio.c
+++ b/arch/arm/mach-msm/qdsp6v2/q6audio_v1_aio.c
@@ -97,9 +97,8 @@
 				"payload[2] = %d, payload[3] = %d\n", __func__,
 				audio, payload[0], payload[1], payload[2],
 				payload[3]);
-		pr_debug("%s[%p]: ASM_DATA_EVENT_SR_CM_CHANGE_NOTIFY, "
-				"sr(prev) = %d, chl(prev) = %d,",
-				__func__, audio, audio->pcm_cfg.sample_rate,
+		pr_debug("%s[%p]: ASM_DATA_EVENT_SR_CM_CHANGE_NOTIFY, sr(prev) = %d, chl(prev) = %d,",
+		__func__, audio, audio->pcm_cfg.sample_rate,
 		audio->pcm_cfg.channel_count);
 		audio->pcm_cfg.sample_rate = payload[0];
 		audio->pcm_cfg.channel_count = payload[1] & 0xFFFF;
@@ -111,3 +110,57 @@
 		break;
 	}
 }
+
+/* Read buffer from DSP / Handle Ack from DSP */
+void audio_aio_async_read_ack(struct q6audio_aio *audio, uint32_t token,
+			uint32_t *payload)
+{
+	unsigned long flags;
+	union msm_audio_event_payload event_payload;
+	struct audio_aio_buffer_node *filled_buf;
+
+	/* No active flush in progress */
+	if (audio->rflush)
+		return;
+
+	/* Statistics of read */
+	atomic_add(payload[2], &audio->in_bytes);
+	atomic_add(payload[7], &audio->in_samples);
+
+	spin_lock_irqsave(&audio->dsp_lock, flags);
+	BUG_ON(list_empty(&audio->in_queue));
+	filled_buf = list_first_entry(&audio->in_queue,
+					struct audio_aio_buffer_node, list);
+	if (token == (filled_buf->token)) {
+		list_del(&filled_buf->list);
+		spin_unlock_irqrestore(&audio->dsp_lock, flags);
+		event_payload.aio_buf = filled_buf->buf;
+		/* Read done Buffer due to flush/normal condition
+		after EOS event, so append EOS buffer */
+		if (audio->eos_rsp == 0x1) {
+			event_payload.aio_buf.data_len =
+			insert_eos_buf(audio, filled_buf);
+			/* Reset flag back to indicate eos intimated */
+			audio->eos_rsp = 0;
+		} else {
+			filled_buf->meta_info.meta_out.num_of_frames =
+			payload[7];
+			event_payload.aio_buf.data_len = payload[2] +
+						payload[3] +
+						sizeof(struct dec_meta_out);
+			pr_debug("%s[%p]:nr of frames 0x%8x len=%d\n",
+				__func__, audio,
+				filled_buf->meta_info.meta_out.num_of_frames,
+				event_payload.aio_buf.data_len);
+			extract_meta_out_info(audio, filled_buf, 0);
+			audio->eos_rsp = 0;
+		}
+		audio_aio_post_event(audio, AUDIO_EVENT_READ_DONE,
+					event_payload);
+		kfree(filled_buf);
+	} else {
+		pr_err("%s[%p]:expected=%lx ret=%x\n",
+			__func__, audio, filled_buf->token, token);
+		spin_unlock_irqrestore(&audio->dsp_lock, flags);
+	}
+}
diff --git a/arch/arm/mach-msm/qdsp6v2/q6audio_v2_aio.c b/arch/arm/mach-msm/qdsp6v2/q6audio_v2_aio.c
index aab7b19..ad4fc6f 100644
--- a/arch/arm/mach-msm/qdsp6v2/q6audio_v2_aio.c
+++ b/arch/arm/mach-msm/qdsp6v2/q6audio_v2_aio.c
@@ -91,14 +91,13 @@
 		break;
 	case ASM_DATA_EVENT_SR_CM_CHANGE_NOTIFY:
 	case ASM_DATA_EVENT_ENC_SR_CM_CHANGE_NOTIFY:
-
 		pr_debug("%s[%p]: ASM_DATA_EVENT_SR_CM_CHANGE_NOTIFY, payload[0]-sr = %d, payload[1]-chl = %d, payload[2] = %d, payload[3] = %d\n",
 					 __func__, audio, payload[0],
 					 payload[1], payload[2], payload[3]);
 
 		pr_debug("%s[%p]: ASM_DATA_EVENT_SR_CM_CHANGE_NOTIFY, sr(prev) = %d, chl(prev) = %d,",
-				__func__, audio, audio->pcm_cfg.sample_rate,
-				audio->pcm_cfg.channel_count);
+		__func__, audio, audio->pcm_cfg.sample_rate,
+		audio->pcm_cfg.channel_count);
 
 		audio->pcm_cfg.sample_rate = payload[0];
 		audio->pcm_cfg.channel_count = payload[1] & 0xFFFF;
@@ -110,3 +109,61 @@
 		break;
 	}
 }
+
+/* Read buffer from DSP / Handle Ack from DSP */
+void audio_aio_async_read_ack(struct q6audio_aio *audio, uint32_t token,
+			uint32_t *payload)
+{
+	unsigned long flags;
+	union msm_audio_event_payload event_payload;
+	struct audio_aio_buffer_node *filled_buf;
+	pr_debug("%s\n", __func__);
+
+	/* No active flush in progress */
+	if (audio->rflush)
+		return;
+
+	/* Statistics of read */
+	atomic_add(payload[4], &audio->in_bytes);
+	atomic_add(payload[9], &audio->in_samples);
+
+	spin_lock_irqsave(&audio->dsp_lock, flags);
+	BUG_ON(list_empty(&audio->in_queue));
+	filled_buf = list_first_entry(&audio->in_queue,
+					struct audio_aio_buffer_node, list);
+
+	pr_debug("%s: token = 0x%x, filled_buf->token = 0x%lx",
+				 __func__, token, filled_buf->token);
+	if (token == (filled_buf->token)) {
+		list_del(&filled_buf->list);
+		spin_unlock_irqrestore(&audio->dsp_lock, flags);
+		event_payload.aio_buf = filled_buf->buf;
+		/* Read completed (flush or normal condition) after the EOS
+		event, so append the EOS buffer */
+		if (audio->eos_rsp == 0x1) {
+			event_payload.aio_buf.data_len =
+			insert_eos_buf(audio, filled_buf);
+			/* Reset flag back to indicate eos intimated */
+			audio->eos_rsp = 0;
+		} else {
+			filled_buf->meta_info.meta_out.num_of_frames =
+							payload[9];
+			event_payload.aio_buf.data_len = payload[4] +
+				payload[5] + sizeof(struct dec_meta_out);
+			pr_debug("%s[%p]:nr of frames 0x%8x len=%d\n",
+				__func__, audio,
+				filled_buf->meta_info.meta_out.num_of_frames,
+				event_payload.aio_buf.data_len);
+			extract_meta_out_info(audio, filled_buf, 0);
+			audio->eos_rsp = 0;
+		}
+		pr_debug("%s, posting read done to the app here\n", __func__);
+		audio_aio_post_event(audio, AUDIO_EVENT_READ_DONE,
+					event_payload);
+		kfree(filled_buf);
+	} else {
+		pr_err("%s[%p]:expected=%lx ret=%x\n",
+			__func__, audio, filled_buf->token, token);
+		spin_unlock_irqrestore(&audio->dsp_lock, flags);
+	}
+}
diff --git a/arch/arm/mach-msm/qdss-etb.c b/arch/arm/mach-msm/qdss-etb.c
index 7837af0..ccea2fa 100644
--- a/arch/arm/mach-msm/qdss-etb.c
+++ b/arch/arm/mach-msm/qdss-etb.c
@@ -388,11 +388,18 @@
 	return 0;
 }
 
+static struct of_device_id etb_match[] = {
+	{.compatible = "qcom,msm-etb"},
+	{}
+};
+
 static struct platform_driver etb_driver = {
 	.probe          = etb_probe,
 	.remove         = __devexit_p(etb_remove),
 	.driver         = {
 		.name   = "msm_etb",
+		.owner	= THIS_MODULE,
+		.of_match_table = etb_match,
 	},
 };
 
diff --git a/arch/arm/mach-msm/qdss-etm.c b/arch/arm/mach-msm/qdss-etm.c
index ca6e0c6..e2a38de 100644
--- a/arch/arm/mach-msm/qdss-etm.c
+++ b/arch/arm/mach-msm/qdss-etm.c
@@ -1305,11 +1305,18 @@
 	return 0;
 }
 
+static struct of_device_id etm_match[] = {
+	{.compatible = "qcom,msm-etm"},
+	{}
+};
+
 static struct platform_driver etm_driver = {
 	.probe          = etm_probe,
 	.remove         = __devexit_p(etm_remove),
 	.driver         = {
 		.name   = "msm_etm",
+		.owner	= THIS_MODULE,
+		.of_match_table = etm_match,
 	},
 };
 
diff --git a/arch/arm/mach-msm/qdss-funnel.c b/arch/arm/mach-msm/qdss-funnel.c
index 52eb2b6..1c19ebd 100644
--- a/arch/arm/mach-msm/qdss-funnel.c
+++ b/arch/arm/mach-msm/qdss-funnel.c
@@ -208,11 +208,18 @@
 	return 0;
 }
 
+static struct of_device_id funnel_match[] = {
+	{.compatible = "qcom,msm-funnel"},
+	{}
+};
+
 static struct platform_driver funnel_driver = {
 	.probe          = funnel_probe,
 	.remove         = __devexit_p(funnel_remove),
 	.driver         = {
 		.name   = "msm_funnel",
+		.owner	= THIS_MODULE,
+		.of_match_table = funnel_match,
 	},
 };
 
diff --git a/arch/arm/mach-msm/qdss-stm.c b/arch/arm/mach-msm/qdss-stm.c
index 9ce6318..0d44c1a 100644
--- a/arch/arm/mach-msm/qdss-stm.c
+++ b/arch/arm/mach-msm/qdss-stm.c
@@ -571,11 +571,18 @@
 	return 0;
 }
 
+static struct of_device_id stm_match[] = {
+	{.compatible = "qcom,msm-stm"},
+	{}
+};
+
 static struct platform_driver stm_driver = {
 	.probe          = stm_probe,
 	.remove         = __devexit_p(stm_remove),
 	.driver         = {
 		.name   = "msm_stm",
+		.owner	= THIS_MODULE,
+		.of_match_table = stm_match,
 	},
 };
 
diff --git a/arch/arm/mach-msm/qdss-tpiu.c b/arch/arm/mach-msm/qdss-tpiu.c
index fa15635..23905f0 100644
--- a/arch/arm/mach-msm/qdss-tpiu.c
+++ b/arch/arm/mach-msm/qdss-tpiu.c
@@ -117,11 +117,18 @@
 	return 0;
 }
 
+static struct of_device_id tpiu_match[] = {
+	{.compatible = "qcom,msm-tpiu"},
+	{}
+};
+
 static struct platform_driver tpiu_driver = {
 	.probe          = tpiu_probe,
 	.remove         = __devexit_p(tpiu_remove),
 	.driver         = {
 		.name   = "msm_tpiu",
+		.owner	= THIS_MODULE,
+		.of_match_table = tpiu_match,
 	},
 };
 
diff --git a/arch/arm/mach-msm/qdss.c b/arch/arm/mach-msm/qdss.c
index fd1fc2b..83a6a36 100644
--- a/arch/arm/mach-msm/qdss.c
+++ b/arch/arm/mach-msm/qdss.c
@@ -41,7 +41,7 @@
  */
 struct qdss_ctx {
 	struct kobject			*modulekobj;
-	struct msm_qdss_platform_data	*pdata;
+	uint8_t				afamily;
 	struct list_head		sources;	/* S: sources list */
 	struct mutex			sources_mutex;
 	uint8_t				sink_count;	/* I: sink count */
@@ -120,7 +120,7 @@
 	if (ret)
 		goto err;
 
-	if ((qdss.pdata)->afamily) {
+	if (qdss.afamily) {
 		mutex_lock(&qdss.sink_mutex);
 		if (qdss.sink_count == 0) {
 			etb_disable();
@@ -154,7 +154,7 @@
 	if (!src)
 		return;
 
-	if ((qdss.pdata)->afamily) {
+	if (qdss.afamily) {
 		mutex_lock(&qdss.sink_mutex);
 		if (WARN(qdss.sink_count == 0, "qdss is unbalanced\n"))
 			goto out;
@@ -187,7 +187,7 @@
  */
 void qdss_disable_sink(void)
 {
-	if ((qdss.pdata)->afamily) {
+	if (qdss.afamily) {
 		etb_dump();
 		etb_disable();
 	}
@@ -330,24 +330,20 @@
 static int __devinit qdss_probe(struct platform_device *pdev)
 {
 	int ret;
-	struct qdss_source *src_table;
-	size_t num_srcs;
+	struct msm_qdss_platform_data *pdata;
 
 	mutex_init(&qdss.sources_mutex);
 	mutex_init(&qdss.clk_mutex);
 	mutex_init(&qdss.sink_mutex);
 
-	if (pdev->dev.platform_data == NULL) {
-		pr_err("%s: platform data is NULL\n", __func__);
-		ret = -ENODEV;
-		goto err_pdata;
-	}
-	qdss.pdata = pdev->dev.platform_data;
-
 	INIT_LIST_HEAD(&qdss.sources);
-	src_table = (qdss.pdata)->src_table;
-	num_srcs = (qdss.pdata)->size;
-	qdss_add_sources(src_table, num_srcs);
+
+	pdata = pdev->dev.platform_data;
+	if (!pdata)
+		goto err_pdata;
+
+	qdss.afamily = pdata->afamily;
+	qdss_add_sources(pdata->src_table, pdata->size);
 
 	pr_info("QDSS arch initialized\n");
 	return 0;
@@ -369,11 +365,18 @@
 	return 0;
 }
 
+static struct of_device_id qdss_match[] = {
+	{.compatible = "qcom,msm-qdss"},
+	{}
+};
+
 static struct platform_driver qdss_driver = {
 	.probe          = qdss_probe,
 	.remove         = __devexit_p(qdss_remove),
 	.driver         = {
 		.name   = "msm_qdss",
+		.owner	= THIS_MODULE,
+		.of_match_table = qdss_match,
 	},
 };
 
diff --git a/arch/arm/mach-msm/scm.c b/arch/arm/mach-msm/scm.c
index ac48990..6052918 100644
--- a/arch/arm/mach-msm/scm.c
+++ b/arch/arm/mach-msm/scm.c
@@ -297,6 +297,9 @@
 		__asmeq("%1", "r0")
 		__asmeq("%2", "r1")
 		__asmeq("%3", "r2")
+#ifdef REQUIRES_SEC
+			".arch_extension sec\n"
+#endif
 		"smc	#0	@ switch to secure world\n"
 		: "=r" (r0)
 		: "r" (r0), "r" (r1), "r" (r2)
@@ -329,6 +332,9 @@
 		__asmeq("%2", "r1")
 		__asmeq("%3", "r2")
 		__asmeq("%4", "r3")
+#ifdef REQUIRES_SEC
+			".arch_extension sec\n"
+#endif
 		"smc	#0	@ switch to secure world\n"
 		: "=r" (r0)
 		: "r" (r0), "r" (r1), "r" (r2), "r" (r3));
@@ -356,6 +362,9 @@
 		__asmeq("%4", "r1")
 		__asmeq("%5", "r2")
 		__asmeq("%6", "r3")
+#ifdef REQUIRES_SEC
+			".arch_extension sec\n"
+#endif
 		"smc	#0	@ switch to secure world\n"
 		: "=r" (r0), "=r" (r1), "=r" (r2)
 		: "r" (r0), "r" (r1), "r" (r2), "r" (r3), "r" (r4), "r" (r5));
@@ -388,6 +397,9 @@
 			__asmeq("%1", "r1")
 			__asmeq("%2", "r0")
 			__asmeq("%3", "r1")
+#ifdef REQUIRES_SEC
+			".arch_extension sec\n"
+#endif
 			"smc	#0	@ switch to secure world\n"
 			: "=r" (r0), "=r" (r1)
 			: "r" (r0), "r" (r1)
diff --git a/arch/arm/mach-msm/smd_pkt.c b/arch/arm/mach-msm/smd_pkt.c
index 8d567f8..b6f74f9 100644
--- a/arch/arm/mach-msm/smd_pkt.c
+++ b/arch/arm/mach-msm/smd_pkt.c
@@ -43,6 +43,7 @@
 #define NUM_SMD_PKT_PORTS 15
 #endif
 
+#define PDRIVER_NAME_MAX_SIZE 32
 #define LOOPBACK_INX (NUM_SMD_PKT_PORTS - 1)
 
 #define DEVICE_NAME "smdpkt"
@@ -52,6 +53,7 @@
 	struct cdev cdev;
 	struct device *devicep;
 	void *pil;
+	char pdriver_name[PDRIVER_NAME_MAX_SIZE];
 	struct platform_driver driver;
 
 	struct smd_channel *ch;
@@ -729,7 +731,10 @@
 	int i;
 
 	for (i = 0; i < NUM_SMD_PKT_PORTS; i++) {
-		if (!strncmp(pdev->name, smd_ch_name[i], SMD_MAX_CH_NAME_LEN)) {
+		if (smd_ch_edge[i] == pdev->id
+		    && !strncmp(pdev->name, smd_ch_name[i],
+				SMD_MAX_CH_NAME_LEN)
+		    && smd_pkt_devp[i]->driver.probe) {
 			complete_all(&smd_pkt_devp[i]->ch_allocated);
 			D_STATUS("%s allocated SMD ch for smd_pkt_dev id:%d\n",
 				 __func__, i);
@@ -772,8 +777,10 @@
 	if (smd_pkt_devp->ch == 0) {
 		init_completion(&smd_pkt_devp->ch_allocated);
 		smd_pkt_devp->driver.probe = smd_pkt_dummy_probe;
-		smd_pkt_devp->driver.driver.name =
-			smd_ch_name[smd_pkt_devp->i];
+		scnprintf(smd_pkt_devp->pdriver_name, PDRIVER_NAME_MAX_SIZE,
+			  "%s.%d", smd_ch_name[smd_pkt_devp->i],
+			  smd_ch_edge[smd_pkt_devp->i]);
+		smd_pkt_devp->driver.driver.name = smd_pkt_devp->pdriver_name;
 		smd_pkt_devp->driver.driver.owner = THIS_MODULE;
 		r = platform_driver_register(&smd_pkt_devp->driver);
 		if (r) {
@@ -870,8 +877,10 @@
 		pil_put(smd_pkt_devp->pil);
 
 release_pd:
-	if (r < 0)
+	if (r < 0) {
 		platform_driver_unregister(&smd_pkt_devp->driver);
+		smd_pkt_devp->driver.probe = NULL;
+	}
 out:
 	mutex_unlock(&smd_pkt_devp->ch_lock);
 
@@ -904,6 +913,7 @@
 		smd_pkt_devp->blocking_write = 0;
 		smd_pkt_devp->poll_mode = 0;
 		platform_driver_unregister(&smd_pkt_devp->driver);
+		smd_pkt_devp->driver.probe = NULL;
 		if (smd_pkt_devp->pil)
 			pil_put(smd_pkt_devp->pil);
 	}
diff --git a/block/Kconfig.iosched b/block/Kconfig.iosched
index 3199b76..4142d91 100644
--- a/block/Kconfig.iosched
+++ b/block/Kconfig.iosched
@@ -12,6 +12,17 @@
 	  that do their own scheduling and require only minimal assistance from
 	  the kernel.
 
+config IOSCHED_TEST
+	tristate "Test I/O scheduler"
+	depends on DEBUG_FS
+	default m
+	---help---
+	  The test I/O scheduler is a duplicate of the noop scheduler with
+	  the addition of a test utility.
+	  It allows testing a block device by dispatching specific requests
+	  according to the test case and declares PASS/FAIL according to the
+	  request completion error code.
+
 config IOSCHED_DEADLINE
 	tristate "Deadline I/O scheduler"
 	default y
diff --git a/block/Makefile b/block/Makefile
index 39b76ba..436b220 100644
--- a/block/Makefile
+++ b/block/Makefile
@@ -15,6 +15,7 @@
 obj-$(CONFIG_IOSCHED_NOOP)	+= noop-iosched.o
 obj-$(CONFIG_IOSCHED_DEADLINE)	+= deadline-iosched.o
 obj-$(CONFIG_IOSCHED_CFQ)	+= cfq-iosched.o
+obj-$(CONFIG_IOSCHED_TEST)	+= test-iosched.o
 
 obj-$(CONFIG_BLOCK_COMPAT)	+= compat_ioctl.o
 obj-$(CONFIG_BLK_DEV_INTEGRITY)	+= blk-integrity.o
diff --git a/block/blk-core.c b/block/blk-core.c
index 038d11f..68d7158 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -977,8 +977,6 @@
 {
 	struct request *rq;
 
-	BUG_ON(rw != READ && rw != WRITE);
-
 	spin_lock_irq(q->queue_lock);
 	if (gfp_mask & __GFP_WAIT)
 		rq = get_request_wait(q, rw, NULL);
@@ -1311,6 +1309,7 @@
 	req->ioprio = bio_prio(bio);
 	blk_rq_bio_prep(req->q, req, bio);
 }
+EXPORT_SYMBOL(init_request_from_bio);
 
 void blk_queue_bio(struct request_queue *q, struct bio *bio)
 {
diff --git a/block/test-iosched.c b/block/test-iosched.c
new file mode 100644
index 0000000..3c38734
--- /dev/null
+++ b/block/test-iosched.c
@@ -0,0 +1,1019 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * The test scheduler allows testing the block device by dispatching
+ * specific requests according to the test case and declares PASS/FAIL
+ * according to the request completion error code.
+ * Each test is exposed via debugfs and can be triggered by writing to
+ * the debugfs file.
+ *
+ */
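+
+/*
+ * Debugfs layout (created in test_debugfs_init() below): a "test-iosched"
+ * directory containing a "tests" and a "utils" sub-directory, where "utils"
+ * holds the "test_result" and "start_sector" entries. A block device test
+ * utility is expected to add its own trigger files under the "tests"
+ * directory; writing to such a file starts the test and the result can then
+ * be read back from utils/test_result. The directory is typically reachable
+ * under /sys/kernel/debug when debugfs is mounted there; the trigger file
+ * names themselves are defined by the test utility.
+ */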
+
+/* elevator test iosched */
+#include <linux/blkdev.h>
+#include <linux/elevator.h>
+#include <linux/bio.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/debugfs.h>
+#include <linux/test-iosched.h>
+#include <linux/delay.h>
+#include "blk.h"
+
+#define MODULE_NAME "test-iosched"
+#define WR_RD_START_REQ_ID 1234
+#define UNIQUE_START_REQ_ID 5678
+#define TIMEOUT_TIMER_MS 40000
+#define TEST_MAX_TESTCASE_ROUNDS 15
+
+#define test_pr_debug(fmt, args...) pr_debug("%s: "fmt"\n", MODULE_NAME, args)
+#define test_pr_info(fmt, args...) pr_info("%s: "fmt"\n", MODULE_NAME, args)
+#define test_pr_err(fmt, args...) pr_err("%s: "fmt"\n", MODULE_NAME, args)
+
+static DEFINE_SPINLOCK(blk_dev_test_list_lock);
+static LIST_HEAD(blk_dev_test_list);
+static struct test_data *ptd;
+
+/* Get the request after `test_rq' in the test requests list */
+static struct test_request *
+latter_test_request(struct request_queue *q,
+				 struct test_request *test_rq)
+{
+	struct test_data *td = q->elevator->elevator_data;
+
+	if (test_rq->queuelist.next == &td->test_queue)
+		return NULL;
+	return list_entry(test_rq->queuelist.next, struct test_request,
+			  queuelist);
+}
+
+/**
+ * test_iosched_get_req_queue() - returns the request queue
+ * served by the scheduler
+ */
+struct request_queue *test_iosched_get_req_queue(void)
+{
+	if (!ptd)
+		return NULL;
+
+	return ptd->req_q;
+}
+EXPORT_SYMBOL(test_iosched_get_req_queue);
+
+/**
+ * test_iosched_mark_test_completion() - Wake up the debugfs
+ * thread waiting on the test completion
+ */
+void test_iosched_mark_test_completion(void)
+{
+	if (!ptd)
+		return;
+
+	ptd->test_state = TEST_COMPLETED;
+	wake_up(&ptd->wait_q);
+}
+EXPORT_SYMBOL(test_iosched_mark_test_completion);
+
+/* Check if all the queued test requests were completed */
+static void check_test_completion(void)
+{
+	struct test_request *test_rq;
+	struct request *rq;
+
+	list_for_each_entry(test_rq, &ptd->test_queue, queuelist) {
+		rq = test_rq->rq;
+		if (!test_rq->req_completed)
+			return;
+	}
+
+	test_pr_info("%s: Test is completed", __func__);
+
+	test_iosched_mark_test_completion();
+}
+
+/*
+ * A callback to be called per bio completion.
+ * Frees the bio memory.
+ */
+static void end_test_bio(struct bio *bio, int err)
+{
+	if (err)
+		clear_bit(BIO_UPTODATE, &bio->bi_flags);
+
+	bio_put(bio);
+}
+
+/*
+ * A callback to be called per request completion.
+ * The request memory is not freed here; it will be freed later, after the
+ * test results have been checked.
+ */
+static void end_test_req(struct request *rq, int err)
+{
+	struct test_request *test_rq;
+
+	test_rq = (struct test_request *)rq->elv.priv[0];
+	BUG_ON(!test_rq);
+
+	test_pr_info("%s: request %d completed, err=%d",
+	       __func__, test_rq->req_id, err);
+
+	test_rq->req_completed = 1;
+	test_rq->req_result = err;
+
+	check_test_completion();
+}
+
+/**
+ * test_iosched_add_unique_test_req() - Create and queue a non
+ * read/write request (such as FLUSH/DISCARD/SANITIZE).
+ * @is_err_expcted:	A flag to indicate if this request
+ *			should succeed or not
+ * @req_unique:		The type of request to add
+ * @start_sec:		start address of the first bio
+ * @nr_sects:		number of sectors in the request
+ * @end_req_io:		specific completion callback. When not
+ *			set, the default callback will be used
+ */
+int test_iosched_add_unique_test_req(int is_err_expcted,
+			enum req_unique_type req_unique,
+			int start_sec, int nr_sects, rq_end_io_fn *end_req_io)
+{
+	struct bio *bio;
+	struct request *rq;
+	int rw_flags;
+	struct test_request *test_rq;
+
+	if (!ptd)
+		return -ENODEV;
+
+	bio = bio_alloc(GFP_KERNEL, 0);
+	if (!bio) {
+		test_pr_err("%s: Failed to allocate a bio", __func__);
+		return -ENODEV;
+	}
+	bio_get(bio);
+	bio->bi_end_io = end_test_bio;
+
+	switch (req_unique) {
+	case REQ_UNIQUE_FLUSH:
+		bio->bi_rw = WRITE_FLUSH;
+		break;
+	case REQ_UNIQUE_DISCARD:
+		bio->bi_rw = REQ_WRITE | REQ_DISCARD;
+		bio->bi_size = nr_sects << 9;
+		bio->bi_sector = start_sec;
+		break;
+	default:
+		test_pr_err("%s: Invalid request type %d", __func__,
+			    req_unique);
+		bio_put(bio);
+		return -ENODEV;
+	}
+
+	rw_flags = bio_data_dir(bio);
+	if (bio->bi_rw & REQ_SYNC)
+		rw_flags |= REQ_SYNC;
+
+	rq = blk_get_request(ptd->req_q, rw_flags, GFP_KERNEL);
+	if (!rq) {
+		test_pr_err("%s: Failed to allocate a request", __func__);
+		bio_put(bio);
+		return -ENODEV;
+	}
+
+	init_request_from_bio(rq, bio);
+	if (end_req_io)
+		rq->end_io = end_req_io;
+	else
+		rq->end_io = end_test_req;
+
+	test_rq = kzalloc(sizeof(struct test_request), GFP_KERNEL);
+	if (!test_rq) {
+		test_pr_err("%s: Failed to allocate a test request", __func__);
+		bio_put(bio);
+		blk_put_request(rq);
+		return -ENODEV;
+	}
+	test_rq->req_completed = 0;
+	test_rq->req_result = -1;
+	test_rq->rq = rq;
+	test_rq->is_err_expected = is_err_expcted;
+	rq->elv.priv[0] = (void *)test_rq;
+	test_rq->req_id = ptd->unique_next_req_id++;
+
+	test_pr_debug(
+		"%s: added request %d to the test requests list, type = %d",
+		__func__, test_rq->req_id, req_unique);
+
+	list_add_tail(&test_rq->queuelist, &ptd->test_queue);
+
+	return 0;
+}
+EXPORT_SYMBOL(test_iosched_add_unique_test_req);
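+
+/*
+ * Example (an illustrative sketch only): queue a cache FLUSH that is
+ * expected to succeed and that uses the default end_test_req() completion
+ * callback:
+ *
+ *	ret = test_iosched_add_unique_test_req(0, REQ_UNIQUE_FLUSH, 0, 0, NULL);
+ */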
+
+/*
+ * Fill the request data buffer with the given pattern.
+ * If TEST_PATTERN_SEQUENTIAL is used, the buffer is filled with sequential
+ * numbers.
+ */
+static void fill_buf_with_pattern(int *buf, int num_bytes, int pattern)
+{
+	int i = 0;
+	int num_of_dwords = num_bytes/sizeof(int);
+
+	if (pattern == TEST_NO_PATTERN)
+		return;
+
+	/* num_bytes should be aligned to sizeof(int) */
+	BUG_ON((num_bytes % sizeof(int)) != 0);
+
+	if (pattern == TEST_PATTERN_SEQUENTIAL) {
+		for (i = 0; i < num_of_dwords; i++)
+			buf[i] = i;
+	} else {
+		for (i = 0; i < num_of_dwords; i++)
+			buf[i] = pattern;
+	}
+}
+
+/**
+ * test_iosched_add_wr_rd_test_req() - Create and queue a
+ * read/write request.
+ * @is_err_expcted:	A flag to indicate if this request
+ *			should succeed or not
+ * @direction:		READ/WRITE
+ * @start_sec:		start address of the first bio
+ * @num_bios:		number of BIOs to be allocated for the
+ *			request
+ * @pattern:		A pattern, to be written into the write
+ *			requests data buffer. In case of READ
+ *			request, the given pattern is kept as
+ *			the expected pattern. The expected
+ *			pattern will be compared in the test
+ *			check result function. If no comparison
+ *			is required, set pattern to
+ *			TEST_NO_PATTERN.
+ * @end_req_io:		specific completion callback. When not
+ *			set, the default callback will be used
+ *
+ * This function allocates the test request and the block
+ * request and calls blk_rq_map_kern which allocates the
+ * required BIO. The allocated test request and the block
+ * request memory is freed at the end of the test and the
+ * allocated BIO memory is freed by end_test_bio.
+ */
+int test_iosched_add_wr_rd_test_req(int is_err_expcted,
+		      int direction, int start_sec,
+		      int num_bios, int pattern, rq_end_io_fn *end_req_io)
+{
+	struct request *rq = NULL;
+	struct test_request *test_rq = NULL;
+	int rw_flags = 0;
+	int buf_size = 0;
+	int ret = 0, i = 0;
+	unsigned int *bio_ptr = NULL;
+	struct bio *bio = NULL;
+
+	if (!ptd)
+		return -ENODEV;
+
+	rw_flags = direction;
+
+	rq = blk_get_request(ptd->req_q, rw_flags, GFP_KERNEL);
+	if (!rq) {
+		test_pr_err("%s: Failed to allocate a request", __func__);
+		return -ENODEV;
+	}
+
+	test_rq = kzalloc(sizeof(struct test_request), GFP_KERNEL);
+	if (!test_rq) {
+		test_pr_err("%s: Failed to allocate test request", __func__);
+		blk_put_request(rq);
+		return -ENODEV;
+	}
+
+	buf_size = sizeof(unsigned int) * BIO_U32_SIZE * num_bios;
+	test_rq->bios_buffer = kzalloc(buf_size, GFP_KERNEL);
+	if (!test_rq->bios_buffer) {
+		test_pr_err("%s: Failed to allocate the data buf", __func__);
+		goto err;
+	}
+	test_rq->buf_size = buf_size;
+
+	if (direction == WRITE)
+		fill_buf_with_pattern(test_rq->bios_buffer,
+						   buf_size, pattern);
+	test_rq->wr_rd_data_pattern = pattern;
+
+	bio_ptr = test_rq->bios_buffer;
+	for (i = 0; i < num_bios; ++i) {
+		ret = blk_rq_map_kern(ptd->req_q, rq,
+				      (void *)bio_ptr,
+				      sizeof(unsigned int)*BIO_U32_SIZE,
+				      GFP_KERNEL);
+		if (ret) {
+			test_pr_err("%s: blk_rq_map_kern returned error %d",
+				    __func__, ret);
+			goto err;
+		}
+		bio_ptr += BIO_U32_SIZE;
+	}
+
+	if (end_req_io)
+		rq->end_io = end_req_io;
+	else
+		rq->end_io = end_test_req;
+	rq->__sector = start_sec;
+	rq->cmd_type |= REQ_TYPE_FS;
+
+	if (rq->bio) {
+		rq->bio->bi_sector = start_sec;
+		rq->bio->bi_end_io = end_test_bio;
+		bio = rq->bio;
+		while ((bio = bio->bi_next) != NULL)
+			bio->bi_end_io = end_test_bio;
+	}
+
+	ptd->num_of_write_bios += num_bios;
+	test_rq->req_id = ptd->wr_rd_next_req_id++;
+
+	test_rq->req_completed = 0;
+	test_rq->req_result = -1;
+	test_rq->rq = rq;
+	test_rq->is_err_expected = is_err_expcted;
+	rq->elv.priv[0] = (void *)test_rq;
+
+	test_pr_debug(
+		"%s: added request %d to the test requests list, buf_size=%d",
+		__func__, test_rq->req_id, buf_size);
+
+	list_add_tail(&test_rq->queuelist, &ptd->test_queue);
+
+	return 0;
+err:
+	blk_put_request(rq);
+	kfree(test_rq->bios_buffer);
+	kfree(test_rq);
+	return -ENODEV;
+}
+EXPORT_SYMBOL(test_iosched_add_wr_rd_test_req);
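+
+/*
+ * Example (an illustrative sketch only; the number of BIOs is arbitrary):
+ * queue a WRITE and then a READ of the same range with the same pattern, so
+ * that check_test_result() can compare the data read back against the
+ * pattern that was written:
+ *
+ *	test_iosched_add_wr_rd_test_req(0, WRITE, td->start_sector, 4,
+ *					TEST_PATTERN_SEQUENTIAL, NULL);
+ *	test_iosched_add_wr_rd_test_req(0, READ, td->start_sector, 4,
+ *					TEST_PATTERN_SEQUENTIAL, NULL);
+ */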
+
+/* Converts the testcase number into a string */
+static char *get_test_case_str(struct test_data *td)
+{
+	if (td->test_info.get_test_case_str_fn)
+		return td->test_info.get_test_case_str_fn(td);
+
+	return "Unknown testcase";
+}
+
+/*
+ * Verify that the test request data buffer includes the expected
+ * pattern
+ */
+static int compare_buffer_to_pattern(struct test_request *test_rq)
+{
+	int i = 0;
+	int num_of_dwords = test_rq->buf_size/sizeof(int);
+
+	/* num_bytes should be aligned to sizeof(int) */
+	BUG_ON((test_rq->buf_size % sizeof(int)) != 0);
+	BUG_ON(test_rq->bios_buffer == NULL);
+
+	if (test_rq->wr_rd_data_pattern == TEST_NO_PATTERN)
+		return 0;
+
+	if (test_rq->wr_rd_data_pattern == TEST_PATTERN_SEQUENTIAL) {
+		for (i = 0; i < num_of_dwords; i++) {
+			if (test_rq->bios_buffer[i] != i) {
+				test_pr_err(
+					"%s: wrong pattern 0x%x in index %d",
+					__func__, test_rq->bios_buffer[i], i);
+				return -EINVAL;
+			}
+		}
+	} else {
+		for (i = 0; i < num_of_dwords; i++) {
+			if (test_rq->bios_buffer[i] !=
+			    test_rq->wr_rd_data_pattern) {
+				test_pr_err(
+					"%s: wrong pattern 0x%x in index %d",
+					__func__, test_rq->bios_buffer[i], i);
+				return -EINVAL;
+			}
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Determine if the test passed or failed.
+ * The function checks the completion value of each test request and calls
+ * the test's check_test_result_fn callback for result checks that are
+ * specific to the test case.
+ */
+static int check_test_result(struct test_data *td)
+{
+	struct test_request *test_rq;
+	struct request *rq;
+	int res = 0;
+	static int run;
+
+	list_for_each_entry(test_rq, &ptd->test_queue, queuelist) {
+		rq = test_rq->rq;
+		if (!test_rq->req_completed) {
+			test_pr_err("%s: rq %d not completed", __func__,
+				    test_rq->req_id);
+			res = -EINVAL;
+			goto err;
+		}
+
+		if ((test_rq->req_result < 0) && !test_rq->is_err_expected) {
+			test_pr_err(
+				"%s: rq %d completed with err, not as expected",
+				__func__, test_rq->req_id);
+			res = -EINVAL;
+			goto err;
+		}
+		if ((test_rq->req_result == 0) && test_rq->is_err_expected) {
+			test_pr_err("%s: rq %d succeeded, not as expected",
+				    __func__, test_rq->req_id);
+			res = -EINVAL;
+			goto err;
+		}
+		if (rq_data_dir(test_rq->rq) == READ) {
+			res = compare_buffer_to_pattern(test_rq);
+			if (res) {
+				test_pr_err("%s: read pattern not as expected",
+					    __func__);
+				res = -EINVAL;
+				goto err;
+			}
+		}
+	}
+
+	if (td->test_info.check_test_result_fn) {
+		res = td->test_info.check_test_result_fn(td);
+		if (res)
+			goto err;
+	}
+
+	test_pr_info("%s: %s, run# %03d, PASSED",
+			    __func__, get_test_case_str(td), ++run);
+	td->test_result = TEST_PASSED;
+
+	return 0;
+err:
+	test_pr_err("%s: %s, run# %03d, FAILED",
+		    __func__, get_test_case_str(td), ++run);
+	td->test_result = TEST_FAILED;
+	return res;
+}
+
+/* Create and queue the required requests according to the test case */
+static int prepare_test(struct test_data *td)
+{
+	int ret = 0;
+
+	if (td->test_info.prepare_test_fn) {
+		ret = td->test_info.prepare_test_fn(td);
+		return ret;
+	}
+
+	return 0;
+}
+
+/* Run the test */
+static int run_test(struct test_data *td)
+{
+	int ret = 0;
+
+	if (td->test_info.run_test_fn) {
+		ret = td->test_info.run_test_fn(td);
+		return ret;
+	}
+
+	/*
+	 * Set the next_req pointer to the first request in the test requests
+	 * list
+	 */
+	if (!list_empty(&td->test_queue))
+		td->next_req = list_entry(td->test_queue.next,
+					  struct test_request, queuelist);
+	__blk_run_queue(td->req_q);
+
+	return 0;
+}
+
+/* Free the allocated test requests, their requests and BIOs buffer */
+static void free_test_requests(struct test_data *td)
+{
+	struct test_request *test_rq;
+			event_payload.aio_buf.data_len = payload[2] +
+						payload[3] +
+						sizeof(struct dec_meta_out);
+		list_del_init(&test_rq->queuelist);
+		blk_put_request(test_rq->rq);
+		kfree(test_rq->bios_buffer);
+		kfree(test_rq);
+	}
+}
+
+/*
+ * Do post test operations.
+ * Free the allocated test requests, their requests and BIOs buffer.
+ */
+static int post_test(struct test_data *td)
+{
+	int ret = 0;
+
+	if (td->test_info.post_test_fn)
+		ret = td->test_info.post_test_fn(td);
+
+	ptd->test_info.testcase = 0;
+	ptd->test_state = TEST_IDLE;
+
+	free_test_requests(td);
+
+	return ret;
+}
+
+/*
+ * The timer verifies that the test will be completed even if we don't get
+ * the completion callback for all the requests.
+ */
+static void test_timeout_handler(unsigned long data)
+{
+	struct test_data *td = (struct test_data *)data;
+
+	test_pr_info("%s: TIMEOUT timer expired", __func__);
+	td->test_state = TEST_COMPLETED;
+	wake_up(&td->wait_q);
+	return;
+}
+
+static unsigned int get_timeout_msec(struct test_data *td)
+{
+	if (td->test_info.timeout_msec)
+		return td->test_info.timeout_msec;
+	else
+		return TIMEOUT_TIMER_MS;
+}
+
+/**
+ * test_iosched_start_test() - Prepares and runs the test.
+ * @t_info:	the current test testcase and callbacks
+ *		functions
+ *
+ * The function also checks the test result upon test completion
+ */
+int test_iosched_start_test(struct test_info *t_info)
+{
+	int ret = 0;
+	unsigned timeout_msec;
+	int counter = 0;
+	char *test_name = NULL;
+
+	if (!ptd)
+		return -ENODEV;
+
+	if (!t_info) {
+		ptd->test_result = TEST_FAILED;
+		return -EINVAL;
+	}
+
+	do {
+		if (ptd->ignore_round)
+			/*
+			 * We ignored the last run due to FS write requests.
+			 * Sleep to allow those requests to be issued
+			 */
+			msleep(2000);
+
+		spin_lock(&ptd->lock);
+
+		if (ptd->test_state != TEST_IDLE) {
+			test_pr_info(
+				"%s: Another test is running, try again later",
+				__func__);
+			spin_unlock(&ptd->lock);
+			return -EINVAL;
+		}
+
+		if (ptd->start_sector == 0) {
+			test_pr_err("%s: Invalid start sector", __func__);
+			ptd->test_result = TEST_FAILED;
+			spin_unlock(&ptd->lock);
+			return -EINVAL;
+		}
+
+		memcpy(&ptd->test_info, t_info, sizeof(struct test_info));
+
+		ptd->next_req = NULL;
+		ptd->test_result = TEST_NO_RESULT;
+		ptd->num_of_write_bios = 0;
+
+		ptd->unique_next_req_id = UNIQUE_START_REQ_ID;
+		ptd->wr_rd_next_req_id = WR_RD_START_REQ_ID;
+
+		ptd->ignore_round = false;
+		ptd->fs_wr_reqs_during_test = false;
+
+		ptd->test_state = TEST_RUNNING;
+
+		spin_unlock(&ptd->lock);
+
+		timeout_msec = get_timeout_msec(ptd);
+		mod_timer(&ptd->timeout_timer, jiffies +
+			  msecs_to_jiffies(timeout_msec));
+
+		if (ptd->test_info.get_test_case_str_fn)
+			test_name = ptd->test_info.get_test_case_str_fn(ptd);
+		else
+			test_name = "Unknown testcase";
+		test_pr_info("%s: Starting test %s\n", __func__, test_name);
+
+		ret = prepare_test(ptd);
+		if (ret) {
+			test_pr_err("%s: failed to prepare the test\n",
+				    __func__);
+			goto error;
+		}
+
+		ret = run_test(ptd);
+		if (ret) {
+			test_pr_err("%s: failed to run the test\n", __func__);
+			goto error;
+		}
+
+		test_pr_info("%s: Waiting for the test completion", __func__);
+
+		wait_event(ptd->wait_q, ptd->test_state == TEST_COMPLETED);
+		del_timer_sync(&ptd->timeout_timer);
+
+		ret = check_test_result(ptd);
+		if (ret) {
+			test_pr_err("%s: check_test_result failed\n",
+				    __func__);
+			goto error;
+		}
+
+		ret = post_test(ptd);
+		if (ret) {
+			test_pr_err("%s: post_test failed\n", __func__);
+			goto error;
+		}
+
+		/*
+		 * Wake up the queue thread to fetch FS requests that might
+		 * have been postponed due to the test
+		 */
+		__blk_run_queue(ptd->req_q);
+
+		if (ptd->ignore_round)
+			test_pr_info(
+			"%s: Round canceled (Got wr reqs in the middle)",
+			__func__);
+
+		if (++counter == TEST_MAX_TESTCASE_ROUNDS) {
+			test_pr_info("%s: Too many rounds, did not succeed...",
+			     __func__);
+			ptd->test_result = TEST_FAILED;
+		}
+
+	} while ((ptd->ignore_round) && (counter < TEST_MAX_TESTCASE_ROUNDS));
+
+	if (ptd->test_result == TEST_PASSED)
+		return 0;
+	else
+		return -EINVAL;
+
+error:
+	ptd->test_result = TEST_FAILED;
+	ptd->test_info.testcase = 0;
+	post_test(ptd);
+	return ret;
+}
+EXPORT_SYMBOL(test_iosched_start_test);
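+
+/*
+ * A block device test utility would typically call test_iosched_start_test()
+ * from the debugfs write handler of one of its test files, after filling a
+ * struct test_info with its own callbacks. Sketch (the my_* names below are
+ * placeholders for utility-specific code):
+ *
+ *	struct test_info info;
+ *
+ *	memset(&info, 0, sizeof(info));
+ *	info.testcase = my_testcase_id;
+ *	info.prepare_test_fn = my_prepare_test;
+ *	info.check_test_result_fn = my_check_result;
+ *	info.get_test_case_str_fn = my_test_name;
+ *	ret = test_iosched_start_test(&info);
+ */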
+
+/**
+ * test_iosched_register() - register a block device test
+ * utility.
+ * @bdt:	the block device test type to register
+ */
+void test_iosched_register(struct blk_dev_test_type *bdt)
+{
+	spin_lock(&blk_dev_test_list_lock);
+	list_add_tail(&bdt->list, &blk_dev_test_list);
+	spin_unlock(&blk_dev_test_list_lock);
+}
+EXPORT_SYMBOL_GPL(test_iosched_register);
+
+/**
+ * test_iosched_unregister() - unregister a block device test
+ * utility.
+ * @bdt:	the block device test type to unregister
+ */
+void test_iosched_unregister(struct blk_dev_test_type *bdt)
+{
+	spin_lock(&blk_dev_test_list_lock);
+	list_del_init(&bdt->list);
+	spin_unlock(&blk_dev_test_list_lock);
+}
+EXPORT_SYMBOL_GPL(test_iosched_unregister);
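+
+/*
+ * A block device test utility registers itself once from its own module init.
+ * Sketch (the my_* names below are placeholders for utility-specific code):
+ *
+ *	static struct blk_dev_test_type my_bdt = {
+ *		.init_fn = my_test_utility_init,
+ *		.exit_fn = my_test_utility_exit,
+ *	};
+ *
+ *	test_iosched_register(&my_bdt);
+ *
+ * init_fn()/exit_fn() are then invoked when test-iosched is selected as, or
+ * removed from, the active scheduler (see test_init_queue()/test_exit_queue()
+ * below).
+ */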
+
+/**
+ * test_iosched_set_test_result() - Set the test
+ * result (PASS/FAIL)
+ * @test_result:	the test result
+ */
+void test_iosched_set_test_result(int test_result)
+{
+	if (!ptd)
+		return;
+
+	ptd->test_result = test_result;
+}
+EXPORT_SYMBOL(test_iosched_set_test_result);
+
+
+/**
+ * test_iosched_set_ignore_round() - Set the ignore_round flag
+ * @ignore_round:	A flag to indicate if this test round
+ * should be ignored and re-run
+ */
+void test_iosched_set_ignore_round(bool ignore_round)
+{
+	if (!ptd)
+		return;
+
+	ptd->ignore_round = ignore_round;
+}
+EXPORT_SYMBOL(test_iosched_set_ignore_round);
+
+/**
+ * test_iosched_get_debugfs_tests_root() - returns the root
+ * debugfs directory for the test_iosched tests
+ */
+struct dentry *test_iosched_get_debugfs_tests_root(void)
+{
+	if (!ptd)
+		return NULL;
+
+	return ptd->debug.debug_tests_root;
+}
+EXPORT_SYMBOL(test_iosched_get_debugfs_tests_root);
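+
+/*
+ * A test utility would normally create its per-test trigger files under this
+ * directory, for example (the file name and fops are placeholders):
+ *
+ *	debugfs_create_file("my_test", S_IRUGO | S_IWUGO,
+ *			    test_iosched_get_debugfs_tests_root(),
+ *			    NULL, &my_test_fops);
+ */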
+
+/**
+ * test_iosched_get_debugfs_utils_root() - returns the root
+ * debugfs directory for the test_iosched utils
+ */
+struct dentry *test_iosched_get_debugfs_utils_root(void)
+{
+	if (!ptd)
+		return NULL;
+
+	return ptd->debug.debug_utils_root;
+}
+EXPORT_SYMBOL(test_iosched_get_debugfs_utils_root);
+
+static int test_debugfs_init(struct test_data *td)
+{
+	td->debug.debug_root = debugfs_create_dir("test-iosched", NULL);
+	if (!td->debug.debug_root)
+		return -ENOENT;
+
+	td->debug.debug_tests_root = debugfs_create_dir("tests",
+							td->debug.debug_root);
+	if (!td->debug.debug_tests_root)
+		goto err;
+
+	td->debug.debug_utils_root = debugfs_create_dir("utils",
+							td->debug.debug_root);
+	if (!td->debug.debug_utils_root)
+		goto err;
+
+	td->debug.debug_test_result = debugfs_create_u32(
+					"test_result",
+					S_IRUGO | S_IWUGO,
+					td->debug.debug_utils_root,
+					&td->test_result);
+	if (!td->debug.debug_test_result)
+		goto err;
+
+	td->debug.start_sector = debugfs_create_u32(
+					"start_sector",
+					S_IRUGO | S_IWUGO,
+					td->debug.debug_utils_root,
+					&td->start_sector);
+	if (!td->debug.start_sector)
+		goto err;
+
+	return 0;
+
+err:
+	debugfs_remove_recursive(td->debug.debug_root);
+	return -ENOENT;
+}
+
+static void test_debugfs_cleanup(struct test_data *td)
+{
+	debugfs_remove_recursive(td->debug.debug_root);
+}
+
+static void print_req(struct request *req)
+{
+	struct bio *bio;
+	struct test_request *test_rq;
+
+	if (!req)
+		return;
+
+	test_rq = (struct test_request *)req->elv.priv[0];
+
+	if (test_rq) {
+		test_pr_debug("%s: Dispatch request %d: __sector=0x%lx",
+		       __func__, test_rq->req_id, (unsigned long)req->__sector);
+		test_pr_debug("%s: nr_phys_segments=%d, num_of_sectors=%d",
+		       __func__, req->nr_phys_segments, blk_rq_sectors(req));
+		bio = req->bio;
+		test_pr_debug("%s: bio: bi_size=%d, bi_sector=0x%lx",
+			      __func__, bio->bi_size,
+			      (unsigned long)bio->bi_sector);
+		while ((bio = bio->bi_next) != NULL) {
+			test_pr_debug("%s: bio: bi_size=%d, bi_sector=0x%lx",
+				      __func__, bio->bi_size,
+				      (unsigned long)bio->bi_sector);
+		}
+	}
+}
+
+static void test_merged_requests(struct request_queue *q,
+			 struct request *rq, struct request *next)
+{
+	list_del_init(&next->queuelist);
+}
+
+/*
+ * Dispatch a test request if a test is running. Otherwise, dispatch
+ * a request that was queued by the FS to keep the card functional.
+ */
+static int test_dispatch_requests(struct request_queue *q, int force)
+{
+	struct test_data *td = q->elevator->elevator_data;
+	struct request *rq = NULL;
+
+	switch (td->test_state) {
+	case TEST_IDLE:
+		if (!list_empty(&td->queue)) {
+			rq = list_entry(td->queue.next, struct request,
+					queuelist);
+			list_del_init(&rq->queuelist);
+			elv_dispatch_sort(q, rq);
+			return 1;
+		}
+		break;
+	case TEST_RUNNING:
+		if (td->next_req) {
+			rq = td->next_req->rq;
+			td->next_req =
+				latter_test_request(td->req_q, td->next_req);
+			if (!rq)
+				return 0;
+			print_req(rq);
+			elv_dispatch_sort(q, rq);
+			return 1;
+		}
+		break;
+	case TEST_COMPLETED:
+	default:
+		return 0;
+	}
+
+	return 0;
+}
+
+static void test_add_request(struct request_queue *q, struct request *rq)
+{
+	struct test_data *td = q->elevator->elevator_data;
+
+	list_add_tail(&rq->queuelist, &td->queue);
+
+	/*
+	 * The write requests can be followed by a FLUSH request that might
+	 * cause unexpected test results.
+	 */
+	if ((rq_data_dir(rq) == WRITE) && (td->test_state == TEST_RUNNING)) {
+		test_pr_debug("%s: got WRITE req in the middle of the test",
+			__func__);
+		td->fs_wr_reqs_during_test = true;
+	}
+}
+
+static struct request *
+test_former_request(struct request_queue *q, struct request *rq)
+{
+	struct test_data *td = q->elevator->elevator_data;
+
+	if (rq->queuelist.prev == &td->queue)
+		return NULL;
+	return list_entry(rq->queuelist.prev, struct request, queuelist);
+}
+
+static struct request *
+test_latter_request(struct request_queue *q, struct request *rq)
+{
+	struct test_data *td = q->elevator->elevator_data;
+
+	if (rq->queuelist.next == &td->queue)
+		return NULL;
+	return list_entry(rq->queuelist.next, struct request, queuelist);
+}
+
+static void *test_init_queue(struct request_queue *q)
+{
+	struct blk_dev_test_type *__bdt;
+
+	ptd = kmalloc_node(sizeof(struct test_data), GFP_KERNEL,
+			     q->node);
+	if (!ptd) {
+		test_pr_err("%s: failed to allocate test data", __func__);
+		return NULL;
+	}
+	memset((void *)ptd, 0, sizeof(struct test_data));
+	INIT_LIST_HEAD(&ptd->queue);
+	INIT_LIST_HEAD(&ptd->test_queue);
+	init_waitqueue_head(&ptd->wait_q);
+	ptd->req_q = q;
+
+	setup_timer(&ptd->timeout_timer, test_timeout_handler,
+		    (unsigned long)ptd);
+
+	spin_lock_init(&ptd->lock);
+
+	if (test_debugfs_init(ptd)) {
+		test_pr_err("%s: Failed to create debugfs files", __func__);
+		kfree(ptd);
+		ptd = NULL;
+		return NULL;
+	}
+
+	list_for_each_entry(__bdt, &blk_dev_test_list, list)
+		__bdt->init_fn();
+
+	return ptd;
+}
+
+static void test_exit_queue(struct elevator_queue *e)
+{
+	struct test_data *td = e->elevator_data;
+	struct blk_dev_test_type *__bdt;
+
+	BUG_ON(!list_empty(&td->queue));
+
+	list_for_each_entry(__bdt, &blk_dev_test_list, list)
+		__bdt->exit_fn();
+
+	test_debugfs_cleanup(td);
+
+	kfree(td);
+}
+
+static struct elevator_type elevator_test_iosched = {
+	.ops = {
+		.elevator_merge_req_fn = test_merged_requests,
+		.elevator_dispatch_fn = test_dispatch_requests,
+		.elevator_add_req_fn = test_add_request,
+		.elevator_former_req_fn = test_former_request,
+		.elevator_latter_req_fn = test_latter_request,
+		.elevator_init_fn = test_init_queue,
+		.elevator_exit_fn = test_exit_queue,
+	},
+	.elevator_name = "test-iosched",
+	.elevator_owner = THIS_MODULE,
+};
+
+static int __init test_init(void)
+{
+	elv_register(&elevator_test_iosched);
+
+	return 0;
+}
+
+static void __exit test_exit(void)
+{
+	elv_unregister(&elevator_test_iosched);
+}
+
+module_init(test_init);
+module_exit(test_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Test IO scheduler");
diff --git a/drivers/clk/clkdev.c b/drivers/clk/clkdev.c
index fdff32e..826ba9a 100644
--- a/drivers/clk/clkdev.c
+++ b/drivers/clk/clkdev.c
@@ -122,7 +122,7 @@
 }
 EXPORT_SYMBOL(clkdev_add);
 
-void __init clkdev_add_table(struct clk_lookup *cl, size_t num)
+void clkdev_add_table(struct clk_lookup *cl, size_t num)
 {
 	mutex_lock(&clocks_mutex);
 	while (num--) {
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index 0dcf1a4..fdbc36f 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -555,20 +555,24 @@
 	  This option enables support for on-chip GPIO found on Qualcomm PM8xxx
 	  PMICs through RPC.
 
-config GPIO_QPNP
+config GPIO_QPNP_PIN
 	depends on ARCH_MSMCOPPER
+	depends on SPMI
 	depends on OF_SPMI
 	depends on MSM_QPNP_INT
-	tristate "Qualcomm QPNP GPIO support"
+	tristate "Qualcomm QPNP gpio support"
 	help
 	  Say 'y' here to include support for the Qualcomm QPNP gpio
-	  support. QPNP is a SPMI based PMIC implementation.
+	  driver. This driver supports Device Tree and allows a
+	  device_node to be registered as a gpio-controller. It
+	  does not handle gpio interrupts directly. That work is handled
+	  by CONFIG_MSM_QPNP_INT.
 
-config GPIO_QPNP_DEBUG
-	depends on GPIO_QPNP
+config GPIO_QPNP_PIN_DEBUG
+	depends on GPIO_QPNP_PIN
 	depends on DEBUG_FS
 	bool "Qualcomm QPNP GPIO debug support"
 	help
 	  Say 'y' here to include debug support for the Qualcomm
-	  QPNP gpio support
+	  QPNP gpio driver.
 endif
diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile
index d15b628..405e498 100644
--- a/drivers/gpio/Makefile
+++ b/drivers/gpio/Makefile
@@ -48,7 +48,7 @@
 obj-$(CONFIG_GPIO_PM8XXX_MPP) 	+= pm8xxx-mpp.o
 obj-$(CONFIG_GPIO_PM8XXX_RPC)	+= gpio-pm8xxx-rpc.o
 obj-$(CONFIG_GPIO_PXA)		+= gpio-pxa.o
-obj-$(CONFIG_GPIO_QPNP)		+= qpnp-gpio.o
+obj-$(CONFIG_GPIO_QPNP_PIN)	+= qpnp-pin.o
 obj-$(CONFIG_GPIO_RDC321X)	+= gpio-rdc321x.o
 obj-$(CONFIG_PLAT_SAMSUNG)	+= gpio-samsung.o
 obj-$(CONFIG_ARCH_SA1100)	+= gpio-sa1100.o
diff --git a/drivers/gpio/gpio-msm-common.c b/drivers/gpio/gpio-msm-common.c
index 9a9a783..5539950 100644
--- a/drivers/gpio/gpio-msm-common.c
+++ b/drivers/gpio/gpio-msm-common.c
@@ -100,7 +100,7 @@
 	DECLARE_BITMAP(enabled_irqs, NR_MSM_GPIOS);
 	DECLARE_BITMAP(wake_irqs, NR_MSM_GPIOS);
 	DECLARE_BITMAP(dual_edge_irqs, NR_MSM_GPIOS);
-	struct irq_domain domain;
+	struct irq_domain *domain;
 };
 
 static DEFINE_SPINLOCK(tlmm_lock);
@@ -152,15 +152,14 @@
 static int msm_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
 {
 	struct msm_gpio_dev *g_dev = to_msm_gpio_dev(chip);
-	struct irq_domain *domain = &g_dev->domain;
-	return domain->irq_base + (offset - chip->base);
+	struct irq_domain *domain = g_dev->domain;
+	return irq_linear_revmap(domain, offset - chip->base);
 }
 
 static inline int msm_irq_to_gpio(struct gpio_chip *chip, unsigned irq)
 {
-	struct msm_gpio_dev *g_dev = to_msm_gpio_dev(chip);
-	struct irq_domain *domain = &g_dev->domain;
-	return irq - domain->irq_base;
+	struct irq_data *irq_data = irq_get_irq_data(irq);
+	return irq_data->hwirq;
 }
 #else
 static int msm_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
@@ -391,6 +390,7 @@
  */
 static struct lock_class_key msm_gpio_lock_class;
 
+/* TODO: This should be a real platform_driver */
 static int __devinit msm_gpio_probe(void)
 {
 	int i, irq, ret;
@@ -573,12 +573,12 @@
 EXPORT_SYMBOL(msm_gpio_install_direct_irq);
 
 #ifdef CONFIG_OF
-static int msm_gpio_domain_dt_translate(struct irq_domain *d,
-					struct device_node *controller,
-					const u32 *intspec,
-					unsigned int intsize,
-					unsigned long *out_hwirq,
-					unsigned int *out_type)
+static int msm_gpio_irq_domain_xlate(struct irq_domain *d,
+				     struct device_node *controller,
+				     const u32 *intspec,
+				     unsigned int intsize,
+				     unsigned long *out_hwirq,
+				     unsigned int *out_type)
 {
 	if (d->of_node != controller)
 		return -EINVAL;
@@ -593,32 +593,32 @@
 	return 0;
 }
 
+/*
+ * TODO: this really should be doing all the things that msm_gpio_probe() does,
+ * but since the msm_gpio_probe is called unconditionally for DT and non-DT
+ * configs, we can't duplicate it here. This should be fixed.
+ */
+int msm_gpio_irq_domain_map(struct irq_domain *d, unsigned int irq,
+			  irq_hw_number_t hwirq)
+{
+	return 0;
+}
+
 static struct irq_domain_ops msm_gpio_irq_domain_ops = {
-	.dt_translate = msm_gpio_domain_dt_translate,
+	.xlate = msm_gpio_irq_domain_xlate,
+	.map = msm_gpio_irq_domain_map,
 };
 
 int __init msm_gpio_of_init(struct device_node *node,
 			    struct device_node *parent)
 {
-	struct irq_domain *domain = &msm_gpio.domain;
-	int start;
-
-	start = irq_domain_find_free_range(0, NR_MSM_GPIOS);
-	domain->irq_base = irq_alloc_descs(start, 0, NR_MSM_GPIOS,
-							numa_node_id());
-	if (IS_ERR_VALUE(domain->irq_base)) {
-		WARN(1, "Cannot allocate irq_descs @ IRQ%d\n", start);
-		return domain->irq_base;
+	msm_gpio.domain = irq_domain_add_linear(node, NR_MSM_GPIOS,
+			&msm_gpio_irq_domain_ops, &msm_gpio);
+	if (!msm_gpio.domain) {
+		WARN(1, "Cannot allocate irq_domain\n");
+		return -ENOMEM;
 	}
 
-	domain->nr_irq = NR_MSM_GPIOS;
-	domain->of_node = of_node_get(node);
-	domain->priv = &msm_gpio;
-	domain->ops = &msm_gpio_irq_domain_ops;
-	irq_domain_add(domain);
-	msm_gpio.gpio_chip.of_node = of_node_get(node);
-	pr_debug("%s: irq_base = %u\n", __func__, domain->irq_base);
-
 	return 0;
 }
 #endif
diff --git a/drivers/gpio/qpnp-gpio.c b/drivers/gpio/qpnp-gpio.c
deleted file mode 100644
index 97859e5..0000000
--- a/drivers/gpio/qpnp-gpio.c
+++ /dev/null
@@ -1,1091 +0,0 @@
-/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- */
-
-#define pr_fmt(fmt) "%s: " fmt, __func__
-
-#include <linux/interrupt.h>
-#include <linux/types.h>
-#include <linux/spmi.h>
-#include <linux/platform_device.h>
-#include <linux/debugfs.h>
-#include <linux/gpio.h>
-#include <linux/slab.h>
-#include <linux/of.h>
-#include <linux/of_gpio.h>
-#include <linux/of_irq.h>
-#include <linux/export.h>
-#include <linux/module.h>
-#include <linux/qpnp/gpio.h>
-#include <linux/export.h>
-
-#include <mach/qpnp.h>
-
-#define Q_REG_ADDR(q_spec, reg_index)	\
-		((q_spec)->offset + reg_index)
-
-#define Q_REG_STATUS1			0x8
-#define Q_NUM_CTL_REGS			7
-
-/* type registers base address offsets */
-#define Q_REG_TYPE			0x10
-#define Q_REG_SUBTYPE			0x11
-
-/* gpio peripheral type and subtype values */
-#define Q_GPIO_TYPE			0x10
-#define Q_GPIO_SUBTYPE_GPIO_4CH		0x1
-#define Q_GPIO_SUBTYPE_GPIOC_4CH	0x5
-#define Q_GPIO_SUBTYPE_GPIO_8CH		0x9
-#define Q_GPIO_SUBTYPE_GPIOC_8CH	0xD
-
-/* control register base address offsets */
-#define Q_REG_MODE_CTL			0x40
-#define Q_REG_DIG_PULL_CTL		0x42
-#define Q_REG_DIG_IN_CTL		0x43
-#define Q_REG_DIG_VIN_CTL		0x44
-#define Q_REG_DIG_OUT_CTL		0x45
-#define Q_REG_EN_CTL			0x46
-
-/* control register regs array indices */
-#define Q_REG_I_MODE_CTL		0
-#define Q_REG_I_DIG_PULL_CTL		2
-#define Q_REG_I_DIG_IN_CTL		3
-#define Q_REG_I_DIG_VIN_CTL		4
-#define Q_REG_I_DIG_OUT_CTL		5
-#define Q_REG_I_EN_CTL			6
-
-/* control reg: mode */
-#define Q_REG_OUT_INVERT_SHIFT		0
-#define Q_REG_OUT_INVERT_MASK		0x1
-#define Q_REG_SRC_SEL_SHIFT		1
-#define Q_REG_SRC_SEL_MASK		0xE
-#define Q_REG_MODE_SEL_SHIFT		4
-#define Q_REG_MODE_SEL_MASK		0x70
-
-/* control reg: dig_vin */
-#define Q_REG_VIN_SHIFT			0
-#define Q_REG_VIN_MASK			0x7
-
-/* control reg: dig_pull */
-#define Q_REG_PULL_SHIFT		0
-#define Q_REG_PULL_MASK			0x7
-
-/* control reg: dig_out */
-#define Q_REG_OUT_STRENGTH_SHIFT	0
-#define Q_REG_OUT_STRENGTH_MASK		0x3
-#define Q_REG_OUT_TYPE_SHIFT		4
-#define Q_REG_OUT_TYPE_MASK		0x30
-
-/* control reg: en */
-#define Q_REG_MASTER_EN_SHIFT		7
-#define Q_REG_MASTER_EN_MASK		0x80
-
-enum qpnp_gpio_param_type {
-	Q_GPIO_CFG_DIRECTION,
-	Q_GPIO_CFG_OUTPUT_TYPE,
-	Q_GPIO_CFG_INVERT,
-	Q_GPIO_CFG_PULL,
-	Q_GPIO_CFG_VIN_SEL,
-	Q_GPIO_CFG_OUT_STRENGTH,
-	Q_GPIO_CFG_SRC_SELECT,
-	Q_GPIO_CFG_MASTER_EN,
-	Q_GPIO_CFG_INVALID,
-};
-
-#define Q_NUM_PARAMS			Q_GPIO_CFG_INVALID
-
-/* param error checking */
-#define QPNP_GPIO_DIR_INVALID		3
-#define QPNP_GPIO_INVERT_INVALID	2
-#define QPNP_GPIO_OUT_BUF_INVALID	3
-#define QPNP_GPIO_VIN_INVALID		8
-#define QPNP_GPIO_PULL_INVALID		6
-#define QPNP_GPIO_OUT_STRENGTH_INVALID	4
-#define QPNP_GPIO_SRC_INVALID		8
-#define QPNP_GPIO_MASTER_INVALID	2
-
-struct qpnp_gpio_spec {
-	uint8_t slave;			/* 0-15 */
-	uint16_t offset;		/* 0-255 */
-	uint32_t gpio_chip_idx;		/* offset from gpio_chip base */
-	uint32_t pmic_gpio;		/* PMIC gpio number */
-	int irq;			/* logical IRQ number */
-	u8 regs[Q_NUM_CTL_REGS];	/* Control regs */
-	u8 type;			/* peripheral type */
-	u8 subtype;			/* peripheral subtype */
-	struct device_node *node;
-	enum qpnp_gpio_param_type params[Q_NUM_PARAMS];
-	struct qpnp_gpio_chip *q_chip;
-};
-
-struct qpnp_gpio_chip {
-	struct gpio_chip	gpio_chip;
-	struct spmi_device	*spmi;
-	struct qpnp_gpio_spec	**pmic_gpios;
-	struct qpnp_gpio_spec	**chip_gpios;
-	uint32_t		pmic_gpio_lowest;
-	uint32_t		pmic_gpio_highest;
-	struct device_node	*int_ctrl;
-	struct list_head	chip_list;
-	struct dentry		*dfs_dir;
-};
-
-static LIST_HEAD(qpnp_gpio_chips);
-static DEFINE_MUTEX(qpnp_gpio_chips_lock);
-
-static inline void qpnp_pmic_gpio_set_spec(struct qpnp_gpio_chip *q_chip,
-					      uint32_t pmic_gpio,
-					      struct qpnp_gpio_spec *spec)
-{
-	q_chip->pmic_gpios[pmic_gpio - q_chip->pmic_gpio_lowest] = spec;
-}
-
-static inline struct qpnp_gpio_spec *qpnp_pmic_gpio_get_spec(
-						struct qpnp_gpio_chip *q_chip,
-						uint32_t pmic_gpio)
-{
-	if (pmic_gpio < q_chip->pmic_gpio_lowest ||
-	    pmic_gpio > q_chip->pmic_gpio_highest)
-		return NULL;
-
-	return q_chip->pmic_gpios[pmic_gpio - q_chip->pmic_gpio_lowest];
-}
-
-static inline struct qpnp_gpio_spec *qpnp_chip_gpio_get_spec(
-						struct qpnp_gpio_chip *q_chip,
-						uint32_t chip_gpio)
-{
-	if (chip_gpio > q_chip->gpio_chip.ngpio)
-		return NULL;
-
-	return q_chip->chip_gpios[chip_gpio];
-}
-
-static inline void qpnp_chip_gpio_set_spec(struct qpnp_gpio_chip *q_chip,
-					      uint32_t chip_gpio,
-					      struct qpnp_gpio_spec *spec)
-{
-	q_chip->chip_gpios[chip_gpio] = spec;
-}
-
-static int qpnp_gpio_check_config(struct qpnp_gpio_spec *q_spec,
-				  struct qpnp_gpio_cfg *param)
-{
-	int gpio = q_spec->pmic_gpio;
-
-	if (param->direction >= QPNP_GPIO_DIR_INVALID)
-		pr_err("invalid direction for gpio %d\n", gpio);
-	else if (param->invert >= QPNP_GPIO_INVERT_INVALID)
-		pr_err("invalid invert polarity for gpio %d\n", gpio);
-	else if (param->src_select >= QPNP_GPIO_SRC_INVALID)
-		pr_err("invalid source select for gpio %d\n", gpio);
-	else if (param->out_strength >= QPNP_GPIO_OUT_STRENGTH_INVALID ||
-		 param->out_strength == 0)
-		pr_err("invalid out strength for gpio %d\n", gpio);
-	else if (param->output_type >= QPNP_GPIO_OUT_BUF_INVALID)
-		pr_err("invalid out type for gpio %d\n", gpio);
-	else if ((param->output_type == QPNP_GPIO_OUT_BUF_OPEN_DRAIN_NMOS ||
-		 param->output_type == QPNP_GPIO_OUT_BUF_OPEN_DRAIN_PMOS) &&
-		 (q_spec->subtype == Q_GPIO_SUBTYPE_GPIOC_4CH ||
-		 (q_spec->subtype == Q_GPIO_SUBTYPE_GPIOC_8CH)))
-		pr_err("invalid out type for gpio %d\n"
-		       "gpioc does not support open-drain\n", gpio);
-	else if (param->vin_sel >= QPNP_GPIO_VIN_INVALID)
-		pr_err("invalid vin select value for gpio %d\n", gpio);
-	else if (param->pull >= QPNP_GPIO_PULL_INVALID)
-		pr_err("invalid pull value for gpio %d\n", gpio);
-	else if (param->master_en >= QPNP_GPIO_MASTER_INVALID)
-		pr_err("invalid master_en value for gpio %d\n", gpio);
-	else
-		return 0;
-
-	return -EINVAL;
-}
-
-static inline u8 q_reg_get(u8 *reg, int shift, int mask)
-{
-	return (*reg & mask) >> shift;
-}
-
-static inline void q_reg_set(u8 *reg, int shift, int mask, int value)
-{
-	*reg |= (value << shift) & mask;
-}
-
-static inline void q_reg_clr_set(u8 *reg, int shift, int mask, int value)
-{
-	*reg &= ~mask;
-	*reg |= (value << shift) & mask;
-}
-
-static int qpnp_gpio_cache_regs(struct qpnp_gpio_chip *q_chip,
-				struct qpnp_gpio_spec *q_spec)
-{
-	int rc;
-	struct device *dev = &q_chip->spmi->dev;
-
-	rc = spmi_ext_register_readl(q_chip->spmi->ctrl, q_spec->slave,
-				     Q_REG_ADDR(q_spec, Q_REG_MODE_CTL),
-				     &q_spec->regs[Q_REG_I_MODE_CTL],
-				     Q_NUM_CTL_REGS);
-	if (rc)
-		dev_err(dev, "%s: unable to read control regs\n", __func__);
-
-	return rc;
-}
-
-static int _qpnp_gpio_config(struct qpnp_gpio_chip *q_chip,
-			     struct qpnp_gpio_spec *q_spec,
-			     struct qpnp_gpio_cfg *param)
-{
-	struct device *dev = &q_chip->spmi->dev;
-	int rc;
-
-	rc = qpnp_gpio_check_config(q_spec, param);
-	if (rc)
-		goto gpio_cfg;
-
-	/* set direction */
-	q_reg_clr_set(&q_spec->regs[Q_REG_I_MODE_CTL],
-			  Q_REG_MODE_SEL_SHIFT, Q_REG_MODE_SEL_MASK,
-			  param->direction);
-
-	/* output specific configuration */
-	q_reg_clr_set(&q_spec->regs[Q_REG_I_MODE_CTL],
-			  Q_REG_OUT_INVERT_SHIFT, Q_REG_OUT_INVERT_MASK,
-			  param->invert);
-	q_reg_clr_set(&q_spec->regs[Q_REG_I_MODE_CTL],
-			  Q_REG_SRC_SEL_SHIFT, Q_REG_SRC_SEL_MASK,
-			  param->src_select);
-	q_reg_clr_set(&q_spec->regs[Q_REG_I_DIG_OUT_CTL],
-			  Q_REG_OUT_STRENGTH_SHIFT, Q_REG_OUT_STRENGTH_MASK,
-			  param->out_strength);
-	q_reg_clr_set(&q_spec->regs[Q_REG_I_DIG_OUT_CTL],
-			  Q_REG_OUT_TYPE_SHIFT, Q_REG_OUT_TYPE_MASK,
-			  param->output_type);
-
-	/* config applicable for both input / output */
-	q_reg_clr_set(&q_spec->regs[Q_REG_I_DIG_VIN_CTL],
-			  Q_REG_VIN_SHIFT, Q_REG_VIN_MASK,
-			  param->vin_sel);
-	q_reg_clr_set(&q_spec->regs[Q_REG_I_DIG_PULL_CTL],
-			  Q_REG_PULL_SHIFT, Q_REG_PULL_MASK,
-			  param->pull);
-	q_reg_clr_set(&q_spec->regs[Q_REG_I_EN_CTL],
-			  Q_REG_MASTER_EN_SHIFT, Q_REG_MASTER_EN_MASK,
-			  param->master_en);
-
-	rc = spmi_ext_register_writel(q_chip->spmi->ctrl, q_spec->slave,
-			      Q_REG_ADDR(q_spec, Q_REG_MODE_CTL),
-			      &q_spec->regs[Q_REG_I_MODE_CTL], Q_NUM_CTL_REGS);
-	if (rc) {
-		dev_err(&q_chip->spmi->dev, "%s: unable to write master"
-						" enable\n", __func__);
-		goto gpio_cfg;
-	}
-
-	return 0;
-
-gpio_cfg:
-	dev_err(dev, "%s: unable to set default config for"
-		     " pmic gpio %d\n", __func__, q_spec->pmic_gpio);
-
-	return rc;
-}
-
-int qpnp_gpio_config(int gpio, struct qpnp_gpio_cfg *param)
-{
-	int rc, chip_offset;
-	struct qpnp_gpio_chip *q_chip;
-	struct qpnp_gpio_spec *q_spec = NULL;
-	struct gpio_chip *gpio_chip;
-
-	if (param == NULL)
-		return -EINVAL;
-
-	mutex_lock(&qpnp_gpio_chips_lock);
-	list_for_each_entry(q_chip, &qpnp_gpio_chips, chip_list) {
-		gpio_chip = &q_chip->gpio_chip;
-		if (gpio >= gpio_chip->base
-				&& gpio < gpio_chip->base + gpio_chip->ngpio) {
-			chip_offset = gpio - gpio_chip->base;
-			q_spec = qpnp_chip_gpio_get_spec(q_chip, chip_offset);
-			if (WARN_ON(!q_spec)) {
-				mutex_unlock(&qpnp_gpio_chips_lock);
-				return -ENODEV;
-			}
-			break;
-		}
-	}
-	mutex_unlock(&qpnp_gpio_chips_lock);
-
-	rc = _qpnp_gpio_config(q_chip, q_spec, param);
-
-	return rc;
-}
-EXPORT_SYMBOL(qpnp_gpio_config);
-
-int qpnp_gpio_map_gpio(uint16_t slave_id, uint32_t pmic_gpio)
-{
-	struct qpnp_gpio_chip *q_chip;
-	struct qpnp_gpio_spec *q_spec = NULL;
-
-	mutex_lock(&qpnp_gpio_chips_lock);
-	list_for_each_entry(q_chip, &qpnp_gpio_chips, chip_list) {
-		if (q_chip->spmi->sid != slave_id)
-			continue;
-		if (q_chip->pmic_gpio_lowest <= pmic_gpio &&
-		    q_chip->pmic_gpio_highest >= pmic_gpio) {
-			q_spec = qpnp_pmic_gpio_get_spec(q_chip, pmic_gpio);
-			mutex_unlock(&qpnp_gpio_chips_lock);
-			if (WARN_ON(!q_spec))
-				return -ENODEV;
-			return q_chip->gpio_chip.base + q_spec->gpio_chip_idx;
-		}
-	}
-	mutex_unlock(&qpnp_gpio_chips_lock);
-	return -EINVAL;
-}
-EXPORT_SYMBOL(qpnp_gpio_map_gpio);
-
-static int qpnp_gpio_to_irq(struct gpio_chip *gpio_chip, unsigned offset)
-{
-	struct qpnp_gpio_chip *q_chip = dev_get_drvdata(gpio_chip->dev);
-	struct qpnp_gpio_spec *q_spec;
-
-	q_spec = qpnp_chip_gpio_get_spec(q_chip, offset);
-	if (!q_spec)
-		return -EINVAL;
-
-	return q_spec->irq;
-}
-
-static int qpnp_gpio_get(struct gpio_chip *gpio_chip, unsigned offset)
-{
-	int rc, ret_val;
-	struct qpnp_gpio_chip *q_chip = dev_get_drvdata(gpio_chip->dev);
-	struct qpnp_gpio_spec *q_spec = NULL;
-	u8 buf[1];
-
-	if (WARN_ON(!q_chip))
-		return -ENODEV;
-
-	q_spec = qpnp_chip_gpio_get_spec(q_chip, offset);
-	if (WARN_ON(!q_spec))
-		return -ENODEV;
-
-	/* gpio val is from RT status iff input is enabled */
-	if ((q_spec->regs[Q_REG_I_MODE_CTL] & Q_REG_MODE_SEL_MASK)
-						== QPNP_GPIO_DIR_IN) {
-		/* INT_RT_STS */
-		rc = spmi_ext_register_readl(q_chip->spmi->ctrl, q_spec->slave,
-				Q_REG_ADDR(q_spec, Q_REG_STATUS1),
-				&buf[0], 1);
-		return buf[0];
-
-	} else {
-		ret_val = (q_spec->regs[Q_REG_I_MODE_CTL] &
-			       Q_REG_OUT_INVERT_MASK) >> Q_REG_OUT_INVERT_SHIFT;
-		return ret_val;
-	}
-
-	return 0;
-}
-
-static int __qpnp_gpio_set(struct qpnp_gpio_chip *q_chip,
-			   struct qpnp_gpio_spec *q_spec, int value)
-{
-	int rc;
-
-	if (!q_chip || !q_spec)
-		return -EINVAL;
-
-	if (value)
-		q_reg_clr_set(&q_spec->regs[Q_REG_I_MODE_CTL],
-			  Q_REG_OUT_INVERT_SHIFT, Q_REG_OUT_INVERT_MASK, 1);
-	else
-		q_reg_clr_set(&q_spec->regs[Q_REG_I_MODE_CTL],
-			  Q_REG_OUT_INVERT_SHIFT, Q_REG_OUT_INVERT_MASK, 0);
-
-	rc = spmi_ext_register_writel(q_chip->spmi->ctrl, q_spec->slave,
-			      Q_REG_ADDR(q_spec, Q_REG_I_MODE_CTL),
-			      &q_spec->regs[Q_REG_I_MODE_CTL], 1);
-	if (rc)
-		dev_err(&q_chip->spmi->dev, "%s: spmi write failed\n",
-								__func__);
-	return rc;
-}
-
-
-static void qpnp_gpio_set(struct gpio_chip *gpio_chip,
-		unsigned offset, int value)
-{
-	struct qpnp_gpio_chip *q_chip = dev_get_drvdata(gpio_chip->dev);
-	struct qpnp_gpio_spec *q_spec;
-
-	if (WARN_ON(!q_chip))
-		return;
-
-	q_spec = qpnp_chip_gpio_get_spec(q_chip, offset);
-	if (WARN_ON(!q_spec))
-		return;
-
-	__qpnp_gpio_set(q_chip, q_spec, value);
-}
-
-static int qpnp_gpio_set_direction(struct qpnp_gpio_chip *q_chip,
-				   struct qpnp_gpio_spec *q_spec, int direction)
-{
-	int rc;
-
-	if (!q_chip || !q_spec)
-		return -EINVAL;
-
-	if (direction >= QPNP_GPIO_DIR_INVALID) {
-		pr_err("invalid direction specification %d\n", direction);
-		return -EINVAL;
-	}
-
-	q_reg_clr_set(&q_spec->regs[Q_REG_I_MODE_CTL],
-			Q_REG_MODE_SEL_SHIFT,
-			Q_REG_MODE_SEL_MASK,
-			direction);
-
-	rc = spmi_ext_register_writel(q_chip->spmi->ctrl, q_spec->slave,
-			      Q_REG_ADDR(q_spec, Q_REG_I_MODE_CTL),
-			      &q_spec->regs[Q_REG_I_MODE_CTL], 1);
-	return rc;
-}
-
-static int qpnp_gpio_direction_input(struct gpio_chip *gpio_chip,
-		unsigned offset)
-{
-	struct qpnp_gpio_chip *q_chip = dev_get_drvdata(gpio_chip->dev);
-	struct qpnp_gpio_spec *q_spec;
-
-	if (WARN_ON(!q_chip))
-		return -ENODEV;
-
-	q_spec = qpnp_chip_gpio_get_spec(q_chip, offset);
-	if (WARN_ON(!q_spec))
-		return -ENODEV;
-
-	return qpnp_gpio_set_direction(q_chip, q_spec, QPNP_GPIO_DIR_IN);
-}
-
-static int qpnp_gpio_direction_output(struct gpio_chip *gpio_chip,
-		unsigned offset,
-		int val)
-{
-	int rc;
-	struct qpnp_gpio_chip *q_chip = dev_get_drvdata(gpio_chip->dev);
-	struct qpnp_gpio_spec *q_spec;
-
-	if (WARN_ON(!q_chip))
-		return -ENODEV;
-
-	q_spec = qpnp_chip_gpio_get_spec(q_chip, offset);
-	if (WARN_ON(!q_spec))
-		return -ENODEV;
-
-	rc = __qpnp_gpio_set(q_chip, q_spec, val);
-	if (rc)
-		return rc;
-
-	rc = qpnp_gpio_set_direction(q_chip, q_spec, QPNP_GPIO_DIR_OUT);
-
-	return rc;
-}
-
-static int qpnp_gpio_of_gpio_xlate(struct gpio_chip *gpio_chip,
-				   const struct of_phandle_args *gpio_spec,
-				   u32 *flags)
-{
-	struct qpnp_gpio_chip *q_chip = dev_get_drvdata(gpio_chip->dev);
-	struct qpnp_gpio_spec *q_spec;
-
-	if (WARN_ON(gpio_chip->of_gpio_n_cells < 2)) {
-		pr_err("of_gpio_n_cells < 2\n");
-		return -EINVAL;
-	}
-
-	q_spec = qpnp_pmic_gpio_get_spec(q_chip, gpio_spec->args[0]);
-	if (!q_spec) {
-		pr_err("no such PMIC gpio %u in device topology\n",
-							gpio_spec->args[0]);
-		return -EINVAL;
-	}
-
-	if (flags)
-		*flags = gpio_spec->args[1];
-
-	return q_spec->gpio_chip_idx;
-}
-
-static int qpnp_gpio_apply_config(struct qpnp_gpio_chip *q_chip,
-				  struct qpnp_gpio_spec *q_spec)
-{
-	struct qpnp_gpio_cfg param;
-	struct device_node *node = q_spec->node;
-	int rc;
-
-	param.direction    = q_reg_get(&q_spec->regs[Q_REG_I_MODE_CTL],
-				       Q_REG_MODE_SEL_SHIFT,
-				       Q_REG_MODE_SEL_MASK);
-	param.output_type  = q_reg_get(&q_spec->regs[Q_REG_I_DIG_OUT_CTL],
-				       Q_REG_OUT_TYPE_SHIFT,
-				       Q_REG_OUT_TYPE_MASK);
-	param.invert	   = q_reg_get(&q_spec->regs[Q_REG_I_MODE_CTL],
-				       Q_REG_OUT_INVERT_MASK,
-				       Q_REG_OUT_INVERT_MASK);
-	param.pull	   = q_reg_get(&q_spec->regs[Q_REG_I_MODE_CTL],
-				       Q_REG_PULL_SHIFT, Q_REG_PULL_MASK);
-	param.vin_sel	   = q_reg_get(&q_spec->regs[Q_REG_I_DIG_VIN_CTL],
-				       Q_REG_VIN_SHIFT, Q_REG_VIN_MASK);
-	param.out_strength = q_reg_get(&q_spec->regs[Q_REG_I_DIG_OUT_CTL],
-				       Q_REG_OUT_STRENGTH_SHIFT,
-				       Q_REG_OUT_STRENGTH_MASK);
-	param.src_select   = q_reg_get(&q_spec->regs[Q_REG_I_MODE_CTL],
-				       Q_REG_SRC_SEL_SHIFT, Q_REG_SRC_SEL_MASK);
-	param.master_en    = q_reg_get(&q_spec->regs[Q_REG_I_EN_CTL],
-				       Q_REG_MASTER_EN_SHIFT,
-				       Q_REG_MASTER_EN_MASK);
-
-	of_property_read_u32(node, "qcom,direction",
-		&param.direction);
-	of_property_read_u32(node, "qcom,output-type",
-		&param.output_type);
-	of_property_read_u32(node, "qcom,invert",
-		&param.invert);
-	of_property_read_u32(node, "qcom,pull",
-		&param.pull);
-	of_property_read_u32(node, "qcom,vin-sel",
-		&param.vin_sel);
-	of_property_read_u32(node, "qcom,out-strength",
-		&param.out_strength);
-	of_property_read_u32(node, "qcom,src-select",
-		&param.src_select);
-	rc = of_property_read_u32(node, "qcom,master-en",
-		&param.master_en);
-
-	rc = _qpnp_gpio_config(q_chip, q_spec, &param);
-
-	return rc;
-}
-
-static int qpnp_gpio_free_chip(struct qpnp_gpio_chip *q_chip)
-{
-	struct spmi_device *spmi = q_chip->spmi;
-	int rc, i;
-
-	if (q_chip->chip_gpios)
-		for (i = 0; i < spmi->num_dev_node; i++)
-			kfree(q_chip->chip_gpios[i]);
-
-	mutex_lock(&qpnp_gpio_chips_lock);
-	list_del(&q_chip->chip_list);
-	mutex_unlock(&qpnp_gpio_chips_lock);
-	rc = gpiochip_remove(&q_chip->gpio_chip);
-	if (rc)
-		dev_err(&q_chip->spmi->dev, "%s: unable to remove gpio\n",
-				__func__);
-	kfree(q_chip->chip_gpios);
-	kfree(q_chip->pmic_gpios);
-	kfree(q_chip);
-	return rc;
-}
-
-#ifdef CONFIG_GPIO_QPNP_DEBUG
-struct qpnp_gpio_reg {
-	uint32_t addr;
-	uint32_t idx;
-	uint32_t shift;
-	uint32_t mask;
-};
-
-static struct dentry *driver_dfs_dir;
-
-static int qpnp_gpio_reg_attr(enum qpnp_gpio_param_type type,
-			     struct qpnp_gpio_reg *cfg)
-{
-	switch (type) {
-	case Q_GPIO_CFG_DIRECTION:
-		cfg->addr = Q_REG_MODE_CTL;
-		cfg->idx = Q_REG_I_MODE_CTL;
-		cfg->shift = Q_REG_MODE_SEL_SHIFT;
-		cfg->mask = Q_REG_MODE_SEL_MASK;
-		break;
-	case Q_GPIO_CFG_OUTPUT_TYPE:
-		cfg->addr = Q_REG_DIG_OUT_CTL;
-		cfg->idx = Q_REG_I_DIG_OUT_CTL;
-		cfg->shift = Q_REG_OUT_TYPE_SHIFT;
-		cfg->mask = Q_REG_OUT_TYPE_MASK;
-		break;
-	case Q_GPIO_CFG_INVERT:
-		cfg->addr = Q_REG_MODE_CTL;
-		cfg->idx = Q_REG_I_MODE_CTL;
-		cfg->shift = Q_REG_OUT_INVERT_SHIFT;
-		cfg->mask = Q_REG_OUT_INVERT_MASK;
-		break;
-	case Q_GPIO_CFG_PULL:
-		cfg->addr = Q_REG_DIG_PULL_CTL;
-		cfg->idx = Q_REG_I_DIG_PULL_CTL;
-		cfg->shift = Q_REG_PULL_SHIFT;
-		cfg->mask = Q_REG_PULL_MASK;
-		break;
-	case Q_GPIO_CFG_VIN_SEL:
-		cfg->addr = Q_REG_DIG_VIN_CTL;
-		cfg->idx = Q_REG_I_DIG_VIN_CTL;
-		cfg->shift = Q_REG_VIN_SHIFT;
-		cfg->mask = Q_REG_VIN_MASK;
-		break;
-	case Q_GPIO_CFG_OUT_STRENGTH:
-		cfg->addr = Q_REG_DIG_OUT_CTL;
-		cfg->idx = Q_REG_I_DIG_OUT_CTL;
-		cfg->shift = Q_REG_OUT_STRENGTH_SHIFT;
-		cfg->mask = Q_REG_OUT_STRENGTH_MASK;
-		break;
-	case Q_GPIO_CFG_SRC_SELECT:
-		cfg->addr = Q_REG_MODE_CTL;
-		cfg->idx = Q_REG_I_MODE_CTL;
-		cfg->shift = Q_REG_SRC_SEL_SHIFT;
-		cfg->mask = Q_REG_SRC_SEL_MASK;
-		break;
-	case Q_GPIO_CFG_MASTER_EN:
-		cfg->addr = Q_REG_EN_CTL;
-		cfg->idx = Q_REG_I_EN_CTL;
-		cfg->shift = Q_REG_MASTER_EN_SHIFT;
-		cfg->mask = Q_REG_MASTER_EN_MASK;
-		break;
-	default:
-		return -EINVAL;
-	}
-
-	return 0;
-}
-
-static int qpnp_gpio_debugfs_get(void *data, u64 *val)
-{
-	enum qpnp_gpio_param_type *idx = data;
-	struct qpnp_gpio_spec *q_spec;
-	struct qpnp_gpio_reg cfg = {};
-	int rc;
-
-	rc = qpnp_gpio_reg_attr(*idx, &cfg);
-	if (rc)
-		return rc;
-	q_spec = container_of(idx, struct qpnp_gpio_spec, params[*idx]);
-	*val = q_reg_get(&q_spec->regs[cfg.idx], cfg.shift, cfg.mask);
-	return 0;
-}
-
-static int qpnp_gpio_check_reg_val(enum qpnp_gpio_param_type idx,
-				   struct qpnp_gpio_spec *q_spec,
-				   uint32_t val)
-{
-	switch (idx) {
-	case Q_GPIO_CFG_DIRECTION:
-		if (val >= QPNP_GPIO_DIR_INVALID)
-			return -EINVAL;
-		break;
-	case Q_GPIO_CFG_OUTPUT_TYPE:
-		if ((val >= QPNP_GPIO_OUT_BUF_INVALID) ||
-		   ((val == QPNP_GPIO_OUT_BUF_OPEN_DRAIN_NMOS ||
-		   val == QPNP_GPIO_OUT_BUF_OPEN_DRAIN_PMOS) &&
-		   (q_spec->subtype == Q_GPIO_SUBTYPE_GPIOC_4CH ||
-		   (q_spec->subtype == Q_GPIO_SUBTYPE_GPIOC_8CH))))
-			return -EINVAL;
-		break;
-	case Q_GPIO_CFG_INVERT:
-		if (val >= QPNP_GPIO_INVERT_INVALID)
-			return -EINVAL;
-		break;
-	case Q_GPIO_CFG_PULL:
-		if (val >= QPNP_GPIO_PULL_INVALID)
-			return -EINVAL;
-		break;
-	case Q_GPIO_CFG_VIN_SEL:
-		if (val >= QPNP_GPIO_VIN_INVALID)
-			return -EINVAL;
-		break;
-	case Q_GPIO_CFG_OUT_STRENGTH:
-		if (val >= QPNP_GPIO_OUT_STRENGTH_INVALID ||
-		    val == 0)
-			return -EINVAL;
-		break;
-	case Q_GPIO_CFG_SRC_SELECT:
-		if (val >= QPNP_GPIO_SRC_INVALID)
-			return -EINVAL;
-		break;
-	case Q_GPIO_CFG_MASTER_EN:
-		if (val >= QPNP_GPIO_MASTER_INVALID)
-			return -EINVAL;
-		break;
-	default:
-		pr_err("invalid param type %u specified\n", idx);
-		return -EINVAL;
-	}
-	return 0;
-}
-
-static int qpnp_gpio_debugfs_set(void *data, u64 val)
-{
-	enum qpnp_gpio_param_type *idx = data;
-	struct qpnp_gpio_spec *q_spec;
-	struct qpnp_gpio_chip *q_chip;
-	struct qpnp_gpio_reg cfg = {};
-	int rc;
-
-	q_spec = container_of(idx, struct qpnp_gpio_spec, params[*idx]);
-	q_chip = q_spec->q_chip;
-
-	rc = qpnp_gpio_check_reg_val(*idx, q_spec, val);
-	if (rc)
-		return rc;
-
-	rc = qpnp_gpio_reg_attr(*idx, &cfg);
-	if (rc)
-		return rc;
-	q_reg_clr_set(&q_spec->regs[cfg.idx], cfg.shift, cfg.mask, val);
-	rc = spmi_ext_register_writel(q_chip->spmi->ctrl, q_spec->slave,
-				      Q_REG_ADDR(q_spec, cfg.addr),
-				      &q_spec->regs[cfg.idx], 1);
-
-	return rc;
-}
-DEFINE_SIMPLE_ATTRIBUTE(qpnp_gpio_fops, qpnp_gpio_debugfs_get,
-			qpnp_gpio_debugfs_set, "%llu\n");
-
-#define DEBUGFS_BUF_SIZE 11 /* supports 2^32 in decimal */
-
-struct qpnp_gpio_debugfs_args {
-	enum qpnp_gpio_param_type type;
-	const char *filename;
-};
-
-static struct qpnp_gpio_debugfs_args dfs_args[] = {
-	{ Q_GPIO_CFG_DIRECTION, "direction" },
-	{ Q_GPIO_CFG_OUTPUT_TYPE, "output_type" },
-	{ Q_GPIO_CFG_INVERT, "invert" },
-	{ Q_GPIO_CFG_PULL, "pull" },
-	{ Q_GPIO_CFG_VIN_SEL, "vin_sel" },
-	{ Q_GPIO_CFG_OUT_STRENGTH, "out_strength" },
-	{ Q_GPIO_CFG_SRC_SELECT, "src_select" },
-	{ Q_GPIO_CFG_MASTER_EN, "master_en" }
-};
-
-static int qpnp_gpio_debugfs_create(struct qpnp_gpio_chip *q_chip)
-{
-	struct spmi_device *spmi = q_chip->spmi;
-	struct device *dev = &spmi->dev;
-	struct qpnp_gpio_spec *q_spec;
-	enum qpnp_gpio_param_type *params;
-	enum qpnp_gpio_param_type type;
-	char pmic_gpio[DEBUGFS_BUF_SIZE];
-	const char *filename;
-	struct dentry *dfs, *dfs_io_dir;
-	int i, j;
-
-	BUG_ON(Q_NUM_PARAMS != ARRAY_SIZE(dfs_args));
-
-	q_chip->dfs_dir = debugfs_create_dir(dev->of_node->name,
-							driver_dfs_dir);
-	if (q_chip->dfs_dir == NULL) {
-		dev_err(dev, "%s: cannot register chip debugfs directory %s\n",
-						__func__, dev->of_node->name);
-		return -ENODEV;
-	}
-
-	for (i = 0; i < spmi->num_dev_node; i++) {
-		q_spec = qpnp_chip_gpio_get_spec(q_chip, i);
-		params = q_spec->params;
-		snprintf(pmic_gpio, DEBUGFS_BUF_SIZE, "%u", q_spec->pmic_gpio);
-		dfs_io_dir = debugfs_create_dir(pmic_gpio,
-							q_chip->dfs_dir);
-		if (dfs_io_dir == NULL)
-			goto dfs_err;
-
-		for (j = 0; j < Q_NUM_PARAMS; j++) {
-			type = dfs_args[j].type;
-			filename = dfs_args[j].filename;
-
-			params[type] = type;
-			dfs = debugfs_create_file(
-					filename,
-					S_IRUGO | S_IWUSR,
-					dfs_io_dir,
-					&q_spec->params[type],
-					&qpnp_gpio_fops);
-			if (dfs == NULL)
-				goto dfs_err;
-		}
-	}
-	return 0;
-dfs_err:
-	dev_err(dev, "%s: cannot register debugfs for pmic gpio %u on"
-				     " chip %s\n", __func__,
-				     q_spec->pmic_gpio, dev->of_node->name);
-	debugfs_remove_recursive(q_chip->dfs_dir);
-	return -ENFILE;
-}
-#else
-static int qpnp_gpio_debugfs_create(struct qpnp_gpio_chip *q_chip)
-{
-	return 0;
-}
-#endif
-
-static int qpnp_gpio_probe(struct spmi_device *spmi)
-{
-	struct qpnp_gpio_chip *q_chip;
-	struct resource *res;
-	struct qpnp_gpio_spec *q_spec;
-	int i, rc;
-	int lowest_gpio = UINT_MAX, highest_gpio = 0;
-	u32 intspec[3], gpio;
-	char buf[2];
-
-	q_chip = kzalloc(sizeof(*q_chip), GFP_KERNEL);
-	if (!q_chip) {
-		dev_err(&spmi->dev, "%s: Can't allocate gpio_chip\n",
-								__func__);
-		return -ENOMEM;
-	}
-	q_chip->spmi = spmi;
-	dev_set_drvdata(&spmi->dev, q_chip);
-
-	mutex_lock(&qpnp_gpio_chips_lock);
-	list_add(&q_chip->chip_list, &qpnp_gpio_chips);
-	mutex_unlock(&qpnp_gpio_chips_lock);
-
-	/* first scan through nodes to find the range required for allocation */
-	for (i = 0; i < spmi->num_dev_node; i++) {
-		rc = of_property_read_u32(spmi->dev_node[i].of_node,
-							"qcom,gpio-num", &gpio);
-		if (rc) {
-			dev_err(&spmi->dev, "%s: unable to get"
-				" qcom,gpio-num property\n", __func__);
-			goto err_probe;
-		}
-
-		if (gpio < lowest_gpio)
-			lowest_gpio = gpio;
-		if (gpio > highest_gpio)
-			highest_gpio = gpio;
-	}
-
-	if (highest_gpio < lowest_gpio) {
-		dev_err(&spmi->dev, "%s: no device nodes specified in"
-					" topology\n", __func__);
-		rc = -EINVAL;
-		goto err_probe;
-	} else if (lowest_gpio == 0) {
-		dev_err(&spmi->dev, "%s: 0 is not a valid PMIC GPIO\n",
-								__func__);
-		rc = -EINVAL;
-		goto err_probe;
-	}
-
-	q_chip->pmic_gpio_lowest = lowest_gpio;
-	q_chip->pmic_gpio_highest = highest_gpio;
-
-	/* allocate gpio lookup tables */
-	q_chip->pmic_gpios = kzalloc(sizeof(struct qpnp_gpio_spec *) *
-						highest_gpio - lowest_gpio + 1,
-						GFP_KERNEL);
-	q_chip->chip_gpios = kzalloc(sizeof(struct qpnp_gpio_spec *) *
-						spmi->num_dev_node, GFP_KERNEL);
-	if (!q_chip->pmic_gpios || !q_chip->chip_gpios) {
-		dev_err(&spmi->dev, "%s: unable to allocate memory\n",
-								__func__);
-		rc = -ENOMEM;
-		goto err_probe;
-	}
-
-	/* get interrupt controller device_node */
-	q_chip->int_ctrl = of_irq_find_parent(spmi->dev.of_node);
-	if (!q_chip->int_ctrl) {
-		dev_err(&spmi->dev, "%s: Can't find interrupt parent\n",
-								__func__);
-		rc = -EINVAL;
-		goto err_probe;
-	}
-
-	/* now scan through again and populate the lookup table */
-	for (i = 0; i < spmi->num_dev_node; i++) {
-		res = qpnp_get_resource(spmi, i, IORESOURCE_MEM, 0);
-		if (!res) {
-			dev_err(&spmi->dev, "%s: node %s is missing has no"
-				" base address definition\n",
-				__func__, spmi->dev_node[i].of_node->full_name);
-		}
-
-		rc = of_property_read_u32(spmi->dev_node[i].of_node,
-							"qcom,gpio-num", &gpio);
-		if (rc) {
-			dev_err(&spmi->dev, "%s: unable to get"
-				" qcom,gpio-num property\n", __func__);
-			goto err_probe;
-		}
-
-		q_spec = kzalloc(sizeof(struct qpnp_gpio_spec),
-							GFP_KERNEL);
-		if (!q_spec) {
-			dev_err(&spmi->dev, "%s: unable to allocate"
-						" memory\n",
-					__func__);
-			rc = -ENOMEM;
-			goto err_probe;
-		}
-
-		q_spec->slave = spmi->sid;
-		q_spec->offset = res->start;
-		q_spec->gpio_chip_idx = i;
-		q_spec->pmic_gpio = gpio;
-		q_spec->node = spmi->dev_node[i].of_node;
-		q_spec->q_chip = q_chip;
-
-		rc = spmi_ext_register_readl(spmi->ctrl, q_spec->slave,
-				Q_REG_ADDR(q_spec, Q_REG_TYPE), &buf[0], 2);
-		if (rc) {
-			dev_err(&spmi->dev, "%s: unable to read type regs\n",
-						__func__);
-			goto err_probe;
-		}
-		q_spec->type	= buf[0];
-		q_spec->subtype = buf[1];
-
-		/* call into irq_domain to get irq mapping */
-		intspec[0] = q_chip->spmi->sid;
-		intspec[1] = (q_spec->offset >> 8) & 0xFF;
-		intspec[2] = 0;
-		q_spec->irq = irq_create_of_mapping(q_chip->int_ctrl,
-							intspec, 3);
-		if (!q_spec->irq) {
-			dev_err(&spmi->dev, "%s: invalid irq for gpio"
-					" %u\n", __func__, gpio);
-			rc = -EINVAL;
-			goto err_probe;
-		}
-		/* initialize lookup table params */
-		qpnp_pmic_gpio_set_spec(q_chip, gpio, q_spec);
-		qpnp_chip_gpio_set_spec(q_chip, i, q_spec);
-	}
-
-	q_chip->gpio_chip.base = -1;
-	q_chip->gpio_chip.ngpio = spmi->num_dev_node;
-	q_chip->gpio_chip.label = "qpnp-gpio";
-	q_chip->gpio_chip.direction_input = qpnp_gpio_direction_input;
-	q_chip->gpio_chip.direction_output = qpnp_gpio_direction_output;
-	q_chip->gpio_chip.to_irq = qpnp_gpio_to_irq;
-	q_chip->gpio_chip.get = qpnp_gpio_get;
-	q_chip->gpio_chip.set = qpnp_gpio_set;
-	q_chip->gpio_chip.dev = &spmi->dev;
-	q_chip->gpio_chip.of_xlate = qpnp_gpio_of_gpio_xlate;
-	q_chip->gpio_chip.of_gpio_n_cells = 2;
-	q_chip->gpio_chip.can_sleep = 0;
-
-	rc = gpiochip_add(&q_chip->gpio_chip);
-	if (rc) {
-		dev_err(&spmi->dev, "%s: Can't add gpio chip, rc = %d\n",
-								__func__, rc);
-		goto err_probe;
-	}
-
-	/* now configure gpio config defaults if they exist */
-	for (i = 0; i < spmi->num_dev_node; i++) {
-		q_spec = qpnp_chip_gpio_get_spec(q_chip, i);
-		if (WARN_ON(!q_spec)) {
-			rc = -ENODEV;
-			goto err_probe;
-		}
-
-		rc = qpnp_gpio_cache_regs(q_chip, q_spec);
-		if (rc)
-			goto err_probe;
-
-		rc = qpnp_gpio_apply_config(q_chip, q_spec);
-		if (rc)
-			goto err_probe;
-	}
-
-	dev_dbg(&spmi->dev, "%s: gpio_chip registered between %d-%u\n",
-			__func__, q_chip->gpio_chip.base,
-			(q_chip->gpio_chip.base + q_chip->gpio_chip.ngpio) - 1);
-
-	rc = qpnp_gpio_debugfs_create(q_chip);
-	if (rc) {
-		dev_err(&spmi->dev, "%s: debugfs creation failed\n", __func__);
-		goto err_probe;
-	}
-
-	return 0;
-
-err_probe:
-	qpnp_gpio_free_chip(q_chip);
-	return rc;
-}
-
-static int qpnp_gpio_remove(struct spmi_device *spmi)
-{
-	struct qpnp_gpio_chip *q_chip = dev_get_drvdata(&spmi->dev);
-
-	debugfs_remove_recursive(q_chip->dfs_dir);
-
-	return qpnp_gpio_free_chip(q_chip);
-}
-
-static struct of_device_id spmi_match_table[] = {
-	{	.compatible = "qcom,qpnp-gpio",
-	},
-	{}
-};
-
-static const struct spmi_device_id qpnp_gpio_id[] = {
-	{ "qcom,qpnp-gpio", 0 },
-	{ }
-};
-MODULE_DEVICE_TABLE(spmi, qpnp_gpio_id);
-
-static struct spmi_driver qpnp_gpio_driver = {
-	.driver		= {
-		.name	= "qcom,qpnp-gpio",
-		.of_match_table = spmi_match_table,
-	},
-	.probe		= qpnp_gpio_probe,
-	.remove		= qpnp_gpio_remove,
-	.id_table	= qpnp_gpio_id,
-};
-
-static int __init qpnp_gpio_init(void)
-{
-#ifdef CONFIG_GPIO_QPNP_DEBUG
-	driver_dfs_dir = debugfs_create_dir("qpnp_gpio", NULL);
-	if (driver_dfs_dir == NULL)
-		pr_err("Cannot register top level debugfs directory\n");
-#endif
-
-	return spmi_driver_register(&qpnp_gpio_driver);
-}
-
-static void __exit qpnp_gpio_exit(void)
-{
-#ifdef CONFIG_GPIO_QPNP_DEBUG
-	debugfs_remove_recursive(driver_dfs_dir);
-#endif
-	spmi_driver_unregister(&qpnp_gpio_driver);
-}
-
-MODULE_DESCRIPTION("QPNP PMIC gpio driver");
-MODULE_LICENSE("GPL v2");
-
-module_init(qpnp_gpio_init);
-module_exit(qpnp_gpio_exit);
diff --git a/drivers/gpio/qpnp-pin.c b/drivers/gpio/qpnp-pin.c
new file mode 100644
index 0000000..bbcba81
--- /dev/null
+++ b/drivers/gpio/qpnp-pin.c
@@ -0,0 +1,1335 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/interrupt.h>
+#include <linux/types.h>
+#include <linux/spmi.h>
+#include <linux/platform_device.h>
+#include <linux/debugfs.h>
+#include <linux/gpio.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
+#include <linux/of_irq.h>
+#include <linux/export.h>
+#include <linux/module.h>
+#include <linux/qpnp/pin.h>
+
+#define Q_REG_ADDR(q_spec, reg_index)	\
+		((q_spec)->offset + (reg_index))
+
+#define Q_REG_STATUS1			0x8
+#define Q_NUM_CTL_REGS			0xD
+
+/* type registers base address offsets */
+#define Q_REG_TYPE			0x4
+#define Q_REG_SUBTYPE			0x5
+
+/* gpio peripheral type and subtype values */
+#define Q_GPIO_TYPE			0x10
+#define Q_GPIO_SUBTYPE_GPIO_4CH		0x0
+#define Q_GPIO_SUBTYPE_GPIOC_4CH	0x2
+#define Q_GPIO_SUBTYPE_GPIO_8CH		0x4
+#define Q_GPIO_SUBTYPE_GPIOC_8CH	0x6
+
+/* mpp peripheral type and subtype values */
+#define Q_MPP_TYPE			0x11
+#define Q_MPP_SUBTYPE_4CH_NO_ANA_OUT	0x1
+#define Q_MPP_SUBTYPE_4CH_NO_SINK	0x2
+#define Q_MPP_SUBTYPE_4CH_FULL_FUNC	0x3
+#define Q_MPP_SUBTYPE_8CH_FULL_FUNC	0x7
+
+/* control register base address offsets */
+#define Q_REG_MODE_CTL			0x40
+#define Q_REG_DIG_VIN_CTL		0x41
+#define Q_REG_DIG_PULL_CTL		0x42
+#define Q_REG_DIG_IN_CTL		0x43
+#define Q_REG_DIG_OUT_CTL		0x45
+#define Q_REG_EN_CTL			0x46
+#define Q_REG_AOUT_CTL			0x48
+#define Q_REG_AIN_CTL			0x4A
+#define Q_REG_SINK_CTL			0x4C
+
+/* control register regs array indices */
+#define Q_REG_I_MODE_CTL		0
+#define Q_REG_I_DIG_VIN_CTL		1
+#define Q_REG_I_DIG_PULL_CTL		2
+#define Q_REG_I_DIG_IN_CTL		3
+#define Q_REG_I_DIG_OUT_CTL		5
+#define Q_REG_I_EN_CTL			6
+#define Q_REG_I_AOUT_CTL		8
+#define Q_REG_I_AIN_CTL			10
+#define Q_REG_I_SINK_CTL		12
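+
+/*
+ * Each regs[] index above equals its register's offset from
+ * Q_REG_MODE_CTL (0x40), which is why the defines skip 4, 7, 9 and 11:
+ * those offsets sit inside the contiguous control-register block that
+ * is read/written as a whole, but have no individually named field.
+ */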
+
+/* control reg: mode */
+#define Q_REG_OUT_INVERT_SHIFT		0
+#define Q_REG_OUT_INVERT_MASK		0x1
+#define Q_REG_SRC_SEL_SHIFT		1
+#define Q_REG_SRC_SEL_MASK		0xE
+#define Q_REG_MODE_SEL_SHIFT		4
+#define Q_REG_MODE_SEL_MASK		0x70
+
+/* control reg: dig_vin */
+#define Q_REG_VIN_SHIFT			0
+#define Q_REG_VIN_MASK			0x7
+
+/* control reg: dig_pull */
+#define Q_REG_PULL_SHIFT		0
+#define Q_REG_PULL_MASK			0x7
+
+/* control reg: dig_out */
+#define Q_REG_OUT_STRENGTH_SHIFT	0
+#define Q_REG_OUT_STRENGTH_MASK		0x3
+#define Q_REG_OUT_TYPE_SHIFT		4
+#define Q_REG_OUT_TYPE_MASK		0x30
+
+/* control reg: en */
+#define Q_REG_MASTER_EN_SHIFT		7
+#define Q_REG_MASTER_EN_MASK		0x80
+
+/* control reg: ana_out */
+#define Q_REG_AOUT_REF_SHIFT		0
+#define Q_REG_AOUT_REF_MASK		0x7
+
+/* control reg: ana_in */
+#define Q_REG_AIN_ROUTE_SHIFT		0
+#define Q_REG_AIN_ROUTE_MASK		0x7
+
+/* control reg: sink */
+#define Q_REG_CS_OUT_SHIFT		0
+#define Q_REG_CS_OUT_MASK		0x7
+
+enum qpnp_pin_param_type {
+	Q_PIN_CFG_MODE,
+	Q_PIN_CFG_OUTPUT_TYPE,
+	Q_PIN_CFG_INVERT,
+	Q_PIN_CFG_PULL,
+	Q_PIN_CFG_VIN_SEL,
+	Q_PIN_CFG_OUT_STRENGTH,
+	Q_PIN_CFG_SELECT,
+	Q_PIN_CFG_MASTER_EN,
+	Q_PIN_CFG_AOUT_REF,
+	Q_PIN_CFG_AIN_ROUTE,
+	Q_PIN_CFG_CS_OUT,
+	Q_PIN_CFG_INVALID,
+};
+
+#define Q_NUM_PARAMS			Q_PIN_CFG_INVALID
+
+/* param error checking */
+#define QPNP_PIN_MODE_INVALID		3
+#define QPNP_PIN_INVERT_INVALID		2
+#define QPNP_PIN_OUT_BUF_INVALID	3
+#define QPNP_PIN_VIN_4CH_INVALID	5
+#define QPNP_PIN_VIN_8CH_INVALID	8
+#define QPNP_PIN_GPIO_PULL_INVALID	6
+#define QPNP_PIN_MPP_PULL_INVALID	4
+#define QPNP_PIN_OUT_STRENGTH_INVALID	4
+#define QPNP_PIN_SRC_INVALID		8
+#define QPNP_PIN_MASTER_INVALID		2
+#define QPNP_PIN_AOUT_REF_INVALID	8
+#define QPNP_PIN_AIN_ROUTE_INVALID	8
+#define QPNP_PIN_CS_OUT_INVALID		8
+
+struct qpnp_pin_spec {
+	uint8_t slave;			/* 0-15 */
+	uint16_t offset;		/* 16-bit peripheral base address */
+	uint32_t gpio_chip_idx;		/* offset from gpio_chip base */
+	uint32_t pmic_pin;		/* PMIC pin number */
+	int irq;			/* logical IRQ number */
+	u8 regs[Q_NUM_CTL_REGS];	/* Control regs */
+	u8 num_ctl_regs;		/* usable number on this pin */
+	u8 type;			/* peripheral type */
+	u8 subtype;			/* peripheral subtype */
+	struct device_node *node;
+	enum qpnp_pin_param_type params[Q_NUM_PARAMS];
+	struct qpnp_pin_chip *q_chip;
+};
+
+struct qpnp_pin_chip {
+	struct gpio_chip	gpio_chip;
+	struct spmi_device	*spmi;
+	struct qpnp_pin_spec	**pmic_pins;
+	struct qpnp_pin_spec	**chip_gpios;
+	uint32_t		pmic_pin_lowest;
+	uint32_t		pmic_pin_highest;
+	struct device_node	*int_ctrl;
+	struct list_head	chip_list;
+	struct dentry		*dfs_dir;
+};
+
+static LIST_HEAD(qpnp_pin_chips);
+static DEFINE_MUTEX(qpnp_pin_chips_lock);
+
+static inline void qpnp_pmic_pin_set_spec(struct qpnp_pin_chip *q_chip,
+					      uint32_t pmic_pin,
+					      struct qpnp_pin_spec *spec)
+{
+	q_chip->pmic_pins[pmic_pin - q_chip->pmic_pin_lowest] = spec;
+}
+
+static inline struct qpnp_pin_spec *qpnp_pmic_pin_get_spec(
+						struct qpnp_pin_chip *q_chip,
+						uint32_t pmic_pin)
+{
+	if (pmic_pin < q_chip->pmic_pin_lowest ||
+	    pmic_pin > q_chip->pmic_pin_highest)
+		return NULL;
+
+	return q_chip->pmic_pins[pmic_pin - q_chip->pmic_pin_lowest];
+}
+
+static inline struct qpnp_pin_spec *qpnp_chip_gpio_get_spec(
+						struct qpnp_pin_chip *q_chip,
+						uint32_t chip_gpio)
+{
+	if (chip_gpio >= q_chip->gpio_chip.ngpio)
+		return NULL;
+
+	return q_chip->chip_gpios[chip_gpio];
+}
+
+static inline void qpnp_chip_gpio_set_spec(struct qpnp_pin_chip *q_chip,
+					      uint32_t chip_gpio,
+					      struct qpnp_pin_spec *spec)
+{
+	q_chip->chip_gpios[chip_gpio] = spec;
+}
+
+/*
+ * Determines whether a specified param's configuration is correct.
+ * This check is two tier. First a check is done whether the hardware
+ * supports this param and value requested. The second check validates
+ * that the configuration is correct, given the fact that the hardware
+ * supports it.
+ *
+ * Returns
+ *	-ENXIO if the hardware does not support this param.
+ *	-EINVAL if the hardware does support this param, but the
+ *	requested value is outside the supported range.
+ */
+static int qpnp_pin_check_config(enum qpnp_pin_param_type idx,
+				 struct qpnp_pin_spec *q_spec, uint32_t val)
+{
+	switch (idx) {
+	case Q_PIN_CFG_MODE:
+		if (val >= QPNP_PIN_MODE_INVALID)
+			return -EINVAL;
+		break;
+	case Q_PIN_CFG_OUTPUT_TYPE:
+		if (q_spec->type != Q_GPIO_TYPE)
+			return -ENXIO;
+		if ((val == QPNP_PIN_OUT_BUF_OPEN_DRAIN_NMOS ||
+		    val == QPNP_PIN_OUT_BUF_OPEN_DRAIN_PMOS) &&
+		    (q_spec->subtype == Q_GPIO_SUBTYPE_GPIOC_4CH ||
+		    (q_spec->subtype == Q_GPIO_SUBTYPE_GPIOC_8CH)))
+			return -EINVAL;
+		else if (val >= QPNP_PIN_OUT_BUF_INVALID)
+			return -EINVAL;
+		break;
+	case Q_PIN_CFG_INVERT:
+		if (val >= QPNP_PIN_INVERT_INVALID)
+			return -EINVAL;
+		break;
+	case Q_PIN_CFG_PULL:
+		if (q_spec->type == Q_GPIO_TYPE &&
+		    val >= QPNP_PIN_GPIO_PULL_INVALID)
+			return -EINVAL;
+		if (q_spec->type == Q_MPP_TYPE &&
+		    val >= QPNP_PIN_MPP_PULL_INVALID)
+			return -EINVAL;
+		break;
+	case Q_PIN_CFG_VIN_SEL:
+		if (val >= QPNP_PIN_VIN_8CH_INVALID)
+			return -EINVAL;
+		else if (val >= QPNP_PIN_VIN_4CH_INVALID) {
+			if (q_spec->type == Q_GPIO_TYPE &&
+			   (q_spec->subtype == Q_GPIO_SUBTYPE_GPIO_4CH ||
+			    q_spec->subtype == Q_GPIO_SUBTYPE_GPIOC_4CH))
+				return -EINVAL;
+			if (q_spec->type == Q_MPP_TYPE &&
+			   (q_spec->subtype == Q_MPP_SUBTYPE_4CH_NO_ANA_OUT ||
+			    q_spec->subtype == Q_MPP_SUBTYPE_4CH_NO_SINK ||
+			    q_spec->subtype == Q_MPP_SUBTYPE_4CH_FULL_FUNC))
+				return -EINVAL;
+		}
+		break;
+	case Q_PIN_CFG_OUT_STRENGTH:
+		if (q_spec->type != Q_GPIO_TYPE)
+			return -ENXIO;
+		if (val >= QPNP_PIN_OUT_STRENGTH_INVALID ||
+		    val == 0)
+			return -EINVAL;
+		break;
+	case Q_PIN_CFG_SELECT:
+		if (q_spec->type == Q_MPP_TYPE &&
+		    (val == QPNP_PIN_SEL_FUNC_1 ||
+		     val == QPNP_PIN_SEL_FUNC_2))
+			return -EINVAL;
+		if (val >= QPNP_PIN_SRC_INVALID)
+			return -EINVAL;
+		break;
+	case Q_PIN_CFG_MASTER_EN:
+		if (val >= QPNP_PIN_MASTER_INVALID)
+			return -EINVAL;
+		break;
+	case Q_PIN_CFG_AOUT_REF:
+		if (q_spec->type != Q_MPP_TYPE)
+			return -ENXIO;
+		if (q_spec->subtype == Q_MPP_SUBTYPE_4CH_NO_ANA_OUT)
+			return -ENXIO;
+		if (val >= QPNP_PIN_AOUT_REF_INVALID)
+			return -EINVAL;
+		break;
+	case Q_PIN_CFG_AIN_ROUTE:
+		if (q_spec->type != Q_MPP_TYPE)
+			return -ENXIO;
+		if (val >= QPNP_PIN_AIN_ROUTE_INVALID)
+			return -EINVAL;
+		break;
+	case Q_PIN_CFG_CS_OUT:
+		if (q_spec->type != Q_MPP_TYPE)
+			return -ENXIO;
+		if (q_spec->subtype == Q_MPP_SUBTYPE_4CH_NO_SINK)
+			return -ENXIO;
+		if (val >= QPNP_PIN_CS_OUT_INVALID)
+			return -EINVAL;
+		break;
+
+	default:
+		pr_err("invalid param type %u specified\n", idx);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+#define Q_CHK_INVALID(idx, q_spec, val) \
+	(qpnp_pin_check_config(idx, q_spec, val) == -EINVAL)
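+
+/*
+ * Q_CHK_INVALID() only reports -EINVAL as a failure: a parameter that
+ * the pin hardware simply does not implement (-ENXIO) is not a caller
+ * error, and the matching Q_HAVE_HW_SP() test in _qpnp_pin_config()
+ * just skips such parameters.
+ */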
+
+static int qpnp_pin_check_constraints(struct qpnp_pin_spec *q_spec,
+				      struct qpnp_pin_cfg *param)
+{
+	int pin = q_spec->pmic_pin;
+	const char *name;
+
+	name = (q_spec->type == Q_GPIO_TYPE) ? "gpio" : "mpp";
+
+	if (Q_CHK_INVALID(Q_PIN_CFG_MODE, q_spec, param->mode))
+		pr_err("invalid mode for %s %d\n", name, pin);
+	else if (Q_CHK_INVALID(Q_PIN_CFG_INVERT, q_spec, param->invert))
+		pr_err("invalid invert polarity for %s %d\n", name, pin);
+	else if (Q_CHK_INVALID(Q_PIN_CFG_SELECT, q_spec, param->select))
+		pr_err("invalid source select for %s %d\n", name, pin);
+	else if (Q_CHK_INVALID(Q_PIN_CFG_OUT_STRENGTH,
+						q_spec, param->out_strength))
+		pr_err("invalid out strength for %s %d\n", name, pin);
+	else if (Q_CHK_INVALID(Q_PIN_CFG_OUTPUT_TYPE,
+						 q_spec, param->output_type))
+		pr_err("invalid out type for %s %d\n", name, pin);
+	else if (Q_CHK_INVALID(Q_PIN_CFG_VIN_SEL, q_spec, param->vin_sel))
+		pr_err("invalid vin select value for %s %d\n", name, pin);
+	else if (Q_CHK_INVALID(Q_PIN_CFG_PULL, q_spec, param->pull))
+		pr_err("invalid pull value for %s %d\n", name, pin);
+	else if (Q_CHK_INVALID(Q_PIN_CFG_MASTER_EN, q_spec, param->master_en))
+		pr_err("invalid master_en value for %s %d\n", name, pin);
+	else if (Q_CHK_INVALID(Q_PIN_CFG_AOUT_REF, q_spec, param->aout_ref))
+		pr_err("invalid aout_ref value for %s %d\n", name, pin);
+	else if (Q_CHK_INVALID(Q_PIN_CFG_AIN_ROUTE, q_spec, param->ain_route))
+		pr_err("invalid ain_route value for %s %d\n", name, pin);
+	else if (Q_CHK_INVALID(Q_PIN_CFG_CS_OUT, q_spec, param->cs_out))
+		pr_err("invalid cs_out value for %s %d\n", name, pin);
+	else
+		return 0;
+
+	return -EINVAL;
+}
+
+static inline u8 q_reg_get(u8 *reg, int shift, int mask)
+{
+	return (*reg & mask) >> shift;
+}
+
+static inline void q_reg_set(u8 *reg, int shift, int mask, int value)
+{
+	*reg |= (value << shift) & mask;
+}
+
+static inline void q_reg_clr_set(u8 *reg, int shift, int mask, int value)
+{
+	*reg &= ~mask;
+	*reg |= (value << shift) & mask;
+}
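+
+/*
+ * Worked example with the mode field (shift 4, mask 0x70): for
+ * *reg == 0x15, q_reg_clr_set(reg, 4, 0x70, 2) clears bits 6:4 and
+ * writes 2 there, giving *reg == 0x25, and q_reg_get(reg, 4, 0x70)
+ * then returns 2.
+ */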
+
+/*
+ * Calculate the minimum number of registers that must be read / written
+ * in order to satisfy the full feature set of the given pin.
+ */
+static int qpnp_pin_ctl_regs_init(struct qpnp_pin_spec *q_spec)
+{
+	if (q_spec->type == Q_GPIO_TYPE)
+		q_spec->num_ctl_regs = 7;
+	else if (q_spec->type == Q_MPP_TYPE)
+		switch (q_spec->subtype) {
+		case Q_MPP_SUBTYPE_4CH_NO_SINK:
+			q_spec->num_ctl_regs = 12;
+			break;
+		case Q_MPP_SUBTYPE_4CH_NO_ANA_OUT:
+		case Q_MPP_SUBTYPE_4CH_FULL_FUNC:
+		case Q_MPP_SUBTYPE_8CH_FULL_FUNC:
+			q_spec->num_ctl_regs = 13;
+			break;
+		default:
+			pr_err("Invalid MPP subtype 0x%x\n", q_spec->subtype);
+			return -EINVAL;
+		}
+	else {
+		pr_err("Invalid type 0x%x\n", q_spec->type);
+		return -EINVAL;
+	}
+	return 0;
+}
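+
+/*
+ * GPIO pins only need MODE_CTL through EN_CTL (indices 0-6, i.e. 7
+ * registers). MPPs add the analog output, analog input and current
+ * sink controls: full-function MPPs cache 13 registers, and the
+ * no-sink variant caches 12, stopping short of SINK_CTL.
+ */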
+
+static int qpnp_pin_read_regs(struct qpnp_pin_chip *q_chip,
+			       struct qpnp_pin_spec *q_spec, u16 addr, u8 *buf)
+{
+	int bytes_left = q_spec->num_ctl_regs;
+	int rc;
+	u8 *reg_p = buf;
+
+	while (bytes_left > 0) {
+		rc = spmi_ext_register_readl(q_chip->spmi->ctrl, q_spec->slave,
+					addr, reg_p,
+					bytes_left < 8 ? bytes_left : 8);
+		if (rc)
+			return rc;
+		bytes_left -= 8;
+		addr += 8;
+		reg_p += 8;
+	}
+	return 0;
+}
+
+static int qpnp_pin_write_regs(struct qpnp_pin_chip *q_chip,
+				struct qpnp_pin_spec *q_spec, u16 addr, u8 *buf)
+{
+	int bytes_left = q_spec->num_ctl_regs;
+	int rc;
+	u8 *reg_p = buf;
+
+	while (bytes_left > 0) {
+		rc = spmi_ext_register_writel(q_chip->spmi->ctrl, q_spec->slave,
+					addr, reg_p,
+					bytes_left < 8 ? bytes_left : 8);
+		if (rc)
+			return rc;
+		bytes_left -= 8;
+		addr += 8;
+		reg_p += 8;
+	}
+	return 0;
+}
+
+static int qpnp_pin_cache_regs(struct qpnp_pin_chip *q_chip,
+			       struct qpnp_pin_spec *q_spec)
+{
+	int rc;
+	struct device *dev = &q_chip->spmi->dev;
+
+	rc = qpnp_pin_read_regs(q_chip, q_spec,
+				 Q_REG_ADDR(q_spec, Q_REG_MODE_CTL),
+				 &q_spec->regs[Q_REG_I_MODE_CTL]);
+	if (rc)
+		dev_err(dev, "%s: unable to read control regs\n", __func__);
+
+	return rc;
+}
+
+#define Q_HAVE_HW_SP(idx, q_spec, val) \
+	(qpnp_pin_check_config(idx, q_spec, val) == 0)
+
+static int _qpnp_pin_config(struct qpnp_pin_chip *q_chip,
+			    struct qpnp_pin_spec *q_spec,
+			    struct qpnp_pin_cfg *param)
+{
+	struct device *dev = &q_chip->spmi->dev;
+	int rc;
+
+	rc = qpnp_pin_check_constraints(q_spec, param);
+	if (rc)
+		goto gpio_cfg;
+
+	/* set mode */
+	if (Q_HAVE_HW_SP(Q_PIN_CFG_MODE, q_spec, param->mode))
+		q_reg_clr_set(&q_spec->regs[Q_REG_I_MODE_CTL],
+			  Q_REG_MODE_SEL_SHIFT, Q_REG_MODE_SEL_MASK,
+			  param->mode);
+
+	/* output specific configuration */
+	if (Q_HAVE_HW_SP(Q_PIN_CFG_INVERT, q_spec, param->invert))
+		q_reg_clr_set(&q_spec->regs[Q_REG_I_MODE_CTL],
+			  Q_REG_OUT_INVERT_SHIFT, Q_REG_OUT_INVERT_MASK,
+			  param->invert);
+	if (Q_HAVE_HW_SP(Q_PIN_CFG_SELECT, q_spec, param->select))
+		q_reg_clr_set(&q_spec->regs[Q_REG_I_MODE_CTL],
+			  Q_REG_SRC_SEL_SHIFT, Q_REG_SRC_SEL_MASK,
+			  param->select);
+	if (Q_HAVE_HW_SP(Q_PIN_CFG_OUT_STRENGTH, q_spec, param->out_strength))
+		q_reg_clr_set(&q_spec->regs[Q_REG_I_DIG_OUT_CTL],
+			  Q_REG_OUT_STRENGTH_SHIFT, Q_REG_OUT_STRENGTH_MASK,
+			  param->out_strength);
+	if (Q_HAVE_HW_SP(Q_PIN_CFG_OUTPUT_TYPE, q_spec, param->output_type))
+		q_reg_clr_set(&q_spec->regs[Q_REG_I_DIG_OUT_CTL],
+			  Q_REG_OUT_TYPE_SHIFT, Q_REG_OUT_TYPE_MASK,
+			  param->output_type);
+
+	/* config applicable for both input / output */
+	if (Q_HAVE_HW_SP(Q_PIN_CFG_VIN_SEL, q_spec, param->vin_sel))
+		q_reg_clr_set(&q_spec->regs[Q_REG_I_DIG_VIN_CTL],
+			  Q_REG_VIN_SHIFT, Q_REG_VIN_MASK,
+			  param->vin_sel);
+	if (Q_HAVE_HW_SP(Q_PIN_CFG_PULL, q_spec, param->pull))
+		q_reg_clr_set(&q_spec->regs[Q_REG_I_DIG_PULL_CTL],
+			  Q_REG_PULL_SHIFT, Q_REG_PULL_MASK,
+			  param->pull);
+	if (Q_HAVE_HW_SP(Q_PIN_CFG_MASTER_EN, q_spec, param->master_en))
+		q_reg_clr_set(&q_spec->regs[Q_REG_I_EN_CTL],
+			  Q_REG_MASTER_EN_SHIFT, Q_REG_MASTER_EN_MASK,
+			  param->master_en);
+
+	/* mpp specific config */
+	if (Q_HAVE_HW_SP(Q_PIN_CFG_AOUT_REF, q_spec, param->aout_ref))
+		q_reg_clr_set(&q_spec->regs[Q_REG_I_AOUT_CTL],
+			  Q_REG_AOUT_REF_SHIFT, Q_REG_AOUT_REF_MASK,
+			  param->aout_ref);
+	if (Q_HAVE_HW_SP(Q_PIN_CFG_AIN_ROUTE, q_spec, param->ain_route))
+		q_reg_clr_set(&q_spec->regs[Q_REG_I_AIN_CTL],
+			  Q_REG_AIN_ROUTE_SHIFT, Q_REG_AIN_ROUTE_MASK,
+			  param->ain_route);
+	if (Q_HAVE_HW_SP(Q_PIN_CFG_CS_OUT, q_spec, param->cs_out))
+		q_reg_clr_set(&q_spec->regs[Q_REG_I_SINK_CTL],
+			  Q_REG_CS_OUT_SHIFT, Q_REG_CS_OUT_MASK,
+			  param->cs_out);
+
+	rc = qpnp_pin_write_regs(q_chip, q_spec,
+				 Q_REG_ADDR(q_spec, Q_REG_MODE_CTL),
+				 &q_spec->regs[Q_REG_I_MODE_CTL]);
+	if (rc) {
+		dev_err(&q_chip->spmi->dev, "%s: unable to write control regs\n",
+								__func__);
+		goto gpio_cfg;
+	}
+
+	return 0;
+
+gpio_cfg:
+	dev_err(dev, "%s: unable to set default config for pmic gpio %d\n",
+						__func__, q_spec->pmic_pin);
+
+	return rc;
+}
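+
+/*
+ * Note that every Q_HAVE_HW_SP() branch above only updates the cached
+ * q_spec->regs[] copy; the hardware is touched once, by the single
+ * qpnp_pin_write_regs() block write at the end of _qpnp_pin_config().
+ */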
+
+int qpnp_pin_config(int gpio, struct qpnp_pin_cfg *param)
+{
+	int rc, chip_offset;
+	struct qpnp_pin_chip *q_chip;
+	struct qpnp_pin_spec *q_spec = NULL;
+	struct gpio_chip *gpio_chip;
+
+	if (param == NULL)
+		return -EINVAL;
+
+	mutex_lock(&qpnp_pin_chips_lock);
+	list_for_each_entry(q_chip, &qpnp_pin_chips, chip_list) {
+		gpio_chip = &q_chip->gpio_chip;
+		if (gpio >= gpio_chip->base
+				&& gpio < gpio_chip->base + gpio_chip->ngpio) {
+			chip_offset = gpio - gpio_chip->base;
+			q_spec = qpnp_chip_gpio_get_spec(q_chip, chip_offset);
+			if (WARN_ON(!q_spec)) {
+				mutex_unlock(&qpnp_pin_chips_lock);
+				return -ENODEV;
+			}
+			break;
+		}
+	}
+	mutex_unlock(&qpnp_pin_chips_lock);
+
+	if (!q_spec)
+		return -ENODEV;
+
+	rc = _qpnp_pin_config(q_chip, q_spec, param);
+
+	return rc;
+}
+EXPORT_SYMBOL(qpnp_pin_config);
+
+#define Q_MAX_CHIP_NAME 128
+int qpnp_pin_map(const char *name, uint32_t pmic_pin)
+{
+	struct qpnp_pin_chip *q_chip;
+	struct qpnp_pin_spec *q_spec = NULL;
+
+	mutex_lock(&qpnp_pin_chips_lock);
+	list_for_each_entry(q_chip, &qpnp_pin_chips, chip_list) {
+		if (strncmp(q_chip->gpio_chip.label, name,
+							Q_MAX_CHIP_NAME) != 0)
+			continue;
+		if (q_chip->pmic_pin_lowest <= pmic_pin &&
+		    q_chip->pmic_pin_highest >= pmic_pin) {
+			q_spec = qpnp_pmic_pin_get_spec(q_chip, pmic_pin);
+			mutex_unlock(&qpnp_pin_chips_lock);
+			if (WARN_ON(!q_spec))
+				return -ENODEV;
+			return q_chip->gpio_chip.base + q_spec->gpio_chip_idx;
+		}
+	}
+	mutex_unlock(&qpnp_pin_chips_lock);
+	return -EINVAL;
+}
+EXPORT_SYMBOL(qpnp_pin_map);
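+
+/*
+ * Illustrative use of the two exported calls from a client driver
+ * (the chip label, pin number and field values below are made-up
+ * examples; real values come from the board's device tree):
+ *
+ *	struct qpnp_pin_cfg cfg = {
+ *		.mode = QPNP_PIN_MODE_DIG_OUT,
+ *		.invert = 0,
+ *		.master_en = 1,
+ *	};
+ *	int gpio = qpnp_pin_map("pm8941-gpio", 3);
+ *
+ *	if (gpio >= 0 && !gpio_request(gpio, "example") &&
+ *	    !qpnp_pin_config(gpio, &cfg))
+ *		gpio_set_value(gpio, 1);
+ *
+ * qpnp_pin_map() translates a (chip label, PMIC pin number) pair into
+ * the Linux gpio number registered by this driver, and
+ * qpnp_pin_config() applies the whole register configuration in one
+ * block write.
+ */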
+
+static int qpnp_pin_to_irq(struct gpio_chip *gpio_chip, unsigned offset)
+{
+	struct qpnp_pin_chip *q_chip = dev_get_drvdata(gpio_chip->dev);
+	struct qpnp_pin_spec *q_spec;
+
+	q_spec = qpnp_chip_gpio_get_spec(q_chip, offset);
+	if (!q_spec)
+		return -EINVAL;
+
+	return q_spec->irq;
+}
+
+static int qpnp_pin_get(struct gpio_chip *gpio_chip, unsigned offset)
+{
+	int rc, ret_val;
+	struct qpnp_pin_chip *q_chip = dev_get_drvdata(gpio_chip->dev);
+	struct qpnp_pin_spec *q_spec = NULL;
+	u8 buf[1];
+
+	if (WARN_ON(!q_chip))
+		return -ENODEV;
+
+	q_spec = qpnp_chip_gpio_get_spec(q_chip, offset);
+	if (WARN_ON(!q_spec))
+		return -ENODEV;
+
+	/* gpio val is from RT status iff input is enabled */
+	if ((q_spec->regs[Q_REG_I_MODE_CTL] & Q_REG_MODE_SEL_MASK)
+						== QPNP_PIN_MODE_DIG_IN) {
+		/* INT_RT_STS */
+		rc = spmi_ext_register_readl(q_chip->spmi->ctrl, q_spec->slave,
+				Q_REG_ADDR(q_spec, Q_REG_STATUS1),
+				&buf[0], 1);
+		if (rc)
+			return rc;
+		return buf[0];
+	} else {
+		ret_val = (q_spec->regs[Q_REG_I_MODE_CTL] &
+			       Q_REG_OUT_INVERT_MASK) >> Q_REG_OUT_INVERT_SHIFT;
+		return ret_val;
+	}
+}
+
+static int __qpnp_pin_set(struct qpnp_pin_chip *q_chip,
+			   struct qpnp_pin_spec *q_spec, int value)
+{
+	int rc;
+
+	if (!q_chip || !q_spec)
+		return -EINVAL;
+
+	if (value)
+		q_reg_clr_set(&q_spec->regs[Q_REG_I_MODE_CTL],
+			  Q_REG_OUT_INVERT_SHIFT, Q_REG_OUT_INVERT_MASK, 1);
+	else
+		q_reg_clr_set(&q_spec->regs[Q_REG_I_MODE_CTL],
+			  Q_REG_OUT_INVERT_SHIFT, Q_REG_OUT_INVERT_MASK, 0);
+
+	rc = spmi_ext_register_writel(q_chip->spmi->ctrl, q_spec->slave,
+			      Q_REG_ADDR(q_spec, Q_REG_I_MODE_CTL),
+			      &q_spec->regs[Q_REG_I_MODE_CTL], 1);
+	if (rc)
+		dev_err(&q_chip->spmi->dev, "%s: spmi write failed\n",
+								__func__);
+	return rc;
+}
+
+
+static void qpnp_pin_set(struct gpio_chip *gpio_chip,
+		unsigned offset, int value)
+{
+	struct qpnp_pin_chip *q_chip = dev_get_drvdata(gpio_chip->dev);
+	struct qpnp_pin_spec *q_spec;
+
+	if (WARN_ON(!q_chip))
+		return;
+
+	q_spec = qpnp_chip_gpio_get_spec(q_chip, offset);
+	if (WARN_ON(!q_spec))
+		return;
+
+	__qpnp_pin_set(q_chip, q_spec, value);
+}
+
+static int qpnp_pin_set_mode(struct qpnp_pin_chip *q_chip,
+				   struct qpnp_pin_spec *q_spec, int mode)
+{
+	int rc;
+
+	if (!q_chip || !q_spec)
+		return -EINVAL;
+
+	if (mode >= QPNP_PIN_MODE_INVALID) {
+		pr_err("invalid mode specification %d\n", mode);
+		return -EINVAL;
+	}
+
+	q_reg_clr_set(&q_spec->regs[Q_REG_I_MODE_CTL],
+			Q_REG_MODE_SEL_SHIFT,
+			Q_REG_MODE_SEL_MASK,
+			mode);
+
+	rc = spmi_ext_register_writel(q_chip->spmi->ctrl, q_spec->slave,
+			      Q_REG_ADDR(q_spec, Q_REG_I_MODE_CTL),
+			      &q_spec->regs[Q_REG_I_MODE_CTL], 1);
+	return rc;
+}
+
+static int qpnp_pin_direction_input(struct gpio_chip *gpio_chip,
+		unsigned offset)
+{
+	struct qpnp_pin_chip *q_chip = dev_get_drvdata(gpio_chip->dev);
+	struct qpnp_pin_spec *q_spec;
+
+	if (WARN_ON(!q_chip))
+		return -ENODEV;
+
+	q_spec = qpnp_chip_gpio_get_spec(q_chip, offset);
+	if (WARN_ON(!q_spec))
+		return -ENODEV;
+
+	return qpnp_pin_set_mode(q_chip, q_spec, QPNP_PIN_MODE_DIG_IN);
+}
+
+static int qpnp_pin_direction_output(struct gpio_chip *gpio_chip,
+		unsigned offset,
+		int val)
+{
+	int rc;
+	struct qpnp_pin_chip *q_chip = dev_get_drvdata(gpio_chip->dev);
+	struct qpnp_pin_spec *q_spec;
+
+	if (WARN_ON(!q_chip))
+		return -ENODEV;
+
+	q_spec = qpnp_chip_gpio_get_spec(q_chip, offset);
+	if (WARN_ON(!q_spec))
+		return -ENODEV;
+
+	rc = __qpnp_pin_set(q_chip, q_spec, val);
+	if (rc)
+		return rc;
+
+	rc = qpnp_pin_set_mode(q_chip, q_spec, QPNP_PIN_MODE_DIG_OUT);
+
+	return rc;
+}
+
+static int qpnp_pin_of_gpio_xlate(struct gpio_chip *gpio_chip,
+				   const struct of_phandle_args *gpio_spec,
+				   u32 *flags)
+{
+	struct qpnp_pin_chip *q_chip = dev_get_drvdata(gpio_chip->dev);
+	struct qpnp_pin_spec *q_spec;
+
+	if (WARN_ON(gpio_chip->of_gpio_n_cells < 2)) {
+		pr_err("of_gpio_n_cells < 2\n");
+		return -EINVAL;
+	}
+
+	q_spec = qpnp_pmic_pin_get_spec(q_chip, gpio_spec->args[0]);
+	if (!q_spec) {
+		pr_err("no such PMIC gpio %u in device topology\n",
+							gpio_spec->args[0]);
+		return -EINVAL;
+	}
+
+	if (flags)
+		*flags = gpio_spec->args[1];
+
+	return q_spec->gpio_chip_idx;
+}
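+
+/*
+ * With of_gpio_n_cells == 2 a consumer node references a pin as
+ * <&controller pmic-pin-number flags>, e.g. (the phandle label is
+ * only an example):
+ *
+ *	enable-gpios = <&pm_gpio 7 0>;
+ *
+ * args[0] is the PMIC pin number looked up above and args[1] is
+ * handed back through *flags.
+ */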
+
+static int qpnp_pin_apply_config(struct qpnp_pin_chip *q_chip,
+				  struct qpnp_pin_spec *q_spec)
+{
+	struct qpnp_pin_cfg param;
+	struct device_node *node = q_spec->node;
+	int rc;
+
+	param.mode	   = q_reg_get(&q_spec->regs[Q_REG_I_MODE_CTL],
+				       Q_REG_MODE_SEL_SHIFT,
+				       Q_REG_MODE_SEL_MASK);
+	param.output_type  = q_reg_get(&q_spec->regs[Q_REG_I_DIG_OUT_CTL],
+				       Q_REG_OUT_TYPE_SHIFT,
+				       Q_REG_OUT_TYPE_MASK);
+	param.invert	   = q_reg_get(&q_spec->regs[Q_REG_I_MODE_CTL],
+				       Q_REG_OUT_INVERT_SHIFT,
+				       Q_REG_OUT_INVERT_MASK);
+	param.pull	   = q_reg_get(&q_spec->regs[Q_REG_I_MODE_CTL],
+				       Q_REG_PULL_SHIFT, Q_REG_PULL_MASK);
+	param.vin_sel	   = q_reg_get(&q_spec->regs[Q_REG_I_DIG_VIN_CTL],
+				       Q_REG_VIN_SHIFT, Q_REG_VIN_MASK);
+	param.out_strength = q_reg_get(&q_spec->regs[Q_REG_I_DIG_OUT_CTL],
+				       Q_REG_OUT_STRENGTH_SHIFT,
+				       Q_REG_OUT_STRENGTH_MASK);
+	param.select   = q_reg_get(&q_spec->regs[Q_REG_I_MODE_CTL],
+				       Q_REG_SRC_SEL_SHIFT, Q_REG_SRC_SEL_MASK);
+	param.master_en    = q_reg_get(&q_spec->regs[Q_REG_I_EN_CTL],
+				       Q_REG_MASTER_EN_SHIFT,
+				       Q_REG_MASTER_EN_MASK);
+	param.aout_ref    = q_reg_get(&q_spec->regs[Q_REG_I_AOUT_CTL],
+				       Q_REG_AOUT_REF_SHIFT,
+				       Q_REG_AOUT_REF_MASK);
+	param.ain_route    = q_reg_get(&q_spec->regs[Q_REG_I_AIN_CTL],
+				       Q_REG_AIN_ROUTE_SHIFT,
+				       Q_REG_AIN_ROUTE_MASK);
+	param.cs_out    = q_reg_get(&q_spec->regs[Q_REG_I_SINK_CTL],
+				       Q_REG_CS_OUT_SHIFT,
+				       Q_REG_CS_OUT_MASK);
+
+	of_property_read_u32(node, "qcom,mode",
+		&param.mode);
+	of_property_read_u32(node, "qcom,output-type",
+		&param.output_type);
+	of_property_read_u32(node, "qcom,invert",
+		&param.invert);
+	of_property_read_u32(node, "qcom,pull",
+		&param.pull);
+	of_property_read_u32(node, "qcom,vin-sel",
+		&param.vin_sel);
+	of_property_read_u32(node, "qcom,out-strength",
+		&param.out_strength);
+	of_property_read_u32(node, "qcom,src-select",
+		&param.select);
+	of_property_read_u32(node, "qcom,master-en",
+		&param.master_en);
+	of_property_read_u32(node, "qcom,aout-ref",
+		&param.aout_ref);
+	of_property_read_u32(node, "qcom,ain-route",
+		&param.ain_route);
+	of_property_read_u32(node, "qcom,cs-out",
+		&param.cs_out);
+	rc = _qpnp_pin_config(q_chip, q_spec, &param);
+
+	return rc;
+}
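+
+/*
+ * of_property_read_u32() leaves its output argument untouched when a
+ * property is absent, so every field of 'param' above keeps the value
+ * read back from the hardware unless the device tree explicitly
+ * overrides it.
+ */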
+
+static int qpnp_pin_free_chip(struct qpnp_pin_chip *q_chip)
+{
+	struct spmi_device *spmi = q_chip->spmi;
+	int rc, i;
+
+	if (q_chip->chip_gpios)
+		for (i = 0; i < spmi->num_dev_node; i++)
+			kfree(q_chip->chip_gpios[i]);
+
+	mutex_lock(&qpnp_pin_chips_lock);
+	list_del(&q_chip->chip_list);
+	mutex_unlock(&qpnp_pin_chips_lock);
+	rc = gpiochip_remove(&q_chip->gpio_chip);
+	if (rc)
+		dev_err(&q_chip->spmi->dev, "%s: unable to remove gpio\n",
+				__func__);
+	kfree(q_chip->chip_gpios);
+	kfree(q_chip->pmic_pins);
+	kfree(q_chip);
+	return rc;
+}
+
+#ifdef CONFIG_GPIO_QPNP_PIN_DEBUG
+struct qpnp_pin_reg {
+	uint32_t addr;
+	uint32_t idx;
+	uint32_t shift;
+	uint32_t mask;
+};
+
+static struct dentry *driver_dfs_dir;
+
+static int qpnp_pin_reg_attr(enum qpnp_pin_param_type type,
+			     struct qpnp_pin_reg *cfg)
+{
+	switch (type) {
+	case Q_PIN_CFG_MODE:
+		cfg->addr = Q_REG_MODE_CTL;
+		cfg->idx = Q_REG_I_MODE_CTL;
+		cfg->shift = Q_REG_MODE_SEL_SHIFT;
+		cfg->mask = Q_REG_MODE_SEL_MASK;
+		break;
+	case Q_PIN_CFG_OUTPUT_TYPE:
+		cfg->addr = Q_REG_DIG_OUT_CTL;
+		cfg->idx = Q_REG_I_DIG_OUT_CTL;
+		cfg->shift = Q_REG_OUT_TYPE_SHIFT;
+		cfg->mask = Q_REG_OUT_TYPE_MASK;
+		break;
+	case Q_PIN_CFG_INVERT:
+		cfg->addr = Q_REG_MODE_CTL;
+		cfg->idx = Q_REG_I_MODE_CTL;
+		cfg->shift = Q_REG_OUT_INVERT_SHIFT;
+		cfg->mask = Q_REG_OUT_INVERT_MASK;
+		break;
+	case Q_PIN_CFG_PULL:
+		cfg->addr = Q_REG_DIG_PULL_CTL;
+		cfg->idx = Q_REG_I_DIG_PULL_CTL;
+		cfg->shift = Q_REG_PULL_SHIFT;
+		cfg->mask = Q_REG_PULL_MASK;
+		break;
+	case Q_PIN_CFG_VIN_SEL:
+		cfg->addr = Q_REG_DIG_VIN_CTL;
+		cfg->idx = Q_REG_I_DIG_VIN_CTL;
+		cfg->shift = Q_REG_VIN_SHIFT;
+		cfg->mask = Q_REG_VIN_MASK;
+		break;
+	case Q_PIN_CFG_OUT_STRENGTH:
+		cfg->addr = Q_REG_DIG_OUT_CTL;
+		cfg->idx = Q_REG_I_DIG_OUT_CTL;
+		cfg->shift = Q_REG_OUT_STRENGTH_SHIFT;
+		cfg->mask = Q_REG_OUT_STRENGTH_MASK;
+		break;
+	case Q_PIN_CFG_SELECT:
+		cfg->addr = Q_REG_MODE_CTL;
+		cfg->idx = Q_REG_I_MODE_CTL;
+		cfg->shift = Q_REG_SRC_SEL_SHIFT;
+		cfg->mask = Q_REG_SRC_SEL_MASK;
+		break;
+	case Q_PIN_CFG_MASTER_EN:
+		cfg->addr = Q_REG_EN_CTL;
+		cfg->idx = Q_REG_I_EN_CTL;
+		cfg->shift = Q_REG_MASTER_EN_SHIFT;
+		cfg->mask = Q_REG_MASTER_EN_MASK;
+		break;
+	case Q_PIN_CFG_AOUT_REF:
+		cfg->addr = Q_REG_AOUT_CTL;
+		cfg->idx = Q_REG_I_AOUT_CTL;
+		cfg->shift = Q_REG_AOUT_REF_SHIFT;
+		cfg->mask = Q_REG_AOUT_REF_MASK;
+		break;
+	case Q_PIN_CFG_AIN_ROUTE:
+		cfg->addr = Q_REG_AIN_CTL;
+		cfg->idx = Q_REG_I_AIN_CTL;
+		cfg->shift = Q_REG_AIN_ROUTE_SHIFT;
+		cfg->mask = Q_REG_AIN_ROUTE_MASK;
+		break;
+	case Q_PIN_CFG_CS_OUT:
+		cfg->addr = Q_REG_SINK_CTL;
+		cfg->idx = Q_REG_I_SINK_CTL;
+		cfg->shift = Q_REG_CS_OUT_SHIFT;
+		cfg->mask = Q_REG_CS_OUT_MASK;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int qpnp_pin_debugfs_get(void *data, u64 *val)
+{
+	enum qpnp_pin_param_type *idx = data;
+	struct qpnp_pin_spec *q_spec;
+	struct qpnp_pin_reg cfg = {};
+	int rc;
+
+	rc = qpnp_pin_reg_attr(*idx, &cfg);
+	if (rc)
+		return rc;
+	q_spec = container_of(idx, struct qpnp_pin_spec, params[*idx]);
+	*val = q_reg_get(&q_spec->regs[cfg.idx], cfg.shift, cfg.mask);
+	return 0;
+}
+
+static int qpnp_pin_debugfs_set(void *data, u64 val)
+{
+	enum qpnp_pin_param_type *idx = data;
+	struct qpnp_pin_spec *q_spec;
+	struct qpnp_pin_chip *q_chip;
+	struct qpnp_pin_reg cfg = {};
+	int rc;
+
+	q_spec = container_of(idx, struct qpnp_pin_spec, params[*idx]);
+	q_chip = q_spec->q_chip;
+
+	rc = qpnp_pin_check_config(*idx, q_spec, val);
+	if (rc)
+		return rc;
+
+	rc = qpnp_pin_reg_attr(*idx, &cfg);
+	if (rc)
+		return rc;
+	q_reg_clr_set(&q_spec->regs[cfg.idx], cfg.shift, cfg.mask, val);
+	rc = spmi_ext_register_writel(q_chip->spmi->ctrl, q_spec->slave,
+				      Q_REG_ADDR(q_spec, cfg.addr),
+				      &q_spec->regs[cfg.idx], 1);
+
+	return rc;
+}
+DEFINE_SIMPLE_ATTRIBUTE(qpnp_pin_fops, qpnp_pin_debugfs_get,
+			qpnp_pin_debugfs_set, "%llu\n");
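+
+/*
+ * The private data of each debugfs file is a pointer into one slot of
+ * q_spec->params[]; because every slot is initialized to its own index
+ * (params[type] = type below), that single pointer identifies both the
+ * parameter and, via container_of(), the pin it belongs to.
+ */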
+
+#define DEBUGFS_BUF_SIZE 11 /* supports 2^32 in decimal */
+
+struct qpnp_pin_debugfs_args {
+	enum qpnp_pin_param_type type;
+	const char *filename;
+};
+
+static struct qpnp_pin_debugfs_args dfs_args[] = {
+	{ Q_PIN_CFG_MODE, "mode" },
+	{ Q_PIN_CFG_OUTPUT_TYPE, "output_type" },
+	{ Q_PIN_CFG_INVERT, "invert" },
+	{ Q_PIN_CFG_PULL, "pull" },
+	{ Q_PIN_CFG_VIN_SEL, "vin_sel" },
+	{ Q_PIN_CFG_OUT_STRENGTH, "out_strength" },
+	{ Q_PIN_CFG_SELECT, "select" },
+	{ Q_PIN_CFG_MASTER_EN, "master_en" },
+	{ Q_PIN_CFG_AOUT_REF, "aout_ref" },
+	{ Q_PIN_CFG_AIN_ROUTE, "ain_route" },
+	{ Q_PIN_CFG_CS_OUT, "cs_out" },
+};
+
+static int qpnp_pin_debugfs_create(struct qpnp_pin_chip *q_chip)
+{
+	struct spmi_device *spmi = q_chip->spmi;
+	struct device *dev = &spmi->dev;
+	struct qpnp_pin_spec *q_spec;
+	enum qpnp_pin_param_type *params;
+	enum qpnp_pin_param_type type;
+	char pmic_pin[DEBUGFS_BUF_SIZE];
+	const char *filename;
+	struct dentry *dfs, *dfs_io_dir;
+	int i, j, rc;
+
+	BUG_ON(Q_NUM_PARAMS != ARRAY_SIZE(dfs_args));
+
+	q_chip->dfs_dir = debugfs_create_dir(q_chip->gpio_chip.label,
+							driver_dfs_dir);
+	if (q_chip->dfs_dir == NULL) {
+		dev_err(dev, "%s: cannot register chip debugfs directory %s\n",
+						__func__, dev->of_node->name);
+		return -ENODEV;
+	}
+
+	for (i = 0; i < spmi->num_dev_node; i++) {
+		q_spec = qpnp_chip_gpio_get_spec(q_chip, i);
+		params = q_spec->params;
+		snprintf(pmic_pin, DEBUGFS_BUF_SIZE, "%u", q_spec->pmic_pin);
+		dfs_io_dir = debugfs_create_dir(pmic_pin, q_chip->dfs_dir);
+		if (dfs_io_dir == NULL)
+			goto dfs_err;
+
+		for (j = 0; j < Q_NUM_PARAMS; j++) {
+			type = dfs_args[j].type;
+			filename = dfs_args[j].filename;
+
+			/*
+			 * Use a value of '0' to see if the pin has even basic
+			 * support for a function. Do not create a file if
+			 * it doesn't.
+			 */
+			rc = qpnp_pin_check_config(type, q_spec, 0);
+			if (rc == -ENXIO)
+				continue;
+
+			params[type] = type;
+			dfs = debugfs_create_file(
+					filename,
+					S_IRUGO | S_IWUSR,
+					dfs_io_dir,
+					&q_spec->params[type],
+					&qpnp_pin_fops);
+			if (dfs == NULL)
+				goto dfs_err;
+		}
+	}
+	return 0;
+dfs_err:
+	dev_err(dev, "%s: cannot register debugfs for pmic gpio %u on chip %s\n",
+			__func__, q_spec->pmic_pin, dev->of_node->name);
+	debugfs_remove_recursive(q_chip->dfs_dir);
+	return -ENFILE;
+}
+#else
+static int qpnp_pin_debugfs_create(struct qpnp_pin_chip *q_chip)
+{
+	return 0;
+}
+#endif
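+
+/*
+ * With CONFIG_GPIO_QPNP_PIN_DEBUG set, the hierarchy created above is
+ * qpnp_pin/<chip label>/<pmic pin>/<parameter> under debugfs (usually
+ * mounted at /sys/kernel/debug). Reads of a parameter file come from
+ * the cached register copy, while writes are validated and pushed out
+ * over SPMI.
+ */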
+
+static int qpnp_pin_probe(struct spmi_device *spmi)
+{
+	struct qpnp_pin_chip *q_chip;
+	struct qpnp_pin_spec *q_spec;
+	struct resource *res;
+	struct spmi_resource *d_node;
+	int i, rc;
+	int lowest_gpio = UINT_MAX, highest_gpio = 0;
+	u32 intspec[3], gpio;
+	char buf[2];
+	const char *dev_name;
+
+	dev_name = spmi_get_primary_dev_name(spmi);
+	if (!dev_name) {
+		dev_err(&spmi->dev, "%s: label binding undefined for node %s\n",
+					__func__, spmi->dev.of_node->full_name);
+		return -EINVAL;
+	}
+
+	q_chip = kzalloc(sizeof(*q_chip), GFP_KERNEL);
+	if (!q_chip) {
+		dev_err(&spmi->dev, "%s: Can't allocate gpio_chip\n",
+								__func__);
+		return -ENOMEM;
+	}
+	q_chip->spmi = spmi;
+	dev_set_drvdata(&spmi->dev, q_chip);
+
+	mutex_lock(&qpnp_pin_chips_lock);
+	list_add(&q_chip->chip_list, &qpnp_pin_chips);
+	mutex_unlock(&qpnp_pin_chips_lock);
+
+	/* first scan through nodes to find the range required for allocation */
+	for (i = 0; i < spmi->num_dev_node; i++) {
+		rc = of_property_read_u32(spmi->dev_node[i].of_node,
+						"qcom,pin-num", &gpio);
+		if (rc) {
+			dev_err(&spmi->dev, "%s: unable to get qcom,pin-num property\n",
+								__func__);
+			goto err_probe;
+		}
+
+		if (gpio < lowest_gpio)
+			lowest_gpio = gpio;
+		if (gpio > highest_gpio)
+			highest_gpio = gpio;
+	}
+
+	if (highest_gpio < lowest_gpio) {
+		dev_err(&spmi->dev, "%s: no device nodes specified in topology\n",
+								__func__);
+		rc = -EINVAL;
+		goto err_probe;
+	} else if (lowest_gpio == 0) {
+		dev_err(&spmi->dev, "%s: 0 is not a valid PMIC GPIO\n",
+								__func__);
+		rc = -EINVAL;
+		goto err_probe;
+	}
+
+	q_chip->pmic_pin_lowest = lowest_gpio;
+	q_chip->pmic_pin_highest = highest_gpio;
+
+	/* allocate gpio lookup tables */
+	q_chip->pmic_pins = kzalloc(sizeof(struct qpnp_pin_spec *) *
+					(highest_gpio - lowest_gpio + 1),
+						GFP_KERNEL);
+	q_chip->chip_gpios = kzalloc(sizeof(struct qpnp_pin_spec *) *
+						spmi->num_dev_node, GFP_KERNEL);
+	if (!q_chip->pmic_pins || !q_chip->chip_gpios) {
+		dev_err(&spmi->dev, "%s: unable to allocate memory\n",
+								__func__);
+		rc = -ENOMEM;
+		goto err_probe;
+	}
+
+	/* get interrupt controller device_node */
+	q_chip->int_ctrl = of_irq_find_parent(spmi->dev.of_node);
+	if (!q_chip->int_ctrl) {
+		dev_err(&spmi->dev, "%s: Can't find interrupt parent\n",
+								__func__);
+		rc = -EINVAL;
+		goto err_probe;
+	}
+
+	/* now scan through again and populate the lookup table */
+	for (i = 0; i < spmi->num_dev_node; i++) {
+		d_node = &spmi->dev_node[i];
+		res = spmi_get_resource(spmi, d_node, IORESOURCE_MEM, 0);
+		if (!res) {
+			dev_err(&spmi->dev, "%s: node %s has no base address definition\n",
+				__func__, d_node->of_node->full_name);
+			rc = -EINVAL;
+			goto err_probe;
+		}
+
+		rc = of_property_read_u32(d_node->of_node,
+							"qcom,pin-num", &gpio);
+		if (rc) {
+			dev_err(&spmi->dev, "%s: unable to get qcom,pin-num property\n",
+								__func__);
+			goto err_probe;
+		}
+
+		q_spec = kzalloc(sizeof(struct qpnp_pin_spec),
+							GFP_KERNEL);
+		if (!q_spec) {
+			dev_err(&spmi->dev, "%s: unable to allocate memory\n",
+								__func__);
+			rc = -ENOMEM;
+			goto err_probe;
+		}
+
+		q_spec->slave = spmi->sid;
+		q_spec->offset = res->start;
+		q_spec->gpio_chip_idx = i;
+		q_spec->pmic_pin = gpio;
+		q_spec->node = d_node->of_node;
+		q_spec->q_chip = q_chip;
+
+		rc = spmi_ext_register_readl(spmi->ctrl, q_spec->slave,
+				Q_REG_ADDR(q_spec, Q_REG_TYPE), &buf[0], 2);
+		if (rc) {
+			dev_err(&spmi->dev, "%s: unable to read type regs\n",
+						__func__);
+			goto err_probe;
+		}
+		q_spec->type	= buf[0];
+		q_spec->subtype = buf[1];
+
+		rc = qpnp_pin_ctl_regs_init(q_spec);
+		if (rc)
+			goto err_probe;
+
+		/* call into irq_domain to get irq mapping */
+		intspec[0] = q_chip->spmi->sid;
+		intspec[1] = (q_spec->offset >> 8) & 0xFF;
+		intspec[2] = 0;
+		q_spec->irq = irq_create_of_mapping(q_chip->int_ctrl,
+							intspec, 3);
+		if (!q_spec->irq) {
+			dev_err(&spmi->dev, "%s: invalid irq for gpio %u\n",
+								__func__, gpio);
+			rc = -EINVAL;
+			goto err_probe;
+		}
+		/* initialize lookup table params */
+		qpnp_pmic_pin_set_spec(q_chip, gpio, q_spec);
+		qpnp_chip_gpio_set_spec(q_chip, i, q_spec);
+	}
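+
+	/*
+	 * Both device-node scans are done at this point: the first pass
+	 * only sized the pmic_pins[] lookup table from the lowest and
+	 * highest pin numbers, while the loop above allocated a
+	 * qpnp_pin_spec per node, read its type/subtype registers and
+	 * created its interrupt mapping.
+	 */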
+
+	q_chip->gpio_chip.base = -1;
+	q_chip->gpio_chip.ngpio = spmi->num_dev_node;
+	q_chip->gpio_chip.label = dev_name;
+	q_chip->gpio_chip.direction_input = qpnp_pin_direction_input;
+	q_chip->gpio_chip.direction_output = qpnp_pin_direction_output;
+	q_chip->gpio_chip.to_irq = qpnp_pin_to_irq;
+	q_chip->gpio_chip.get = qpnp_pin_get;
+	q_chip->gpio_chip.set = qpnp_pin_set;
+	q_chip->gpio_chip.dev = &spmi->dev;
+	q_chip->gpio_chip.of_xlate = qpnp_pin_of_gpio_xlate;
+	q_chip->gpio_chip.of_gpio_n_cells = 2;
+	q_chip->gpio_chip.can_sleep = 0;
+
+	rc = gpiochip_add(&q_chip->gpio_chip);
+	if (rc) {
+		dev_err(&spmi->dev, "%s: Can't add gpio chip, rc = %d\n",
+								__func__, rc);
+		goto err_probe;
+	}
+
+	/* now configure gpio config defaults if they exist */
+	for (i = 0; i < spmi->num_dev_node; i++) {
+		q_spec = qpnp_chip_gpio_get_spec(q_chip, i);
+		if (WARN_ON(!q_spec)) {
+			rc = -ENODEV;
+			goto err_probe;
+		}
+
+		rc = qpnp_pin_cache_regs(q_chip, q_spec);
+		if (rc)
+			goto err_probe;
+
+		rc = qpnp_pin_apply_config(q_chip, q_spec);
+		if (rc)
+			goto err_probe;
+	}
+
+	dev_dbg(&spmi->dev, "%s: gpio_chip registered between %d-%u\n",
+			__func__, q_chip->gpio_chip.base,
+			(q_chip->gpio_chip.base + q_chip->gpio_chip.ngpio) - 1);
+
+	rc = qpnp_pin_debugfs_create(q_chip);
+	if (rc) {
+		dev_err(&spmi->dev, "%s: debugfs creation failed\n", __func__);
+		goto err_probe;
+	}
+
+	return 0;
+
+err_probe:
+	qpnp_pin_free_chip(q_chip);
+	return rc;
+}
+
+static int qpnp_pin_remove(struct spmi_device *spmi)
+{
+	struct qpnp_pin_chip *q_chip = dev_get_drvdata(&spmi->dev);
+
+	debugfs_remove_recursive(q_chip->dfs_dir);
+
+	return qpnp_pin_free_chip(q_chip);
+}
+
+static struct of_device_id spmi_match_table[] = {
+	{	.compatible = "qcom,qpnp-pin",
+	},
+	{}
+};
+
+static const struct spmi_device_id qpnp_pin_id[] = {
+	{ "qcom,qpnp-pin", 0 },
+	{ }
+};
+MODULE_DEVICE_TABLE(spmi, qpnp_pin_id);
+
+static struct spmi_driver qpnp_pin_driver = {
+	.driver		= {
+		.name	= "qcom,qpnp-pin",
+		.of_match_table = spmi_match_table,
+	},
+	.probe		= qpnp_pin_probe,
+	.remove		= qpnp_pin_remove,
+	.id_table	= qpnp_pin_id,
+};
+
+static int __init qpnp_pin_init(void)
+{
+#ifdef CONFIG_GPIO_QPNP_PIN_DEBUG
+	driver_dfs_dir = debugfs_create_dir("qpnp_pin", NULL);
+	if (driver_dfs_dir == NULL)
+		pr_err("Cannot register top level debugfs directory\n");
+#endif
+
+	return spmi_driver_register(&qpnp_pin_driver);
+}
+
+static void __exit qpnp_pin_exit(void)
+{
+#ifdef CONFIG_GPIO_QPNP_PIN_DEBUG
+	debugfs_remove_recursive(driver_dfs_dir);
+#endif
+	spmi_driver_unregister(&qpnp_pin_driver);
+}
+
+MODULE_DESCRIPTION("QPNP PMIC gpio driver");
+MODULE_LICENSE("GPL v2");
+
+module_init(qpnp_pin_init);
+module_exit(qpnp_pin_exit);
diff --git a/drivers/gpu/msm/a3xx_reg.h b/drivers/gpu/msm/a3xx_reg.h
index bd58b4e..f5ee1d7 100644
--- a/drivers/gpu/msm/a3xx_reg.h
+++ b/drivers/gpu/msm/a3xx_reg.h
@@ -509,6 +509,6 @@
 #define RBBM_BLOCK_ID_MARB_3           0x2b
 
 /* RBBM_CLOCK_CTL default value */
-#define A3XX_RBBM_CLOCK_CTL_DEFAULT 0x00000000
+#define A3XX_RBBM_CLOCK_CTL_DEFAULT 0xBFFFFFFF
 
 #endif
diff --git a/drivers/gpu/msm/adreno.c b/drivers/gpu/msm/adreno.c
index 4991a2e..f5cb888 100644
--- a/drivers/gpu/msm/adreno.c
+++ b/drivers/gpu/msm/adreno.c
@@ -267,7 +267,7 @@
 				KGSL_IOMMU_CONTEXT_USER))
 		goto done;
 
-	if (adreno_is_a225(adreno_dev))
+	if (cpu_is_msm8960())
 		cmds += adreno_add_change_mh_phys_limit_cmds(cmds, 0xFFFFF000,
 					device->mmu.setstate_memory.gpuaddr +
 					KGSL_IOMMU_SETSTATE_NOP_OFFSET);
@@ -362,7 +362,7 @@
 		}
 	}
 
-	if (adreno_is_a225(adreno_dev))
+	if (cpu_is_msm8960())
 		cmds += adreno_add_change_mh_phys_limit_cmds(cmds,
 			reg_map_desc[num_iommu_units - 1]->gpuaddr - PAGE_SIZE,
 			device->mmu.setstate_memory.gpuaddr +
diff --git a/drivers/gpu/msm/adreno_a3xx.c b/drivers/gpu/msm/adreno_a3xx.c
index 58a0963..a6b4210 100644
--- a/drivers/gpu/msm/adreno_a3xx.c
+++ b/drivers/gpu/msm/adreno_a3xx.c
@@ -2558,11 +2558,6 @@
 {
 	struct kgsl_device *device = &adreno_dev->dev;
 
-	/* Reset the core */
-	adreno_regwrite(device, A3XX_RBBM_SW_RESET_CMD,
-		0x00000001);
-	msleep(20);
-
 	/* Set up 16 deep read/write request queues */
 
 	adreno_regwrite(device, A3XX_VBIF_IN_RD_LIM_CONF0, 0x10101010);
@@ -2612,6 +2607,10 @@
 	adreno_regwrite(device, A3XX_RBBM_INTERFACE_HANG_INT_CTL,
 			(1 << 16) | 0xFFF);
 
+	/* Enable Clock gating */
+	adreno_regwrite(device, A3XX_RBBM_CLOCK_CTL,
+			A3XX_RBBM_CLOCK_CTL_DEFAULT);
+
 }
 
 /* Defined in adreno_a3xx_snapshot.c */
diff --git a/drivers/gpu/msm/adreno_a3xx_snapshot.c b/drivers/gpu/msm/adreno_a3xx_snapshot.c
index 60aab64..a3bee4d 100644
--- a/drivers/gpu/msm/adreno_a3xx_snapshot.c
+++ b/drivers/gpu/msm/adreno_a3xx_snapshot.c
@@ -285,6 +285,9 @@
 			remain, REG_CP_ME_CNTL, REG_CP_ME_STATUS,
 			64, 44);
 
+	/* Disable Clock gating temporarily for the debug bus to work */
+	adreno_regwrite(device, A3XX_RBBM_CLOCK_CTL, 0x00);
+
 	/* VPC memory */
 	snapshot = kgsl_snapshot_add_section(device,
 			KGSL_SNAPSHOT_SECTION_DEBUG, snapshot, remain,
@@ -321,5 +324,9 @@
 
 	snapshot = a3xx_snapshot_debugbus(device, snapshot, remain);
 
+	/* Enable Clock gating */
+	adreno_regwrite(device, A3XX_RBBM_CLOCK_CTL,
+			A3XX_RBBM_CLOCK_CTL_DEFAULT);
+
 	return snapshot;
 }
diff --git a/drivers/gpu/msm/kgsl_iommu.c b/drivers/gpu/msm/kgsl_iommu.c
index e42c7b6..d20cf7e 100644
--- a/drivers/gpu/msm/kgsl_iommu.c
+++ b/drivers/gpu/msm/kgsl_iommu.c
@@ -17,6 +17,7 @@
 #include <linux/slab.h>
 #include <linux/iommu.h>
 #include <linux/msm_kgsl.h>
+#include <mach/socinfo.h>
 
 #include "kgsl.h"
 #include "kgsl_device.h"
@@ -268,14 +269,17 @@
 	struct kgsl_iommu *iommu = mmu->priv;
 	int i, j;
 
-	BUG_ON(mmu->hwpagetable == NULL);
-	BUG_ON(mmu->hwpagetable->priv == NULL);
-
-	iommu_pt = mmu->hwpagetable->priv;
-
 	for (i = 0; i < iommu->unit_count; i++) {
 		struct kgsl_iommu_unit *iommu_unit = &iommu->iommu_units[i];
+		iommu_pt = mmu->defaultpagetable->priv;
 		for (j = 0; j < iommu_unit->dev_count; j++) {
+			/*
+			 * If there is a 2nd default pagetable then the priv
+			 * domain is attached to this pagetable
+			 */
+			if (mmu->priv_bank_table &&
+				(KGSL_IOMMU_CONTEXT_PRIV == j))
+				iommu_pt = mmu->priv_bank_table->priv;
 			if (iommu_unit->dev[j].attached) {
 				iommu_detach_device(iommu_pt->domain,
 						iommu_unit->dev[j].dev);
@@ -307,18 +311,21 @@
 	struct kgsl_iommu *iommu = mmu->priv;
 	int i, j, ret = 0;
 
-	BUG_ON(mmu->hwpagetable == NULL);
-	BUG_ON(mmu->hwpagetable->priv == NULL);
-
-	iommu_pt = mmu->hwpagetable->priv;
-
 	/*
 	 * Loop through all the iommu devcies under all iommu units and
 	 * attach the domain
 	 */
 	for (i = 0; i < iommu->unit_count; i++) {
 		struct kgsl_iommu_unit *iommu_unit = &iommu->iommu_units[i];
+		iommu_pt = mmu->defaultpagetable->priv;
 		for (j = 0; j < iommu_unit->dev_count; j++) {
+			/*
+			 * If there is a 2nd default pagetable then priv domain
+			 * is attached to this pagetable
+			 */
+			if (mmu->priv_bank_table &&
+				(KGSL_IOMMU_CONTEXT_PRIV == j))
+				iommu_pt = mmu->priv_bank_table->priv;
 			if (!iommu_unit->dev[j].attached) {
 				ret = iommu_attach_device(iommu_pt->domain,
 							iommu_unit->dev[j].dev);
@@ -614,17 +621,32 @@
 	int i = 0;
 	struct kgsl_iommu *iommu = mmu->priv;
 	struct kgsl_iommu_pt *iommu_pt;
+	struct kgsl_pagetable *pagetable = NULL;
 
+	/* On targets other than 8960 the 2nd context bank is used for pagetable
+	 * switching on the 3D side, so a separate pagetable is allocated for it */
+	if (!cpu_is_msm8960()) {
+		mmu->priv_bank_table =
+			kgsl_mmu_getpagetable(KGSL_MMU_PRIV_BANK_TABLE_NAME);
+		if (mmu->priv_bank_table == NULL) {
+			status = -ENOMEM;
+			goto err;
+		}
+		iommu_pt = mmu->priv_bank_table->priv;
+		iommu_pt->asid = 1;
+	}
 	mmu->defaultpagetable = kgsl_mmu_getpagetable(KGSL_MMU_GLOBAL_PT);
 	/* Return error if the default pagetable doesn't exist */
 	if (mmu->defaultpagetable == NULL) {
 		status = -ENOMEM;
 		goto err;
 	}
+	pagetable = mmu->priv_bank_table ? mmu->priv_bank_table :
+				mmu->defaultpagetable;
 	/* Map the IOMMU regsiters to only defaultpagetable */
 	for (i = 0; i < iommu->unit_count; i++) {
 		iommu->iommu_units[i].reg_map.priv |= KGSL_MEMFLAGS_GLOBAL;
-		status = kgsl_mmu_map(mmu->defaultpagetable,
+		status = kgsl_mmu_map(pagetable,
 			&(iommu->iommu_units[i].reg_map),
 			GSL_PT_PAGE_RV | GSL_PT_PAGE_WV);
 		if (status) {
@@ -644,10 +666,14 @@
 	return status;
 err:
 	for (i--; i >= 0; i--) {
-		kgsl_mmu_unmap(mmu->defaultpagetable,
+		kgsl_mmu_unmap(pagetable,
 				&(iommu->iommu_units[i].reg_map));
 		iommu->iommu_units[i].reg_map.priv &= ~KGSL_MEMFLAGS_GLOBAL;
 	}
+	if (mmu->priv_bank_table) {
+		kgsl_mmu_putpagetable(mmu->priv_bank_table);
+		mmu->priv_bank_table = NULL;
+	}
 	if (mmu->defaultpagetable) {
 		kgsl_mmu_putpagetable(mmu->defaultpagetable);
 		mmu->defaultpagetable = NULL;
@@ -669,9 +695,9 @@
 		if (status)
 			return -ENOMEM;
 	}
-	/* We use the GPU MMU to control access to IOMMU registers on a225,
-	 * hence we still keep the MMU active on a225 */
-	if (adreno_is_a225(ADRENO_DEVICE(mmu->device))) {
+	/* We use the GPU MMU to control access to IOMMU registers on 8960 with
+	 * a225, hence we still keep the MMU active on 8960 */
+	if (cpu_is_msm8960()) {
 		struct kgsl_mh *mh = &(mmu->device->mh);
 		kgsl_regwrite(mmu->device, MH_MMU_CONFIG, 0x00000001);
 		kgsl_regwrite(mmu->device, MH_MMU_MPU_END,
@@ -707,6 +733,12 @@
 	 */
 	for (i = 0; i < iommu->unit_count; i++) {
 		struct kgsl_iommu_unit *iommu_unit = &iommu->iommu_units[i];
+		/* Make sure that the ASID of the priv bank is set to 1.
+		 * When we use a different pagetable for the priv bank, the
+		 * iommu driver sets the ASID to 0 instead of 1 */
+		KGSL_IOMMU_SET_IOMMU_REG(iommu->iommu_units[i].reg_map.hostptr,
+					KGSL_IOMMU_CONTEXT_PRIV,
+					CONTEXTIDR, 1);
 		for (j = 0; j < iommu_unit->dev_count; j++)
 			iommu_unit->dev[j].pt_lsb = KGSL_IOMMMU_PT_LSB(
 						KGSL_IOMMU_GET_IOMMU_REG(
@@ -816,14 +848,19 @@
 	struct kgsl_iommu *iommu = mmu->priv;
 	int i;
 	for (i = 0; i < iommu->unit_count; i++) {
+		struct kgsl_pagetable *pagetable = (mmu->priv_bank_table ?
+			mmu->priv_bank_table : mmu->defaultpagetable);
 		if (iommu->iommu_units[i].reg_map.gpuaddr)
-			kgsl_mmu_unmap(mmu->defaultpagetable,
+			kgsl_mmu_unmap(pagetable,
 			&(iommu->iommu_units[i].reg_map));
 		if (iommu->iommu_units[i].reg_map.hostptr)
 			iounmap(iommu->iommu_units[i].reg_map.hostptr);
 		kgsl_sg_free(iommu->iommu_units[i].reg_map.sg,
 				iommu->iommu_units[i].reg_map.sglen);
 	}
+
+	if (mmu->priv_bank_table)
+		kgsl_mmu_putpagetable(mmu->priv_bank_table);
 	if (mmu->defaultpagetable)
 		kgsl_mmu_putpagetable(mmu->defaultpagetable);
 	kfree(iommu->asids);
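
For reference, the pagetable selection rule that the kgsl_iommu changes above implement can be sketched in isolation as below: the PRIV context bank uses the separately allocated priv_bank_table when one exists (non-8960 targets), and every other context bank uses the default pagetable. The types and field names are simplified stand-ins, not the real kgsl structures.

    /* Stand-alone sketch, not kernel code; names are illustrative only. */
    #include <stdio.h>

    #define CONTEXT_USER 0
    #define CONTEXT_PRIV 1

    struct fake_pt { const char *name; };

    struct fake_mmu {
        struct fake_pt *defaultpagetable;
        struct fake_pt *priv_bank_table;   /* NULL on 8960-style targets */
    };

    /* Mirror of the selection done in the attach/detach loops above. */
    static struct fake_pt *pt_for_context(struct fake_mmu *mmu, int ctx)
    {
        if (mmu->priv_bank_table && ctx == CONTEXT_PRIV)
            return mmu->priv_bank_table;
        return mmu->defaultpagetable;
    }

    int main(void)
    {
        struct fake_pt def = { "default" }, priv = { "priv-bank" };
        struct fake_mmu with_priv = { &def, &priv };
        struct fake_mmu without_priv = { &def, NULL };

        printf("priv ctx, priv table present: %s\n",
               pt_for_context(&with_priv, CONTEXT_PRIV)->name);
        printf("user ctx, priv table present: %s\n",
               pt_for_context(&with_priv, CONTEXT_USER)->name);
        printf("priv ctx, no priv table:      %s\n",
               pt_for_context(&without_priv, CONTEXT_PRIV)->name);
        return 0;
    }
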
diff --git a/drivers/gpu/msm/kgsl_mmu.c b/drivers/gpu/msm/kgsl_mmu.c
index 5216b34..dfaadba 100644
--- a/drivers/gpu/msm/kgsl_mmu.c
+++ b/drivers/gpu/msm/kgsl_mmu.c
@@ -39,7 +39,8 @@
 	/* For IOMMU only unmap the global structures to global pt */
 	if ((KGSL_MMU_TYPE_NONE != kgsl_mmu_type) &&
 		(KGSL_MMU_TYPE_IOMMU == kgsl_mmu_type) &&
-		(KGSL_MMU_GLOBAL_PT !=  pt->name))
+		(KGSL_MMU_GLOBAL_PT !=  pt->name) &&
+		(KGSL_MMU_PRIV_BANK_TABLE_NAME !=  pt->name))
 		return 0;
 	for (i = 0; i < KGSL_DEVICE_MAX; i++) {
 		struct kgsl_device *device = kgsl_driver.devp[i];
@@ -58,7 +59,8 @@
 	/* For IOMMU only map the global structures to global pt */
 	if ((KGSL_MMU_TYPE_NONE != kgsl_mmu_type) &&
 		(KGSL_MMU_TYPE_IOMMU == kgsl_mmu_type) &&
-		(KGSL_MMU_GLOBAL_PT !=  pt->name))
+		(KGSL_MMU_GLOBAL_PT !=  pt->name) &&
+		(KGSL_MMU_PRIV_BANK_TABLE_NAME !=  pt->name))
 		return 0;
 	for (i = 0; i < KGSL_DEVICE_MAX; i++) {
 		struct kgsl_device *device = kgsl_driver.devp[i];
@@ -453,9 +455,9 @@
 	 * just once from this pool of the defaultpagetable
 	 */
 	if ((KGSL_MMU_TYPE_IOMMU == kgsl_mmu_get_mmutype()) &&
-		(KGSL_MMU_GLOBAL_PT == name)) {
-		pagetable->kgsl_pool = gen_pool_create(KGSL_MMU_ALIGN_SHIFT,
-						       -1);
+		((KGSL_MMU_GLOBAL_PT == name) ||
+		(KGSL_MMU_PRIV_BANK_TABLE_NAME == name))) {
+		pagetable->kgsl_pool = gen_pool_create(PAGE_SHIFT, -1);
 		if (pagetable->kgsl_pool == NULL) {
 			KGSL_CORE_ERR("gen_pool_create(%d) failed\n",
 					KGSL_MMU_ALIGN_SHIFT);
diff --git a/drivers/gpu/msm/kgsl_mmu.h b/drivers/gpu/msm/kgsl_mmu.h
index 2db327b..4c0c015 100644
--- a/drivers/gpu/msm/kgsl_mmu.h
+++ b/drivers/gpu/msm/kgsl_mmu.h
@@ -29,6 +29,7 @@
    as an identifier */
 
 #define KGSL_MMU_GLOBAL_PT 0
+#define KGSL_MMU_PRIV_BANK_TABLE_NAME 0xFFFFFFFF
 
 struct kgsl_device;
 
@@ -165,6 +166,8 @@
 	struct kgsl_memdesc    setstate_memory;
 	/* current page table object being used by device mmu */
 	struct kgsl_pagetable  *defaultpagetable;
+	/* pagetable object used for priv bank of IOMMU */
+	struct kgsl_pagetable  *priv_bank_table;
 	struct kgsl_pagetable  *hwpagetable;
 	const struct kgsl_mmu_ops *mmu_ops;
 	void *priv;
diff --git a/drivers/gpu/msm/kgsl_pwrctrl.c b/drivers/gpu/msm/kgsl_pwrctrl.c
index d55d476..409fe40 100644
--- a/drivers/gpu/msm/kgsl_pwrctrl.c
+++ b/drivers/gpu/msm/kgsl_pwrctrl.c
@@ -785,9 +785,6 @@
 	case KGSL_STATE_NAP:
 	case KGSL_STATE_SLEEP:
 		del_timer_sync(&device->idle_timer);
-		if (!device->pwrctrl.strtstp_sleepwake)
-			kgsl_pwrctrl_pwrlevel_change(device,
-					KGSL_PWRLEVEL_NOMINAL);
 		device->pwrctrl.restore_slumber = true;
 		device->ftbl->suspend_context(device);
 		device->ftbl->stop(device);
diff --git a/drivers/gpu/msm/kgsl_pwrscale_trustzone.c b/drivers/gpu/msm/kgsl_pwrscale_trustzone.c
index ad1e7ed..04896be 100644
--- a/drivers/gpu/msm/kgsl_pwrscale_trustzone.c
+++ b/drivers/gpu/msm/kgsl_pwrscale_trustzone.c
@@ -114,8 +114,7 @@
 {
 	struct tz_priv *priv = pwrscale->priv;
 	if (device->state != KGSL_STATE_NAP &&
-		priv->governor == TZ_GOVERNOR_ONDEMAND &&
-		device->pwrctrl.restore_slumber == 0)
+		priv->governor == TZ_GOVERNOR_ONDEMAND)
 		kgsl_pwrctrl_pwrlevel_change(device,
 					device->pwrctrl.default_pwrlevel);
 }
diff --git a/drivers/iommu/msm_iommu-v2.c b/drivers/iommu/msm_iommu-v2.c
index 2ae9f28..6e62e60 100644
--- a/drivers/iommu/msm_iommu-v2.c
+++ b/drivers/iommu/msm_iommu-v2.c
@@ -25,7 +25,7 @@
 #include <linux/scatterlist.h>
 #include <linux/of.h>
 #include <linux/of_device.h>
+#include <linux/regulator/consumer.h>
 
 #include <asm/sizes.h>
 
 #include <mach/iommu_hw-v2.h>
@@ -383,10 +383,16 @@
 		goto fail;
 	}
 
-	ret = __enable_clocks(iommu_drvdata);
+	ret = regulator_enable(iommu_drvdata->gdsc);
 	if (ret)
 		goto fail;
 
+	ret = __enable_clocks(iommu_drvdata);
+	if (ret) {
+		regulator_disable(iommu_drvdata->gdsc);
+		goto fail;
+	}
+
 	if (!msm_iommu_ctx_attached(dev->parent))
 		__program_iommu(iommu_drvdata->base);
 
@@ -431,6 +437,8 @@
 	__reset_context(iommu_drvdata->base, ctx_drvdata->num);
 	__disable_clocks(iommu_drvdata);
 
+	regulator_disable(iommu_drvdata->gdsc);
+
 	list_del_init(&ctx_drvdata->attached_elm);
 	ctx_drvdata->attached_domain = NULL;
 
diff --git a/drivers/iommu/msm_iommu_dev-v2.c b/drivers/iommu/msm_iommu_dev-v2.c
index d6858de..87e1a46 100644
--- a/drivers/iommu/msm_iommu_dev-v2.c
+++ b/drivers/iommu/msm_iommu_dev-v2.c
@@ -75,6 +75,10 @@
 	if (!drvdata->base)
 		return -ENOMEM;
 
+	drvdata->gdsc = devm_regulator_get(&pdev->dev, "vdd");
+	if (IS_ERR(drvdata->gdsc))
+		return -EINVAL;
+
 	drvdata->pclk = clk_get(&pdev->dev, "iface_clk");
 	if (IS_ERR(drvdata->pclk))
 		return PTR_ERR(drvdata->pclk);
diff --git a/drivers/media/radio/radio-iris.c b/drivers/media/radio/radio-iris.c
index 53eb85c..23d11c3 100644
--- a/drivers/media/radio/radio-iris.c
+++ b/drivers/media/radio/radio-iris.c
@@ -88,6 +88,7 @@
 	unsigned char power_mode;
 	int search_on;
 	unsigned int tone_freq;
+	unsigned char spur_table_size;
 	unsigned char g_scan_time;
 	unsigned int g_antenna;
 	unsigned int g_rds_grp_proc_ps;
@@ -101,11 +102,13 @@
 	struct hci_fm_sig_threshold_rsp sig_th;
 	struct hci_fm_ch_det_threshold ch_det_threshold;
 	struct hci_fm_data_rd_rsp default_data;
+	struct hci_fm_spur_data spur_data;
 };
 
 static struct video_device *priv_videodev;
 static int iris_do_calibration(struct iris_device *radio);
 
+static int update_spur_table(struct iris_device *radio);
 static struct v4l2_queryctrl iris_v4l2_queryctrl[] = {
 	{
 	.id	= V4L2_CID_AUDIO_VOLUME,
@@ -2896,6 +2899,7 @@
 				FMDERR("get frequency failed %d\n", retval);
 			break;
 		case FM_OFF:
+			radio->spur_table_size = 0;
 			switch (radio->mode) {
 			case FM_RECV:
 				retval = hci_cmd(HCI_FM_DISABLE_RECV_CMD,
@@ -3248,12 +3252,116 @@
 		*/
 		retval = 0;
 		break;
+	case V4L2_CID_PRIVATE_SPUR_FREQ:
+		if (radio->spur_table_size >= MAX_SPUR_FREQ_LIMIT) {
+			FMDERR("%s: Spur Table Full!\n", __func__);
+			retval = -1;
+		} else
+			radio->spur_data.freq[radio->spur_table_size] =
+				ctrl->value;
+		break;
+	case V4L2_CID_PRIVATE_SPUR_FREQ_RMSSI:
+		if (radio->spur_table_size >= MAX_SPUR_FREQ_LIMIT) {
+			FMDERR("%s: Spur Table Full!\n", __func__);
+			retval = -1;
+		} else
+			radio->spur_data.rmssi[radio->spur_table_size] =
+				ctrl->value;
+		break;
+	case V4L2_CID_PRIVATE_SPUR_SELECTION:
+		if (radio->spur_table_size >= MAX_SPUR_FREQ_LIMIT) {
+			FMDERR("%s: Spur Table Full!\n", __func__);
+			retval = -1;
+		} else {
+			radio->spur_data.enable[radio->spur_table_size] =
+				ctrl->value;
+			radio->spur_table_size++;
+		}
+		break;
+	case V4L2_CID_PRIVATE_UPDATE_SPUR_TABLE:
+		update_spur_table(radio);
+		break;
 	default:
 		retval = -EINVAL;
 	}
 	return retval;
 }
 
+static int update_spur_table(struct iris_device *radio)
+{
+	struct hci_fm_def_data_wr_req default_data;
+	int len = 0, index = 0, offset = 0, i = 0;
+	int retval = 0, temp = 0, cnt = 0;
+
+	memset(&default_data, 0, sizeof(default_data));
+
+	/* Pass the mode of SPUR_CLK */
+	default_data.mode = CKK_SPUR;
+
+	temp = radio->spur_table_size;
+	for (cnt = 0; cnt < (temp / SPUR_ENTRIES_PER_ID); cnt++) {
+		offset = 0;
+		/*
+		 * Program the spur entries in spur table in following order:
+		 *    Spur index
+		 *    Length of the spur data
+		 *    Spur Data:
+		 *        MSB of the spur frequency
+		 *        LSB of the spur frequency
+		 *        Enable/Disable the spur frequency
+		 *        RMSSI value of the spur frequency
+		 */
+		default_data.data[offset++] = ENTRY_0 + cnt;
+		for (i = 0; i < SPUR_ENTRIES_PER_ID; i++) {
+			default_data.data[offset++] = GET_FREQ(COMPUTE_SPUR(
+				radio->spur_data.freq[index]), 0);
+			default_data.data[offset++] = GET_FREQ(COMPUTE_SPUR(
+				radio->spur_data.freq[index]), 1);
+			default_data.data[offset++] =
+				radio->spur_data.enable[index];
+			default_data.data[offset++] =
+				radio->spur_data.rmssi[index];
+			index++;
+		}
+		len = (SPUR_ENTRIES_PER_ID * SPUR_DATA_SIZE);
+		default_data.length = (len + 1);
+		retval = hci_def_data_write(&default_data, radio->fm_hdev);
+		if (retval < 0) {
+			FMDERR("%s: Failed to configure entries for ID : %d\n",
+				__func__, default_data.data[0]);
+			return retval;
+		}
+	}
+
+	/* Program the remaining spur frequencies (partial last block) */
+	temp %= SPUR_ENTRIES_PER_ID;
+	if (temp > 0) {
+		offset = 0;
+		default_data.data[offset++] = ENTRY_0 +
+			(radio->spur_table_size / SPUR_ENTRIES_PER_ID);
+		for (i = 0; i < temp; i++) {
+			default_data.data[offset++] = GET_FREQ(COMPUTE_SPUR(
+				radio->spur_data.freq[index]), 0);
+			default_data.data[offset++] = GET_FREQ(COMPUTE_SPUR(
+				radio->spur_data.freq[index]), 1);
+			default_data.data[offset++] =
+				radio->spur_data.enable[index];
+			default_data.data[offset++] =
+				radio->spur_data.rmssi[index];
+			index++;
+		}
+		len = (temp * SPUR_DATA_SIZE);
+		default_data.length = (len + 1);
+		retval = hci_def_data_write(&default_data, radio->fm_hdev);
+		if (retval < 0) {
+			FMDERR("%s: Failed to configure entries for ID : %d\n",
+				__func__, default_data.data[0]);
+			return retval;
+		}
+	}
+
+	return retval;
+}
+
 static int iris_vidioc_g_tuner(struct file *file, void *priv,
 		struct v4l2_tuner *tuner)
 {
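
As a reference for the packing done by update_spur_table() above, a small stand-alone sketch is given below: each default-data write carries one entry id byte followed by up to SPUR_ENTRIES_PER_ID records of {frequency MSB, frequency LSB, enable flag, RMSSI}. COMPUTE_SPUR() and GET_FREQ() are driver macros; a plain 16-bit split stands in for them here, and everything outside the patch is illustrative.

    /* User-space sketch; the values and the 16-bit split are stand-ins. */
    #include <stdio.h>
    #include <stdint.h>

    #define SPUR_ENTRIES_PER_ID 5
    #define SPUR_DATA_SIZE      4

    struct spur { uint16_t freq; uint8_t enable; uint8_t rmssi; };

    /* Pack one write block: entry id, then 4 bytes per spur entry. */
    static int pack_spur_block(uint8_t *buf, uint8_t entry_id,
                               const struct spur *s, int count)
    {
        int off = 0, i;

        buf[off++] = entry_id;              /* spur table index */
        for (i = 0; i < count; i++) {
            buf[off++] = s[i].freq >> 8;    /* MSB of spur frequency */
            buf[off++] = s[i].freq & 0xFF;  /* LSB of spur frequency */
            buf[off++] = s[i].enable;       /* enable/disable flag */
            buf[off++] = s[i].rmssi;        /* RMSSI value */
        }
        return off;                         /* 1 + count * SPUR_DATA_SIZE */
    }

    int main(void)
    {
        struct spur table[2] = { { 9830, 1, 20 }, { 10250, 0, 25 } };
        uint8_t buf[1 + SPUR_ENTRIES_PER_ID * SPUR_DATA_SIZE];
        int i, len = pack_spur_block(buf, 0, table, 2);

        for (i = 0; i < len; i++)
            printf("%02x ", buf[i]);
        printf("\n");
        return 0;
    }
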
diff --git a/drivers/media/radio/radio-tavarua.c b/drivers/media/radio/radio-tavarua.c
index 971cf10..a2993ce 100644
--- a/drivers/media/radio/radio-tavarua.c
+++ b/drivers/media/radio/radio-tavarua.c
@@ -996,6 +996,10 @@
 			FMDBG("read PHY_TXGAIN is successful");
 			complete(&radio->sync_req_done);
 			break;
+		case (XFR_EXT | 0x80):
+			FMDBG("Set tone generator successful\n");
+			complete(&radio->sync_req_done);
+			break;
 		case (0x80 | RX_CONFIG):
 		case (0x80 | RADIO_CONFIG):
 		case (0x80 | RDS_CONFIG):
@@ -1214,7 +1218,7 @@
 	unsigned char adie_type_bahma;
 	int retval = 0;
 	unsigned int rdsMask = 0;
-	unsigned char value;
+	unsigned char value = 0;
 
 	adie_type_bahma = is_bahama();
 
@@ -3459,7 +3463,6 @@
 	case V4L2_CID_PRIVATE_SSBI_ACCS_ADDR:
 	case V4L2_CID_PRIVATE_SSBI_PEEK:
 	case V4L2_CID_PRIVATE_SSBI_POKE:
-	case V4L2_CID_PRIVATE_TX_TONE:
 	case V4L2_CID_PRIVATE_RDS_GRP_COUNTERS:
 	case V4L2_CID_PRIVATE_SET_NOTCH_FILTER:
 	case V4L2_CID_PRIVATE_TAVARUA_DO_CALIBRATION:
@@ -3479,6 +3482,54 @@
 	case V4L2_CID_PRIVATE_UPDATE_SPUR_TABLE:
 		retval = update_spur_table(radio);
 		break;
+	case V4L2_CID_PRIVATE_TX_TONE:
+		retval = 0;
+		memset(xfr_buf, 0, sizeof(xfr_buf));
+		switch (ctrl->value) {
+		case ONE_KHZ_LR_EQUA_0DBFS:
+			xfr_buf[TONE_CHANNEL_EN_AND_SCALING_BYTE]
+				= TONE_LEFT_RIGHT_CH_ENABLED;
+			xfr_buf[TONE_LEFT_FREQ_BYTE] = 0x01;
+			xfr_buf[TONE_RIGHT_FREQ_BYTE] = 0x01;
+			break;
+		case ONE_KHZ_LEFTONLY_EQUA_0DBFS:
+			xfr_buf[TONE_CHANNEL_EN_AND_SCALING_BYTE]
+				 = TONE_LEFT_CH_ENABLED;
+			xfr_buf[TONE_LEFT_FREQ_BYTE] = 0x01;
+			break;
+		case ONE_KHZ_RIGHTONLY_EQUA_0DBFS:
+			xfr_buf[TONE_CHANNEL_EN_AND_SCALING_BYTE]
+				 = TONE_RIGHT_CH_ENABLED;
+			xfr_buf[TONE_RIGHT_FREQ_BYTE] = 0x01;
+			break;
+		case ONE_KHZ_LR_EQUA_l8DBFS:
+			xfr_buf[TONE_CHANNEL_EN_AND_SCALING_BYTE]
+				 = (LSH_DATA(TONE_SCALE_IND_12,
+						 TONE_SCALING_SHIFT)
+					 | TONE_LEFT_RIGHT_CH_ENABLED);
+			xfr_buf[TONE_LEFT_FREQ_BYTE] = 0x01;
+			xfr_buf[TONE_RIGHT_FREQ_BYTE] = 0x01;
+			break;
+		case FIFTEEN_KHZ_LR_EQUA_l8DBFS:
+			xfr_buf[TONE_CHANNEL_EN_AND_SCALING_BYTE]
+				 = (LSH_DATA(TONE_SCALE_IND_12,
+						 TONE_SCALING_SHIFT)
+					 | TONE_LEFT_RIGHT_CH_ENABLED);
+			xfr_buf[TONE_LEFT_FREQ_BYTE] = 0x0F;
+			xfr_buf[TONE_RIGHT_FREQ_BYTE] = 0x0F;
+			break;
+		default:
+			retval = -1;
+			FMDERR("tone generator value not valid\n");
+			break;
+		}
+		if (retval >= 0) {
+			xfr_buf[TONE_GEN_CTRL_BYTE] = 0x01;
+			retval = sync_write_xfr(radio, XFR_EXT, xfr_buf);
+		}
+		if (retval < 0)
+			FMDERR("Tone generator failed\n");
+		break;
 	default:
 		retval = -EINVAL;
 	}
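
The tone-generator case added above builds an XFR_EXT payload out of a channel-enable/scaling byte, per-channel frequency bytes and a control byte that starts the generator. The sketch below shows that packing in isolation; the byte offsets, channel bits and scale index are placeholders rather than the real TONE_* definitions from the driver headers, and the frequency bytes are assumed to be in kHz (0x01 for 1 kHz, 0x0F for 15 kHz).

    /* User-space sketch; offsets and bit values are placeholders. */
    #include <stdio.h>
    #include <string.h>
    #include <stdint.h>

    enum { CTRL_BYTE = 0, CHAN_SCALE_BYTE = 1, LEFT_FREQ_BYTE = 2,
           RIGHT_FREQ_BYTE = 3, XFR_LEN = 16 };

    #define LEFT_CH_EN   0x01
    #define RIGHT_CH_EN  0x02
    #define SCALE_SHIFT  4

    static void build_tone_xfr(uint8_t *buf, uint8_t chan_mask, uint8_t scale,
                               uint8_t left_khz, uint8_t right_khz)
    {
        memset(buf, 0, XFR_LEN);
        /* channel enable bits plus scaling index, as in the patch */
        buf[CHAN_SCALE_BYTE] = (uint8_t)((scale << SCALE_SHIFT) | chan_mask);
        if (chan_mask & LEFT_CH_EN)
            buf[LEFT_FREQ_BYTE] = left_khz;
        if (chan_mask & RIGHT_CH_EN)
            buf[RIGHT_FREQ_BYTE] = right_khz;
        buf[CTRL_BYTE] = 0x01;              /* start the tone generator */
    }

    int main(void)
    {
        uint8_t xfr[XFR_LEN];
        int i;

        /* both channels, scale index 12, 15 kHz test tone (0x0F) */
        build_tone_xfr(xfr, LEFT_CH_EN | RIGHT_CH_EN, 12, 0x0F, 0x0F);
        for (i = 0; i < 4; i++)
            printf("byte[%d] = 0x%02x\n", i, xfr[i]);
        return 0;
    }
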
diff --git a/drivers/media/video/msm/actuators/msm_actuator.c b/drivers/media/video/msm/actuators/msm_actuator.c
index 50399de..774a46d 100644
--- a/drivers/media/video/msm/actuators/msm_actuator.c
+++ b/drivers/media/video/msm/actuators/msm_actuator.c
@@ -82,7 +82,7 @@
 				i2c_byte1 = write_arr[i].reg_addr;
 				i2c_byte2 = value;
 				if (size != (i+1)) {
-					i2c_byte2 = (i2c_byte2 & 0xFF00) >> 8;
+					i2c_byte2 = value & 0xFF;
 					CDBG("%s: byte1:0x%x, byte2:0x%x\n",
 					__func__, i2c_byte1, i2c_byte2);
 					rc = msm_camera_i2c_write(
@@ -97,7 +97,7 @@
 
 					i++;
 					i2c_byte1 = write_arr[i].reg_addr;
-					i2c_byte2 = value & 0xFF;
+					i2c_byte2 = (value & 0xFF00) >> 8;
 				}
 			} else {
 				i2c_byte1 = (value & 0xFF00) >> 8;
diff --git a/drivers/media/video/msm/gemini/msm_gemini_sync.c b/drivers/media/video/msm/gemini/msm_gemini_sync.c
index b55ec18..ae3de13 100644
--- a/drivers/media/video/msm/gemini/msm_gemini_sync.c
+++ b/drivers/media/video/msm/gemini/msm_gemini_sync.c
@@ -280,6 +280,7 @@
 		GMN_DBG("%s:%d] no output return buffer\n", __func__,
 			__LINE__);
 		rc = -1;
+		return rc;
 	}
 
 	buf_out = msm_gemini_q_out(&pgmn_dev->output_buf_q);
diff --git a/drivers/media/video/msm/mercury/msm_mercury_platform.c b/drivers/media/video/msm/mercury/msm_mercury_platform.c
index 9366ef3..67ce82d 100644
--- a/drivers/media/video/msm/mercury/msm_mercury_platform.c
+++ b/drivers/media/video/msm/mercury/msm_mercury_platform.c
@@ -11,7 +11,6 @@
  */
 
 #include <linux/module.h>
-#include <linux/pm_qos_params.h>
 #include <linux/clk.h>
 #include <linux/io.h>
 #include <linux/android_pmem.h>
diff --git a/drivers/media/video/msm/msm_isp.c b/drivers/media/video/msm/msm_isp.c
index 834c9b0..9b42f9b 100644
--- a/drivers/media/video/msm/msm_isp.c
+++ b/drivers/media/video/msm/msm_isp.c
@@ -136,6 +136,15 @@
 			image_mode = -1;
 			break;
 		}
+	} else if (vfe_msg == VFE_MSG_OUTPUT_TERTIARY1) {
+		switch (pmctl->vfe_output_mode) {
+		case VFE_OUTPUTS_RDI0:
+			image_mode = MSM_V4L2_EXT_CAPTURE_MODE_PREVIEW;
+			break;
+		default:
+			image_mode = -1;
+			break;
+		}
 	} else
 		image_mode = -1;
 
@@ -331,6 +340,9 @@
 			case MSG_ID_OUTPUT_SECONDARY:
 				msgid = VFE_MSG_OUTPUT_SECONDARY;
 				break;
+			case MSG_ID_OUTPUT_TERTIARY1:
+				msgid = VFE_MSG_OUTPUT_TERTIARY1;
+				break;
 			default:
 				pr_err("%s: Invalid VFE output id: %d\n",
 					   __func__, isp_output->output_id);
@@ -673,6 +685,7 @@
 	case CMD_AXI_CFG_PRIM_ALL_CHNLS|CMD_AXI_CFG_SEC:
 	case CMD_AXI_START:
 	case CMD_AXI_STOP:
+	case CMD_AXI_CFG_TERT1:
 		/* Dont need to pass buffer information.
 		 * subdev will get the buffer from media
 		 * controller free queue.
diff --git a/drivers/media/video/msm/msm_mctl_buf.c b/drivers/media/video/msm/msm_mctl_buf.c
index eade6f1..cd86a80 100644
--- a/drivers/media/video/msm/msm_mctl_buf.c
+++ b/drivers/media/video/msm/msm_mctl_buf.c
@@ -116,6 +116,10 @@
 	}
 	buf_idx = vb->v4l2_buf.index;
 	pmctl = msm_cam_server_get_mctl(pcam->mctl_handle);
+	if (pmctl == NULL) {
+		pr_err("%s No mctl found\n", __func__);
+		return -EINVAL;
+	}
 	for (i = 0; i < vb->num_planes; i++) {
 		mem = vb2_plane_cookie(vb, i);
 		if (buf_type == VIDEOBUF2_MULTIPLE_PLANES)
@@ -147,13 +151,14 @@
 	struct msm_cam_v4l2_dev_inst *pcam_inst;
 	struct msm_cam_v4l2_device *pcam;
 	struct msm_frame_buffer *buf;
-	struct vb2_queue	*vq = vb->vb2_queue;
+	struct vb2_queue *vq;
 
 	D("%s\n", __func__);
-	if (!vb || !vq) {
+	if (!vb || !vb->vb2_queue) {
 		pr_err("%s error : input is NULL\n", __func__);
 		return -EINVAL;
 	}
+	vq = vb->vb2_queue;
 	pcam_inst = vb2_get_drv_priv(vq);
 	pcam = pcam_inst->pcam;
 	buf = container_of(vb, struct msm_frame_buffer, vidbuf);
@@ -207,6 +212,12 @@
 	pcam = pcam_inst->pcam;
 	buf = container_of(vb, struct msm_frame_buffer, vidbuf);
 
+	pmctl = msm_cam_server_get_mctl(pcam->mctl_handle);
+	if (pmctl == NULL) {
+		pr_err("%s No mctl found\n", __func__);
+		return;
+	}
+
 	if (pcam_inst->vid_fmt.type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
 		for (i = 0; i < vb->num_planes; i++) {
 			mem = vb2_plane_cookie(vb, i);
@@ -251,7 +262,6 @@
 		}
 		spin_unlock_irqrestore(&pcam_inst->vq_irqlock, flags);
 	}
-	pmctl = msm_cam_server_get_mctl(pcam->mctl_handle);
 	for (i = 0; i < vb->num_planes; i++) {
 		mem = vb2_plane_cookie(vb, i);
 		videobuf2_pmem_contig_user_put(mem, pmctl->client);
@@ -274,13 +284,14 @@
 	struct msm_cam_v4l2_dev_inst *pcam_inst = NULL;
 	struct msm_cam_v4l2_device *pcam = NULL;
 	unsigned long flags = 0;
-	struct vb2_queue *vq = vb->vb2_queue;
+	struct vb2_queue *vq;
 	struct msm_frame_buffer *buf;
 	D("%s\n", __func__);
-	if (!vb || !vq) {
+	if (!vb || !vb->vb2_queue) {
 		pr_err("%s error : input is NULL\n", __func__);
 		return ;
 	}
+	vq = vb->vb2_queue;
 	pcam_inst = vb2_get_drv_priv(vq);
 	pcam = pcam_inst->pcam;
 	D("%s pcam_inst=%p,(vb=0x%p),idx=%d,len=%d\n",
@@ -473,6 +484,10 @@
 {
 	struct msm_cam_media_controller *pmctl;
 	pmctl = msm_cam_server_get_mctl(pcam->mctl_handle);
+	if (pmctl == NULL) {
+		pr_err("%s No mctl found\n", __func__);
+		return -EINVAL;
+	}
 	pmctl->mctl_vbqueue_init = msm_vbqueue_init;
 	return 0;
 }
diff --git a/drivers/media/video/msm/msm_vfe32.c b/drivers/media/video/msm/msm_vfe32.c
index 9382292..acff492 100644
--- a/drivers/media/video/msm/msm_vfe32.c
+++ b/drivers/media/video/msm/msm_vfe32.c
@@ -432,8 +432,15 @@
 	axi_ctrl->share_ctrl->outpath.out2.ch1 =
 		0x0000FFFF & (*ch_info++ >> 16);
 	axi_ctrl->share_ctrl->outpath.out2.ch2 = 0x0000FFFF & *ch_info++;
+	axi_ctrl->share_ctrl->outpath.out2.image_mode =
+		0x0000FFFF & (*ch_info++ >> 16);
 
 	switch (mode) {
+	case OUTPUT_TERT1:
+		axi_ctrl->share_ctrl->outpath.output_mode =
+			VFE32_OUTPUT_MODE_TERTIARY1;
+		break;
 	case OUTPUT_PRIM:
 		axi_ctrl->share_ctrl->outpath.output_mode =
 			VFE32_OUTPUT_MODE_PRIMARY;
@@ -714,7 +721,7 @@
 
 static void vfe32_start_common(struct vfe32_ctrl_type *vfe32_ctrl)
 {
-	uint32_t irq_mask = 0x00E00021;
+	uint32_t irq_mask = 0x00E00021, irq_mask1;
 	vfe32_ctrl->start_ack_pending = TRUE;
 	CDBG("VFE opertaion mode = 0x%x, output mode = 0x%x\n",
 		vfe32_ctrl->share_ctrl->operation_mode,
@@ -723,19 +730,31 @@
 		irq_mask |= VFE_IRQ_STATUS0_STATS_COMPOSIT_MASK;
 	else
 		irq_mask |= 0x000FE000;
-
+	irq_mask |=
+		msm_camera_io_r(vfe32_ctrl->share_ctrl->vfebase +
+			VFE_IRQ_MASK_0);
 	msm_camera_io_w(irq_mask,
 		vfe32_ctrl->share_ctrl->vfebase + VFE_IRQ_MASK_0);
 	msm_camera_io_w(VFE_IMASK_WHILE_STOPPING_1,
 		vfe32_ctrl->share_ctrl->vfebase + VFE_IRQ_MASK_1);
 
+	if (vfe32_ctrl->share_ctrl->operation_mode == VFE_OUTPUTS_RDI0) {
+		irq_mask1 =
+		msm_camera_io_r(vfe32_ctrl->share_ctrl->vfebase +
+			VFE_IRQ_MASK_1);
+		irq_mask1 |= VFE_IRQ_STATUS1_RDI0_REG_UPDATE_MASK;
+		msm_camera_io_w(irq_mask1, vfe32_ctrl->share_ctrl->vfebase +
+			VFE_IRQ_MASK_1);
+		msm_camera_io_w_mb(2, vfe32_ctrl->share_ctrl->vfebase +
+			VFE_REG_UPDATE_CMD);
+	} else {
+		msm_camera_io_w_mb(1, vfe32_ctrl->share_ctrl->vfebase +
+			VFE_REG_UPDATE_CMD);
+		msm_camera_io_w_mb(1, vfe32_ctrl->share_ctrl->vfebase +
+			VFE_CAMIF_COMMAND);
+	}
 	/* Ensure the write order while writing
 	to the command register using the barrier */
-	msm_camera_io_w_mb(1,
-		vfe32_ctrl->share_ctrl->vfebase + VFE_REG_UPDATE_CMD);
-	msm_camera_io_w_mb(1,
-		vfe32_ctrl->share_ctrl->vfebase + VFE_CAMIF_COMMAND);
-
 	atomic_set(&vfe32_ctrl->share_ctrl->vstate, 1);
 }
 
@@ -989,7 +1008,8 @@
 	struct msm_cam_media_controller *pmctl,
 	struct vfe32_ctrl_type *vfe32_ctrl)
 {
-	uint32_t irq_comp_mask = 0;
+	uint32_t irq_comp_mask = 0, irq_mask = 0;
+
 	irq_comp_mask	=
 		msm_camera_io_r(vfe32_ctrl->share_ctrl->vfebase +
 			VFE_IRQ_COMP_MASK);
@@ -1018,6 +1038,16 @@
 			0x1 << (vfe32_ctrl->share_ctrl->outpath.out1.ch1 + 8) |
 			0x1 << (vfe32_ctrl->share_ctrl->outpath.out1.ch2 + 8));
 	}
+	if (vfe32_ctrl->share_ctrl->outpath.output_mode &
+		VFE32_OUTPUT_MODE_TERTIARY1) {
+		irq_mask = msm_camera_io_r(vfe32_ctrl->share_ctrl->vfebase +
+			VFE_IRQ_MASK_0);
+		irq_mask |= (0x1 << (vfe32_ctrl->share_ctrl->outpath.out2.ch0 +
+			VFE_WM_OFFSET));
+		msm_camera_io_w(irq_mask, vfe32_ctrl->share_ctrl->vfebase +
+			VFE_IRQ_MASK_0);
+	}
+
 	msm_camera_io_w(irq_comp_mask,
 		vfe32_ctrl->share_ctrl->vfebase + VFE_IRQ_COMP_MASK);
 
@@ -1235,6 +1265,8 @@
 		ch = &share_ctrl->outpath.out0;
 	else if (path == VFE_MSG_OUTPUT_SECONDARY)
 		ch = &share_ctrl->outpath.out1;
+	else if (path == VFE_MSG_OUTPUT_TERTIARY1)
+		ch = &share_ctrl->outpath.out2;
 	else
 		pr_err("%s: Invalid path %d\n", __func__,
 			path);
@@ -1251,8 +1283,10 @@
 
 	if (path == VFE_MSG_OUTPUT_PRIMARY)
 		image_mode = axi_ctrl->share_ctrl->outpath.out0.image_mode;
-	else
+	else if (path == VFE_MSG_OUTPUT_SECONDARY)
 		image_mode = axi_ctrl->share_ctrl->outpath.out1.image_mode;
+	else if (path == VFE_MSG_OUTPUT_TERTIARY1)
+		image_mode = axi_ctrl->share_ctrl->outpath.out2.image_mode;
 
 	vfe32_subdev_notify(id, path, image_mode,
 		&axi_ctrl->subdev, axi_ctrl->share_ctrl);
@@ -1269,8 +1303,10 @@
 	uint32_t image_mode = 0;
 	if (path == VFE_MSG_OUTPUT_PRIMARY)
 		image_mode = vfe32_ctrl->share_ctrl->outpath.out0.image_mode;
-	else
+	else if (path == VFE_MSG_OUTPUT_SECONDARY)
 		image_mode = vfe32_ctrl->share_ctrl->outpath.out1.image_mode;
+	else if (path == VFE_MSG_OUTPUT_TERTIARY1)
+		image_mode = vfe32_ctrl->share_ctrl->outpath.out2.image_mode;
 
 	vfe32_subdev_notify(id, path, image_mode,
 		&vfe32_ctrl->subdev, vfe32_ctrl->share_ctrl);
@@ -1286,8 +1322,9 @@
 			vfe32_ctrl->share_ctrl->vfebase, outch->ch0,
 			outch->pong.ch_paddr[0]);
 
-		if (vfe32_ctrl->share_ctrl->operation_mode !=
-			VFE_OUTPUTS_RAW) {
+		if ((vfe32_ctrl->share_ctrl->operation_mode !=
+			VFE_OUTPUTS_RAW) &&
+			(path != VFE_MSG_OUTPUT_TERTIARY1)) {
 			vfe32_put_ch_ping_addr(
 				vfe32_ctrl->share_ctrl->vfebase, outch->ch1,
 				outch->ping.ch_paddr[1]);
@@ -1377,6 +1414,11 @@
 			rc = vfe32_configure_pingpong_buffers(
 				VFE_MSG_V32_START, VFE_MSG_OUTPUT_PRIMARY,
 				vfe32_ctrl);
+		else if (vfe32_ctrl->share_ctrl->operation_mode ==
+				VFE_OUTPUTS_RDI0)
+			rc = vfe32_configure_pingpong_buffers(
+				VFE_MSG_V32_START, VFE_MSG_OUTPUT_TERTIARY1,
+				vfe32_ctrl);
 		else
 			/* Configure secondary channel */
 			rc = vfe32_configure_pingpong_buffers(
@@ -2844,11 +2886,14 @@
 		CDBG("stop video triggered .\n");
 	}
 
+	spin_lock_irqsave(&vfe32_ctrl->start_ack_lock, flags);
 	if (vfe32_ctrl->start_ack_pending == TRUE) {
+		vfe32_ctrl->start_ack_pending = FALSE;
+		spin_unlock_irqrestore(&vfe32_ctrl->start_ack_lock, flags);
 		vfe32_send_isp_msg(&vfe32_ctrl->subdev,
 			vfe32_ctrl->share_ctrl->vfeFrameId, MSG_ID_START_ACK);
-		vfe32_ctrl->start_ack_pending = FALSE;
 	} else {
+		spin_unlock_irqrestore(&vfe32_ctrl->start_ack_lock, flags);
 		if (vfe32_ctrl->recording_state ==
 				VFE_STATE_STOP_REQUESTED) {
 			vfe32_ctrl->recording_state = VFE_STATE_STOPPED;
@@ -2977,6 +3022,23 @@
 	} /* if snapshot mode. */
 }
 
+static void vfe32_process_rdi0_reg_update_irq(
+	struct vfe32_ctrl_type *vfe32_ctrl)
+{
+	unsigned long flags;
+	spin_lock_irqsave(&vfe32_ctrl->start_ack_lock, flags);
+	if (vfe32_ctrl->start_ack_pending == TRUE) {
+		vfe32_ctrl->start_ack_pending = FALSE;
+		spin_unlock_irqrestore(
+				&vfe32_ctrl->start_ack_lock, flags);
+		vfe32_send_isp_msg(&vfe32_ctrl->subdev,
+			vfe32_ctrl->share_ctrl->vfeFrameId, MSG_ID_START_ACK);
+	} else {
+		spin_unlock_irqrestore(
+				&vfe32_ctrl->start_ack_lock, flags);
+	}
+}
+
 static void vfe32_set_default_reg_values(
 			struct vfe32_ctrl_type *vfe32_ctrl)
 {
@@ -3381,6 +3443,48 @@
 	}
 }
 
+static void vfe32_process_output_path_irq_rdi0(
+			struct axi_ctrl_t *axi_ctrl)
+{
+	uint32_t ping_pong;
+	uint32_t ch0_paddr = 0;
+	/* this must be rdi image output. */
+	struct msm_free_buf *free_buf = NULL;
+	/*RDI0*/
+	if (axi_ctrl->share_ctrl->operation_mode == VFE_OUTPUTS_RDI0) {
+		free_buf = vfe32_check_free_buffer(VFE_MSG_OUTPUT_IRQ,
+			VFE_MSG_OUTPUT_TERTIARY1, axi_ctrl);
+		if (free_buf) {
+			ping_pong = msm_camera_io_r(axi_ctrl->
+				share_ctrl->vfebase +
+				VFE_BUS_PING_PONG_STATUS);
+
+			/* Y only channel */
+			ch0_paddr = vfe32_get_ch_addr(ping_pong,
+				axi_ctrl->share_ctrl->vfebase,
+				axi_ctrl->share_ctrl->outpath.out2.ch0);
+
+			pr_debug("%s ch0 = 0x%x\n",
+				__func__, ch0_paddr);
+
+			/* Y channel */
+			vfe32_put_ch_addr(ping_pong,
+				axi_ctrl->share_ctrl->vfebase,
+				axi_ctrl->share_ctrl->outpath.out2.ch0,
+				free_buf->ch_paddr[0]);
+
+			vfe_send_outmsg(axi_ctrl,
+				MSG_ID_OUTPUT_TERTIARY1, ch0_paddr,
+				0, 0,
+				axi_ctrl->share_ctrl->outpath.out2.image_mode);
+
+		} else {
+			axi_ctrl->share_ctrl->outpath.out2.frame_drop_cnt++;
+			pr_err("path_irq_2 irq - no free buffer for rdi0!\n");
+		}
+	}
+}
+
 static uint32_t  vfe32_process_stats_irq_common(
 	struct vfe32_ctrl_type *vfe32_ctrl,
 	uint32_t statsNum, uint32_t newAddr)
@@ -3756,6 +3860,10 @@
 		CDBG("irq	regUpdateIrq\n");
 		vfe32_process_reg_update_irq(vfe32_ctrl);
 		break;
+	case VFE_IRQ_STATUS1_RDI0_REG_UPDATE:
+		CDBG("irq	rdi0 regUpdateIrq\n");
+		vfe32_process_rdi0_reg_update_irq(vfe32_ctrl);
+		break;
 	case VFE_IMASK_WHILE_STOPPING_1:
 		CDBG("irq	resetAckIrq\n");
 		vfe32_process_reset_irq(vfe32_ctrl);
@@ -3845,6 +3953,12 @@
 				(void *)VFE_IRQ_STATUS0_REG_UPDATE_MASK);
 
 		if (qcmd->vfeInterruptStatus1 &
+				VFE_IRQ_STATUS1_RDI0_REG_UPDATE_MASK)
+			v4l2_subdev_notify(&axi_ctrl->subdev,
+				NOTIFY_VFE_IRQ,
+				(void *)VFE_IRQ_STATUS1_RDI0_REG_UPDATE);
+
+		if (qcmd->vfeInterruptStatus1 &
 				VFE_IMASK_WHILE_STOPPING_1)
 			v4l2_subdev_notify(&axi_ctrl->subdev,
 				NOTIFY_VFE_IRQ,
@@ -4284,6 +4398,7 @@
 	spin_lock_init(&vfe32_ctrl->state_lock);
 	spin_lock_init(&vfe32_ctrl->io_lock);
 	spin_lock_init(&vfe32_ctrl->update_ack_lock);
+	spin_lock_init(&vfe32_ctrl->start_ack_lock);
 
 	spin_lock_init(&vfe32_ctrl->aec_ack_lock);
 	spin_lock_init(&vfe32_ctrl->awb_ack_lock);
@@ -4366,6 +4481,11 @@
 				share_ctrl->outpath.out0.ch2]);
 		}
 		break;
+	case VFE_OUTPUTS_RDI0:
+		msm_camera_io_w(1, axi_ctrl->share_ctrl->vfebase +
+			vfe32_AXI_WM_CFG[axi_ctrl->
+			share_ctrl->outpath.out2.ch0]);
+		break;
 	default:
 		if (axi_ctrl->share_ctrl->outpath.output_mode &
 			VFE32_OUTPUT_MODE_SECONDARY) {
@@ -4467,8 +4587,8 @@
 		}
 		vfe32_config_axi(axi_ctrl, OUTPUT_PRIM, axio);
 		kfree(axio);
-	}
 		break;
+		}
 	case CMD_AXI_CFG_PRIM_ALL_CHNLS: {
 		uint32_t *axio = NULL;
 		axio = kmalloc(vfe32_cmd[VFE_CMD_AXI_OUT_CFG].length,
@@ -4486,8 +4606,8 @@
 		}
 		vfe32_config_axi(axi_ctrl, OUTPUT_PRIM_ALL_CHNLS, axio);
 		kfree(axio);
-	}
 		break;
+		}
 	case CMD_AXI_CFG_PRIM|CMD_AXI_CFG_SEC: {
 		uint32_t *axio = NULL;
 		axio = kmalloc(vfe32_cmd[VFE_CMD_AXI_OUT_CFG].length,
@@ -4505,8 +4625,8 @@
 		}
 		vfe32_config_axi(axi_ctrl, OUTPUT_PRIM|OUTPUT_SEC, axio);
 		kfree(axio);
-	}
 		break;
+		}
 	case CMD_AXI_CFG_PRIM|CMD_AXI_CFG_SEC_ALL_CHNLS: {
 		uint32_t *axio = NULL;
 		axio = kmalloc(vfe32_cmd[VFE_CMD_AXI_OUT_CFG].length,
@@ -4525,8 +4645,8 @@
 		vfe32_config_axi(axi_ctrl,
 			OUTPUT_PRIM|OUTPUT_SEC_ALL_CHNLS, axio);
 		kfree(axio);
-	}
 		break;
+		}
 	case CMD_AXI_CFG_PRIM_ALL_CHNLS|CMD_AXI_CFG_SEC: {
 		uint32_t *axio = NULL;
 		axio = kmalloc(vfe32_cmd[VFE_CMD_AXI_OUT_CFG].length,
@@ -4545,8 +4665,28 @@
 		vfe32_config_axi(axi_ctrl,
 			OUTPUT_PRIM_ALL_CHNLS|OUTPUT_SEC, axio);
 		kfree(axio);
-	}
 		break;
+		}
+	case CMD_AXI_CFG_TERT1: {
+		uint32_t *axio = NULL;
+		axio = kmalloc(vfe32_cmd[VFE_CMD_AXI_OUT_CFG].length,
+				GFP_ATOMIC);
+		if (!axio) {
+			rc = -ENOMEM;
+			break;
+		}
+
+		if (copy_from_user(axio, (void __user *)(vfecmd.value),
+				vfe32_cmd[VFE_CMD_AXI_OUT_CFG].length)) {
+			kfree(axio);
+			rc = -EFAULT;
+			break;
+		}
+		vfe32_config_axi(axi_ctrl,
+			OUTPUT_TERT1, axio);
+		kfree(axio);
+		break;
+		}
 	case CMD_AXI_CFG_PRIM_ALL_CHNLS|CMD_AXI_CFG_SEC_ALL_CHNLS:
 		pr_err("%s Invalid/Unsupported AXI configuration %x",
 			__func__, cfgcmd.cmd_type);
@@ -4585,6 +4725,13 @@
 		CDBG("Image composite done 1 irq occured.\n");
 		vfe32_process_output_path_irq_1(axi_ctrl);
 	}
+
+	if (axi_ctrl->share_ctrl->outpath.output_mode &
+		VFE32_OUTPUT_MODE_TERTIARY1)
+		if (irqstatus & (0x1 << (axi_ctrl->share_ctrl->outpath.out2.ch0
+			+ VFE_WM_OFFSET)))
+			vfe32_process_output_path_irq_rdi0(axi_ctrl);
+
 	/* in snapshot mode if done then send
 	snapshot done message */
 	if (axi_ctrl->share_ctrl->operation_mode ==
diff --git a/drivers/media/video/msm/msm_vfe32.h b/drivers/media/video/msm/msm_vfe32.h
index d5da432..1746f3f 100644
--- a/drivers/media/video/msm/msm_vfe32.h
+++ b/drivers/media/video/msm/msm_vfe32.h
@@ -87,6 +87,9 @@
  * the luma samples.  JPEG 4:2:2 */
 #define VFE_CHROMA_UPSAMPLE_INTERPOLATED 0
 
+/* wm bit offset for IRQ MASK and IRQ STATUS register */
+#define VFE_WM_OFFSET 6
+
 /* constants for irq registers */
 #define VFE_DISABLE_ALL_IRQS 0
 /* bit =1 is to clear the corresponding bit in VFE_IRQ_STATUS.  */
@@ -115,6 +118,17 @@
 #define VFE_IRQ_STATUS0_ASYNC_TIMER2  0x40000000  /* bit 30 */
 #define VFE_IRQ_STATUS0_ASYNC_TIMER3  0x80000000  /* bit 32 */
 
+#define VFE_IRQ_STATUS1_RDI0_REG_UPDATE_MASK  0x4000000 /*bit 26*/
+#define VFE_IRQ_STATUS1_RDI1_REG_UPDATE_MASK  0x8000000 /*bit 27*/
+
+/* TODO: the irq status passed from the axi to the vfe irq handler does not
+ * account for the two irq status registers. The macros below are added to
+ * differentiate the same bit being set on both status registers. This will
+ * be fixed later by passing the entire payload to the vfe irq handler and
+ * parsing it there instead of passing just the status bit. */
+#define VFE_IRQ_STATUS1_RDI0_REG_UPDATE  0x84000000 /*bit 26*/
+#define VFE_IRQ_STATUS1_RDI1_REG_UPDATE  0x88000000 /*bit 27*/
+
 /* imask for while waiting for stop ack,  driver has already
  * requested stop, waiting for reset irq, and async timer irq.
  * For irq_status_0, bit 28-32 are for async timer. For
@@ -788,7 +802,7 @@
 
 	struct vfe32_output_ch out0; /* preview and thumbnail */
 	struct vfe32_output_ch out1; /* snapshot */
-	struct vfe32_output_ch out2; /* video    */
+	struct vfe32_output_ch out2; /* rdi0    */
 };
 
 struct vfe32_frame_extra {
@@ -893,6 +907,7 @@
 #define VFE32_OUTPUT_MODE_PRIMARY_ALL_CHNLS	BIT(7)
 #define VFE32_OUTPUT_MODE_SECONDARY		BIT(8)
 #define VFE32_OUTPUT_MODE_SECONDARY_ALL_CHNLS	BIT(9)
+#define VFE32_OUTPUT_MODE_TERTIARY1		BIT(10)
 
 struct vfe_stats_control {
 	uint8_t  ackPending;
@@ -946,6 +961,7 @@
 	uint32_t vfeImaskCompositePacked;
 
 	spinlock_t  update_ack_lock;
+	spinlock_t  start_ack_lock;
 	spinlock_t  state_lock;
 	spinlock_t  io_lock;
 
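
The VFE_IRQ_STATUS1_*_REG_UPDATE values added above appear to OR a high tag bit into the status-1 masks so that the single 32-bit token handed to the irq dispatch path cannot collide with the same bit position in status register 0 (see the TODO above). A small illustrative decode follows; the status-0 value used here is made up.

    /* Illustrative only; the status-0 example mask is not a real VFE value. */
    #include <stdio.h>
    #include <stdint.h>

    #define STATUS0_REG_UPDATE_MASK       0x00000020  /* example status-0 bit */
    #define STATUS1_RDI0_REG_UPDATE_MASK  0x04000000  /* bit 26 of status 1 */
    #define STATUS1_TAG                   0x80000000  /* disambiguation tag */
    #define STATUS1_RDI0_REG_UPDATE \
            (STATUS1_TAG | STATUS1_RDI0_REG_UPDATE_MASK)

    static const char *decode_irq_token(uint32_t token)
    {
        switch (token) {
        case STATUS0_REG_UPDATE_MASK:
            return "status0: reg update";
        case STATUS1_RDI0_REG_UPDATE:
            return "status1: RDI0 reg update";
        default:
            return "unknown";
        }
    }

    int main(void)
    {
        printf("%s\n", decode_irq_token(STATUS0_REG_UPDATE_MASK));
        printf("%s\n", decode_irq_token(STATUS1_RDI0_REG_UPDATE));
        return 0;
    }
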
diff --git a/drivers/media/video/msm/sensors/ov7692_v4l2.c b/drivers/media/video/msm/sensors/ov7692_v4l2.c
index c25eba9..6fc1da1 100644
--- a/drivers/media/video/msm/sensors/ov7692_v4l2.c
+++ b/drivers/media/video/msm/sensors/ov7692_v4l2.c
@@ -585,7 +585,8 @@
 static struct msm_camera_i2c_reg_conf ov7692_wb_oem[][4] = {
 	{{-1, -1, -1}, {-1, -1, -1}, {-1, -1, -1},
 		{-1, -1, -1},},/*WHITEBALNACE OFF*/
-	{{0x13, 0xf7}, {0x15, 0x00},},		/*WHITEBALNACE AUTO*/
+	{{0x13, 0xf7}, {0x15, 0x00}, {-1, -1, -1},
+		{-1, -1, -1},}, /*WHITEBALNACE AUTO*/
 	{{0x13, 0xf5}, {0x01, 0x56}, {0x02, 0x50},
 		{0x15, 0x00},},	/*WHITEBALNACE CUSTOM*/
 	{{0x13, 0xf5}, {0x01, 0x66}, {0x02, 0x40},
diff --git a/drivers/media/video/msm/server/msm_cam_server.c b/drivers/media/video/msm/server/msm_cam_server.c
index dfa7fbe..f2bb65f 100644
--- a/drivers/media/video/msm/server/msm_cam_server.c
+++ b/drivers/media/video/msm/server/msm_cam_server.c
@@ -899,6 +899,12 @@
 	/*for single VFE msms (8660, 8960v1), just populate the session
 	with our VFE devices that registered*/
 	pmctl = msm_cam_server_get_mctl(pcam->mctl_handle);
+	if (pmctl == NULL) {
+		pr_err("%s: cannot find mctl\n", __func__);
+		msm_mctl_free(pcam);
+		atomic_dec(&ps->number_pcam_active);
+		return -ENODEV;
+	}
 	pmctl->axi_sdev = ps->axi_device[0];
 	pmctl->isp_sdev = ps->isp_subdev[0];
 	return rc;
@@ -2019,7 +2025,7 @@
 	}
 
 	pmctl = msm_cam_server_get_mctl(pcam->mctl_handle);
-	if (!pmctl->mctl_open) {
+	if (!pmctl || !pmctl->mctl_open) {
 		D("%s: media contoller is not inited\n",
 			 __func__);
 		rc = -ENODEV;
@@ -2297,7 +2303,10 @@
 	/* assume there is only one active camera possible*/
 	config_cam->p_mctl =
 		msm_cam_server_get_mctl(g_server_dev.pcam_active->mctl_handle);
-
+	if (!config_cam->p_mctl) {
+		pr_err("%s: cannot find mctl\n", __func__);
+		return -ENODEV;
+	}
 	INIT_HLIST_HEAD(&config_cam->p_mctl->stats_info.pmem_stats_list);
 	spin_lock_init(&config_cam->p_mctl->stats_info.pmem_stats_spinlock);
 
diff --git a/drivers/media/video/msm_vidc/msm_venc.c b/drivers/media/video/msm_vidc/msm_venc.c
index ec93628..63f23eb 100644
--- a/drivers/media/video/msm_vidc/msm_venc.c
+++ b/drivers/media/video/msm_vidc/msm_venc.c
@@ -477,7 +477,14 @@
 		rc = vidc_hal_session_set_property((void *)inst->session,
 				HAL_PARAM_FRAME_SIZE, &frame_sz);
 		if (rc) {
-			pr_err("Failed to set hal property for framesize\n");
+			pr_err("Failed to set framesize for Output port\n");
+			break;
+		}
+		frame_sz.buffer_type = HAL_BUFFER_OUTPUT;
+		rc = vidc_hal_session_set_property((void *)inst->session,
+				HAL_PARAM_FRAME_SIZE, &frame_sz);
+		if (rc) {
+			pr_err("Failed to set framesize for Capture port\n");
 			break;
 		}
 		rc = msm_comm_try_get_bufreqs(inst);
@@ -743,6 +750,8 @@
 		venc_profile_level.profile = control.value;
 		profile_level.level = venc_profile_level.level;
 		pdata = &profile_level;
+		pr_debug("\nprofile: %d\n",
+			   profile_level.profile);
 		break;
 	case V4L2_CID_MPEG_VIDEO_H264_LEVEL:
 		property_id =
@@ -804,6 +813,8 @@
 		venc_profile_level.level = control.value;
 		profile_level.profile = venc_profile_level.profile;
 		pdata = &profile_level;
+		pr_debug("\nLevel: %d\n",
+			   profile_level.level);
 		break;
 	case V4L2_CID_MPEG_VIDC_VIDEO_ROTATION:
 		property_id =
@@ -895,7 +906,7 @@
 	case V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_BETA:
 		property_id =
 			HAL_PARAM_VENC_H264_DEBLOCK_CONTROL;
-		h264_db_control.slicebeta_offset = control.value;
+		h264_db_control.slice_beta_offset = control.value;
 		pdata = &h264_db_control;
 	default:
 		break;
@@ -1164,7 +1175,7 @@
 	}
 	rc = vb2_dqbuf(q, b, true);
 	if (rc)
-		pr_err("Failed to qbuf, %d\n", rc);
+		pr_err("Failed to dqbuf, %d\n", rc);
 	return rc;
 }
 
diff --git a/drivers/media/video/msm_vidc/msm_vidc_common.c b/drivers/media/video/msm_vidc/msm_vidc_common.c
index 9b617aa..ba5fdc4 100644
--- a/drivers/media/video/msm_vidc/msm_vidc_common.c
+++ b/drivers/media/video/msm_vidc/msm_vidc_common.c
@@ -813,7 +813,7 @@
 		if (rc || state == inst->state)
 			break;
 	default:
-		pr_err("State not recognized\n");
+		pr_err("State not recognized: %d\n", flipped_state);
 		rc = -EINVAL;
 		break;
 	}
@@ -855,6 +855,7 @@
 		frame_data.alloc_len = vb->v4l2_planes[0].length;
 		frame_data.filled_len = vb->v4l2_planes[0].bytesused;
 		frame_data.device_addr = vb->v4l2_planes[0].m.userptr;
+		frame_data.timestamp = vb->v4l2_buf.timestamp.tv_usec;
 		frame_data.clnt_data = (u32)vb;
 		if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
 			frame_data.buffer_type = HAL_BUFFER_INPUT;
@@ -871,10 +872,8 @@
 			frame_data.filled_len = 0;
 			frame_data.buffer_type = HAL_BUFFER_OUTPUT;
 			frame_data.extradata_addr = 0;
-			pr_debug("Sending ftb to hal...: Alloc: %d :filled: %d"
-				" extradata_addr: %d\n", frame_data.alloc_len,
-				   frame_data.filled_len,
-				   frame_data.extradata_addr);
+			pr_debug("Sending ftb to hal..: Alloc: %d :filled: %d\n",
+				frame_data.alloc_len, frame_data.filled_len);
 			rc = vidc_hal_session_ftb((void *) inst->session,
 					&frame_data);
 		} else {
diff --git a/drivers/media/video/msm_vidc/vidc_hal.c b/drivers/media/video/msm_vidc/vidc_hal.c
index 13a319d9..583b5a9 100644
--- a/drivers/media/video/msm_vidc/vidc_hal.c
+++ b/drivers/media/video/msm_vidc/vidc_hal.c
@@ -23,7 +23,7 @@
 #define REG_ADDR_OFFSET_BITMASK	0x000FFFFF
 
 /*Workaround for virtio */
-#define HFI_VIRTIO_FW_BIAS		0x34f00000
+#define HFI_VIRTIO_FW_BIAS		0x14f00000
 
 struct hal_device_data hal_ctxt;
 
@@ -40,7 +40,7 @@
 
 	sys_init = (struct hfi_cmd_sys_session_init_packet *)packet;
 	sess = (struct hal_session *) sys_init->session_id;
-	switch (sys_init->packet) {
+	switch (sys_init->packet_type) {
 	case HFI_CMD_SESSION_EMPTY_BUFFER:
 		if (sess->is_decoder) {
 			struct hfi_cmd_session_empty_buffer_compressed_packet
@@ -73,7 +73,7 @@
 			struct hfi_buffer_info *buff;
 			buff = (struct hfi_buffer_info *) pkt->rg_buffer_info;
 			buff->buffer_addr -= HFI_VIRTIO_FW_BIAS;
-			buff->extradata_addr -= HFI_VIRTIO_FW_BIAS;
+			buff->extra_data_addr -= HFI_VIRTIO_FW_BIAS;
 		} else {
 			for (i = 0; i < pkt->num_buffers; i++)
 				pkt->rg_buffer_info[i] -= HFI_VIRTIO_FW_BIAS;
@@ -89,7 +89,7 @@
 			struct hfi_buffer_info *buff;
 			buff = (struct hfi_buffer_info *) pkt->rg_buffer_info;
 			buff->buffer_addr -= HFI_VIRTIO_FW_BIAS;
-			buff->extradata_addr -= HFI_VIRTIO_FW_BIAS;
+			buff->extra_data_addr -= HFI_VIRTIO_FW_BIAS;
 		} else {
 			for (i = 0; i < pkt->num_buffers; i++)
 				pkt->rg_buffer_info[i] -= HFI_VIRTIO_FW_BIAS;
@@ -640,7 +640,8 @@
 		goto err_no_dev;
 	}
 	pkt.size = sizeof(struct hfi_cmd_sys_init_packet);
-	pkt.packet = HFI_CMD_SYS_INIT;
+	pkt.packet_type = HFI_CMD_SYS_INIT;
+	pkt.arch_type = HFI_ARCH_OX_OFFSET;
 	if (vidc_hal_iface_cmdq_write(dev, &pkt)) {
 		rc = -ENOTEMPTY;
 		goto err_write_fail;
@@ -664,8 +665,6 @@
 	}
 	write_register(dev->hal_data->register_base_addr,
 		VIDC_CPU_CS_SCIACMDARG3, 0, 0);
-	disable_irq_nosync(dev->hal_data->irq);
-	vidc_hal_interface_queues_release(dev);
 	HAL_MSG_INFO("\nHAL exited\n");
 	return 0;
 }
@@ -742,8 +741,8 @@
 	switch (resource_hdr->resource_id) {
 	case VIDC_RESOURCE_OCMEM:
 	{
-		struct hfi_resource_ocmem_type *hfioc_mem =
-			(struct hfi_resource_ocmem_type *)
+		struct hfi_resource_ocmem *hfioc_mem =
+			(struct hfi_resource_ocmem *)
 			&pkt->rg_resource_data[0];
 		struct vidc_mem_addr *vidc_oc_mem =
 			(struct vidc_mem_addr *) resource_value;
@@ -751,7 +750,7 @@
 		pkt->resource_type = HFI_RESOURCE_OCMEM;
 		hfioc_mem->size = (u32) vidc_oc_mem->mem_size;
 		hfioc_mem->mem = (u8 *) vidc_oc_mem->align_device_addr;
-		pkt->size += sizeof(struct hfi_resource_ocmem_type);
+		pkt->size += sizeof(struct hfi_resource_ocmem);
 		if (vidc_hal_iface_cmdq_write(dev, pkt))
 			rc = -ENOTEMPTY;
 		break;
@@ -807,7 +806,41 @@
 		rc = -ENOTEMPTY;
 	return rc;
 }
-
+static u32 get_hfi_buffer(int hal_buffer)
+{
+	u32 buffer;
+	switch (hal_buffer) {
+	case HAL_BUFFER_INPUT:
+		buffer = HFI_BUFFER_INPUT;
+		break;
+	case HAL_BUFFER_OUTPUT:
+		buffer = HFI_BUFFER_OUTPUT;
+		break;
+	case HAL_BUFFER_OUTPUT2:
+		buffer = HFI_BUFFER_OUTPUT;
+		break;
+	case HAL_BUFFER_EXTRADATA_INPUT:
+		buffer = HFI_BUFFER_EXTRADATA_INPUT;
+		break;
+	case HAL_BUFFER_EXTRADATA_OUTPUT:
+		buffer = HFI_BUFFER_EXTRADATA_OUTPUT;
+		break;
+	case HAL_BUFFER_EXTRADATA_OUTPUT2:
+		buffer = HFI_BUFFER_EXTRADATA_OUTPUT2;
+		break;
+	case HAL_BUFFER_INTERNAL_SCRATCH:
+		buffer = HFI_BUFFER_INTERNAL_SCRATCH;
+		break;
+	case HAL_BUFFER_INTERNAL_PERSIST:
+		buffer = HFI_BUFFER_INTERNAL_PERSIST;
+		break;
+	default:
+		HAL_MSG_ERROR("Invalid buffer type : 0x%x\n", hal_buffer);
+		buffer = 0;
+		break;
+	}
+	return buffer;
+}
 int vidc_hal_session_set_property(void *sess,
 	enum hal_property ptype, void *pdata)
 {
@@ -832,24 +865,37 @@
 	switch (ptype) {
 	case HAL_CONFIG_FRAME_RATE:
 	{
-		struct hfi_frame_rate *hfi_fps;
+		struct hfi_frame_rate *hfi;
+		u32 buffer;
+		struct hal_frame_rate *prop =
+			(struct hal_frame_rate *) pdata;
 		pkt->rg_property_data[0] = HFI_PROPERTY_CONFIG_FRAME_RATE;
-		hfi_fps = (struct hfi_frame_rate *) &pkt->rg_property_data[1];
-		memcpy(hfi_fps, (struct hfi_frame_rate *)
-			pdata, sizeof(struct hfi_frame_rate));
+		hfi = (struct hfi_frame_rate *) &pkt->rg_property_data[1];
+		buffer = get_hfi_buffer(prop->buffer_type);
+		if (buffer)
+			hfi->buffer_type = buffer;
+		else
+			return -EINVAL;
+		hfi->frame_rate = prop->frame_rate;
 		pkt->size += sizeof(u32) + sizeof(struct hfi_frame_rate);
 		break;
 	}
 	case HAL_PARAM_UNCOMPRESSED_FORMAT_SELECT:
 	{
-		struct hfi_uncompressed_format_select *hfi_buf_fmt;
+		u32 buffer;
+		struct hfi_uncompressed_format_select *hfi;
+		struct hal_uncompressed_format_select *prop =
+			(struct hal_uncompressed_format_select *) pdata;
 		pkt->rg_property_data[0] =
 			HFI_PROPERTY_PARAM_UNCOMPRESSED_FORMAT_SELECT;
-		hfi_buf_fmt =
-		(struct hfi_uncompressed_format_select *)
-		&pkt->rg_property_data[1];
-		memcpy(hfi_buf_fmt, (struct hfi_uncompressed_format_select *)
-			pdata, sizeof(struct hfi_uncompressed_format_select));
+		hfi = (struct hfi_uncompressed_format_select *)
+			&pkt->rg_property_data[1];
+		buffer = get_hfi_buffer(prop->buffer_type);
+		if (buffer)
+			hfi->buffer_type = buffer;
+		else
+			return -EINVAL;
+		hfi->format = prop->format;
 		pkt->size += sizeof(u32) + sizeof(struct
 			hfi_uncompressed_format_select);
 		break;
@@ -862,11 +908,18 @@
 		break;
 	case HAL_PARAM_FRAME_SIZE:
 	{
-		struct hfi_frame_size *hfi_rect;
+		u32 buffer;
+		struct hfi_frame_size *hfi;
+		struct hal_frame_size *prop = (struct hal_frame_size *) pdata;
 		pkt->rg_property_data[0] = HFI_PROPERTY_PARAM_FRAME_SIZE;
-		hfi_rect = (struct hfi_frame_size *) &pkt->rg_property_data[1];
-		memcpy(hfi_rect, (struct hfi_frame_size *) pdata,
-			sizeof(struct hfi_frame_size));
+		hfi = (struct hfi_frame_size *) &pkt->rg_property_data[1];
+		buffer = get_hfi_buffer(prop->buffer_type);
+		if (buffer)
+			hfi->buffer_type = buffer;
+		else
+			return -EINVAL;
+		hfi->height = prop->height;
+		hfi->width = prop->width;
 		pkt->size += sizeof(u32) + sizeof(struct hfi_frame_size);
 		break;
 	}
@@ -875,38 +928,85 @@
 		struct hfi_enable *hfi;
 		pkt->rg_property_data[0] = HFI_PROPERTY_CONFIG_REALTIME;
 		hfi = (struct hfi_enable *) &pkt->rg_property_data[1];
-		memcpy(hfi, (struct hfi_enable *) pdata,
-				sizeof(struct hfi_enable));
-		pkt->size += sizeof(u32) + sizeof(struct hfi_enable);
+		hfi->enable = ((struct hfi_enable *) pdata)->enable;
+		pkt->size += sizeof(u32) * 2;
 		break;
 	}
 	case HAL_PARAM_BUFFER_COUNT_ACTUAL:
 	{
+		u32 buffer;
 		struct hfi_buffer_count_actual *hfi;
+		struct hal_buffer_count_actual *prop =
+			(struct hal_buffer_count_actual *) pdata;
 		pkt->rg_property_data[0] =
 			HFI_PROPERTY_PARAM_BUFFER_COUNT_ACTUAL;
 		hfi = (struct hfi_buffer_count_actual *)
 				&pkt->rg_property_data[1];
-		memcpy(hfi, (struct hfi_buffer_count_actual *) pdata,
-			sizeof(struct hfi_buffer_count_actual));
+		hfi->buffer_count_actual = prop->buffer_count_actual;
+		buffer = get_hfi_buffer(prop->buffer_type);
+		if (buffer)
+			hfi->buffer_type = buffer;
+		else
+			return -EINVAL;
 		pkt->size += sizeof(u32) + sizeof(struct
 					hfi_buffer_count_actual);
 		break;
 	}
 	case HAL_PARAM_NAL_STREAM_FORMAT_SELECT:
 	{
+		struct hal_nal_stream_format_supported *prop =
+			(struct hal_nal_stream_format_supported *)pdata;
 		pkt->rg_property_data[0] =
 			HFI_PROPERTY_PARAM_NAL_STREAM_FORMAT_SELECT;
-		pkt->rg_property_data[1] = (enum HFI_NAL_STREAM_FORMAT)pdata;
-		pkt->size += sizeof(u32) + sizeof(enum HFI_NAL_STREAM_FORMAT);
+		HAL_MSG_INFO("nal stream format: %d\n",
+				prop->nal_stream_format_supported);
+		switch (prop->nal_stream_format_supported) {
+		case HAL_NAL_FORMAT_STARTCODES:
+			pkt->rg_property_data[1] =
+				HFI_NAL_FORMAT_STARTCODES;
+			break;
+		case HAL_NAL_FORMAT_ONE_NAL_PER_BUFFER:
+			pkt->rg_property_data[1] =
+				HFI_NAL_FORMAT_ONE_NAL_PER_BUFFER;
+			break;
+		case HAL_NAL_FORMAT_ONE_BYTE_LENGTH:
+			pkt->rg_property_data[1] =
+				HFI_NAL_FORMAT_ONE_BYTE_LENGTH;
+			break;
+		case HAL_NAL_FORMAT_TWO_BYTE_LENGTH:
+			pkt->rg_property_data[1] =
+				HFI_NAL_FORMAT_TWO_BYTE_LENGTH;
+			break;
+		case HAL_NAL_FORMAT_FOUR_BYTE_LENGTH:
+			pkt->rg_property_data[1] =
+				HFI_NAL_FORMAT_FOUR_BYTE_LENGTH;
+			break;
+		default:
+			HAL_MSG_ERROR("Invalid nal format: 0x%x",
+				  prop->nal_stream_format_supported);
+			break;
+		}
+		pkt->size += sizeof(u32) * 2;
 		break;
 	}
 	case HAL_PARAM_VDEC_OUTPUT_ORDER:
 	{
+		int *data = (int *) pdata;
 		pkt->rg_property_data[0] =
 			HFI_PROPERTY_PARAM_VDEC_OUTPUT_ORDER;
-		pkt->rg_property_data[1] = (enum HFI_OUTPUT_ORDER)pdata;
-		pkt->size += sizeof(u32) + sizeof(enum HFI_OUTPUT_ORDER);
+		switch (*data) {
+		case HAL_OUTPUT_ORDER_DECODE:
+			pkt->rg_property_data[1] = HFI_OUTPUT_ORDER_DECODE;
+			break;
+		case HAL_OUTPUT_ORDER_DISPLAY:
+			pkt->rg_property_data[1] = HFI_OUTPUT_ORDER_DISPLAY;
+			break;
+		default:
+			HAL_MSG_ERROR("invalid output order: 0x%x",
+						  *data);
+			break;
+		}
+		pkt->size += sizeof(u32) * 2;
 		break;
 	}
 	case HAL_PARAM_VDEC_PICTURE_TYPE_DECODE:
@@ -916,7 +1016,7 @@
 			HFI_PROPERTY_PARAM_VDEC_PICTURE_TYPE_DECODE;
 		hfi = (struct hfi_enable_picture *) &pkt->rg_property_data[1];
 		hfi->picture_type = (u32) pdata;
-		pkt->size += sizeof(u32) + sizeof(struct hfi_enable_picture);
+		pkt->size += sizeof(u32) * 2;
 		break;
 	}
 	case HAL_PARAM_VDEC_OUTPUT2_KEEP_ASPECT_RATIO:
@@ -925,9 +1025,8 @@
 		pkt->rg_property_data[0] =
 			HFI_PROPERTY_PARAM_VDEC_OUTPUT2_KEEP_ASPECT_RATIO;
 		hfi = (struct hfi_enable *) &pkt->rg_property_data[1];
-		memcpy(hfi, (struct hfi_enable *) pdata,
-				sizeof(struct hfi_enable));
-		pkt->size += sizeof(u32) + sizeof(struct hfi_enable);
+		hfi->enable = ((struct hfi_enable *) pdata)->enable;
+		pkt->size += sizeof(u32) * 2;
 		break;
 	}
 	case HAL_CONFIG_VDEC_POST_LOOP_DEBLOCKER:
@@ -936,41 +1035,64 @@
 		pkt->rg_property_data[0] =
 			HFI_PROPERTY_CONFIG_VDEC_POST_LOOP_DEBLOCKER;
 		hfi = (struct hfi_enable *) &pkt->rg_property_data[1];
-		memcpy(hfi, (struct hfi_enable *) pdata,
-				sizeof(struct hfi_enable));
-		pkt->size += sizeof(u32) + sizeof(struct hfi_enable);
+		hfi->enable = ((struct hfi_enable *) pdata)->enable;
+		pkt->size += sizeof(u32) * 2;
 		break;
 	}
 	case HAL_PARAM_VDEC_MULTI_STREAM:
 	{
+		u32 buffer;
 		struct hfi_multi_stream *hfi;
+		struct hal_multi_stream *prop =
+			(struct hal_multi_stream *) pdata;
 		pkt->rg_property_data[0] =
 			HFI_PROPERTY_PARAM_VDEC_MULTI_STREAM;
 		hfi = (struct hfi_multi_stream *) &pkt->rg_property_data[1];
-		memcpy(hfi, (struct hfi_multi_stream *)pdata,
-				sizeof(struct hfi_multi_stream));
+		buffer = get_hfi_buffer(prop->buffer_type);
+		if (buffer)
+			hfi->buffer_type = buffer;
+		else
+			return -EINVAL;
+		hfi->enable = prop->enable;
+		hfi->width = prop->width;
+		hfi->height = prop->height;
 		pkt->size += sizeof(u32) + sizeof(struct hfi_multi_stream);
 		break;
 	}
 	case HAL_PARAM_VDEC_DISPLAY_PICTURE_BUFFER_COUNT:
 	{
-		struct hfi_display_picture_buffer_count *hfi_disp_buf;
+		struct hfi_display_picture_buffer_count *hfi;
+		struct hal_display_picture_buffer_count *prop =
+			(struct hal_display_picture_buffer_count *) pdata;
 		pkt->rg_property_data[0] =
 			HFI_PROPERTY_PARAM_VDEC_DISPLAY_PICTURE_BUFFER_COUNT;
-		hfi_disp_buf = (struct hfi_display_picture_buffer_count *)
+		hfi = (struct hfi_display_picture_buffer_count *)
 			&pkt->rg_property_data[1];
-		memcpy(hfi_disp_buf,
-			(struct hfi_display_picture_buffer_count *)pdata,
-			sizeof(struct hfi_display_picture_buffer_count));
+		hfi->count = prop->count;
+		hfi->enable = prop->enable;
 		pkt->size += sizeof(u32) +
 			sizeof(struct hfi_display_picture_buffer_count);
 		break;
 	}
 	case HAL_PARAM_DIVX_FORMAT:
 	{
+		int *data = pdata;
 		pkt->rg_property_data[0] = HFI_PROPERTY_PARAM_DIVX_FORMAT;
-		pkt->rg_property_data[1] = (enum HFI_DIVX_FORMAT)pdata;
-		pkt->size += sizeof(u32) + sizeof(enum HFI_DIVX_FORMAT);
+		switch (*data) {
+		case HAL_DIVX_FORMAT_4:
+			pkt->rg_property_data[1] = HFI_DIVX_FORMAT_4;
+			break;
+		case HAL_DIVX_FORMAT_5:
+			pkt->rg_property_data[1] = HFI_DIVX_FORMAT_5;
+			break;
+		case HAL_DIVX_FORMAT_6:
+			pkt->rg_property_data[1] = HFI_DIVX_FORMAT_6;
+			break;
+		default:
+			HAL_MSG_ERROR("Invalid divx format: 0x%x", *data);
+			break;
+		}
+		pkt->size += sizeof(u32) * 2;
 		break;
 	}
 	case HAL_CONFIG_VDEC_MB_ERROR_MAP_REPORTING:
@@ -979,25 +1101,23 @@
 		pkt->rg_property_data[0] =
 			HFI_PROPERTY_CONFIG_VDEC_MB_ERROR_MAP_REPORTING;
 		hfi = (struct hfi_enable *) &pkt->rg_property_data[1];
-		memcpy(hfi, (struct hfi_enable *) pdata,
-				sizeof(struct hfi_enable));
-		pkt->size += sizeof(u32) + sizeof(struct hfi_enable);
+		hfi->enable = ((struct hfi_enable *) pdata)->enable;
+		pkt->size += sizeof(u32) * 2;
 		break;
 	}
 	case HAL_PARAM_VDEC_CONTINUE_DATA_TRANSFER:
 	{
-		struct hfi_enable *enable;
+		struct hfi_enable *hfi;
 		pkt->rg_property_data[0] =
 			HFI_PROPERTY_PARAM_VDEC_CONTINUE_DATA_TRANSFER;
-		enable = (struct hfi_enable *) &pkt->rg_property_data[1];
-		memcpy(enable, (struct hfi_enable *) pdata,
-				sizeof(struct hfi_enable));
-		pkt->size += sizeof(u32) + sizeof(struct hfi_enable);
+		hfi = (struct hfi_enable *) &pkt->rg_property_data[1];
+		hfi->enable = ((struct hfi_enable *) pdata)->enable;
+		pkt->size += sizeof(u32) * 2;
 		break;
 	}
 	case HAL_CONFIG_VENC_REQUEST_IFRAME:
 		pkt->rg_property_data[0] =
-			HFI_PROPERTY_CONFIG_VENC_REQUEST_IFRAME;
+			HFI_PROPERTY_CONFIG_VENC_REQUEST_SYNC_FRAME;
 		break;
 	case HAL_PARAM_VENC_MPEG4_SHORT_HEADER:
 		break;
@@ -1009,31 +1129,64 @@
 		pkt->rg_property_data[0] =
 			HFI_PROPERTY_CONFIG_VENC_TARGET_BITRATE;
 		hfi = (struct hfi_bitrate *) &pkt->rg_property_data[1];
-		hfi->bit_rate = ((struct hfi_bitrate *)pdata)->bit_rate;
-		pkt->size += sizeof(u32) + sizeof(struct hfi_bitrate);
+		hfi->bit_rate = ((struct hal_bitrate *)pdata)->bit_rate;
+		pkt->size += sizeof(u32) * 2;
 		break;
 	}
 	case HAL_PARAM_PROFILE_LEVEL_CURRENT:
 	{
-		struct hfi_profile_level *hfi_profile_level;
+		struct hfi_profile_level *hfi;
+		struct hal_profile_level *prop =
+			(struct hal_profile_level *) pdata;
 		pkt->rg_property_data[0] =
 			HFI_PROPERTY_PARAM_PROFILE_LEVEL_CURRENT;
-		hfi_profile_level = (struct hfi_profile_level *)
-		&pkt->rg_property_data[1];
-		memcpy(hfi_profile_level, (struct hfi_profile_level *) pdata,
-			sizeof(struct hfi_profile_level));
+		hfi = (struct hfi_profile_level *)
+			&pkt->rg_property_data[1];
+		hfi->level = (u32) prop->level;
+		hfi->profile = prop->profile;
+		if (!hfi->profile)
+			hfi->profile = HFI_H264_PROFILE_HIGH;
+		if (!hfi->level)
+			hfi->level = 1;
 		pkt->size += sizeof(u32) + sizeof(struct hfi_profile_level);
 		break;
 	}
 	case HAL_PARAM_VENC_H264_ENTROPY_CONTROL:
 	{
 		struct hfi_h264_entropy_control *hfi;
+		struct hal_h264_entropy_control *prop =
+			(struct hal_h264_entropy_control *) pdata;
 		pkt->rg_property_data[0] =
 			HFI_PROPERTY_PARAM_VENC_H264_ENTROPY_CONTROL;
 		hfi = (struct hfi_h264_entropy_control *)
 			&pkt->rg_property_data[1];
-		memcpy(hfi, (struct hfi_h264_entropy_control *) pdata,
-				sizeof(struct hfi_h264_entropy_control));
+		switch (prop->entropy_mode) {
+		case HAL_H264_ENTROPY_CAVLC:
+			hfi->cabac_model = HFI_H264_ENTROPY_CAVLC;
+			break;
+		case HAL_H264_ENTROPY_CABAC:
+			hfi->cabac_model = HFI_H264_ENTROPY_CABAC;
+			switch (prop->cabac_model) {
+			case HAL_H264_CABAC_MODEL_0:
+				hfi->cabac_model = HFI_H264_CABAC_MODEL_0;
+				break;
+			case HAL_H264_CABAC_MODEL_1:
+				hfi->cabac_model = HFI_H264_CABAC_MODEL_1;
+				break;
+			case HAL_H264_CABAC_MODEL_2:
+				hfi->cabac_model = HFI_H264_CABAC_MODEL_2;
+				break;
+			default:
+				HAL_MSG_ERROR("Invalid cabac model 0x%x",
+					prop->cabac_model);
+				break;
+			}
+			break;
+		default:
+			HAL_MSG_ERROR("Invalid entropy selected: 0x%x",
+				prop->entropy_mode);
+			break;
+		}
 		pkt->size += sizeof(u32) + sizeof(
 			struct hfi_h264_entropy_control);
 		break;
@@ -1042,8 +1195,28 @@
 	{
 		pkt->rg_property_data[0] =
 			HFI_PROPERTY_PARAM_VENC_RATE_CONTROL;
-		pkt->rg_property_data[1] = (enum HFI_RATE_CONTROL)pdata;
-		pkt->size += sizeof(u32) + sizeof(enum HFI_RATE_CONTROL);
+		switch ((enum hal_rate_control)pdata) {
+		case HAL_RATE_CONTROL_OFF:
+			pkt->rg_property_data[1] = HFI_RATE_CONTROL_OFF;
+			break;
+		case HAL_RATE_CONTROL_CBR_CFR:
+			pkt->rg_property_data[1] = HFI_RATE_CONTROL_CBR_CFR;
+			break;
+		case HAL_RATE_CONTROL_CBR_VFR:
+			pkt->rg_property_data[1] = HFI_RATE_CONTROL_CBR_VFR;
+			break;
+		case HAL_RATE_CONTROL_VBR_CFR:
+			pkt->rg_property_data[1] = HFI_RATE_CONTROL_VBR_CFR;
+			break;
+		case HAL_RATE_CONTROL_VBR_VFR:
+			pkt->rg_property_data[1] = HFI_RATE_CONTROL_VBR_VFR;
+			break;
+		default:
+			HAL_MSG_ERROR("Invalid Rate control setting: 0x%x",
+						  (int) pdata);
+			break;
+		}
+		pkt->size += sizeof(u32) * 2;
 		break;
 	}
 	case HAL_PARAM_VENC_MPEG4_TIME_RESOLUTION:
@@ -1056,8 +1229,7 @@
 		hfi->time_increment_resolution =
 			((struct hal_mpeg4_time_resolution *)pdata)->
 					time_increment_resolution;
-		pkt->size += sizeof(u32) + sizeof(
-			struct hfi_mpeg4_time_resolution);
+		pkt->size += sizeof(u32) * 2;
 		break;
 	}
 	case HAL_PARAM_VENC_MPEG4_HEADER_EXTENSION:
@@ -1066,20 +1238,36 @@
 		pkt->rg_property_data[0] =
 			HFI_PROPERTY_PARAM_VENC_MPEG4_HEADER_EXTENSION;
 		hfi = (struct hfi_mpeg4_header_extension *)
-		&pkt->rg_property_data[1];
+			&pkt->rg_property_data[1];
 		hfi->header_extension = (u32) pdata;
-		pkt->size += sizeof(u32) +
-			sizeof(struct hfi_mpeg4_header_extension);
+		pkt->size += sizeof(u32) * 2;
 		break;
 	}
 	case HAL_PARAM_VENC_H264_DEBLOCK_CONTROL:
 	{
 		struct hfi_h264_db_control *hfi;
+		struct hal_h264_db_control *prop =
+			(struct hal_h264_db_control *) pdata;
 		pkt->rg_property_data[0] =
 			HFI_PROPERTY_PARAM_VENC_H264_DEBLOCK_CONTROL;
 		hfi = (struct hfi_h264_db_control *) &pkt->rg_property_data[1];
-		memcpy(hfi, (struct hfi_h264_db_control *) pdata,
-				sizeof(struct hfi_h264_db_control));
+		switch (prop->mode) {
+		case HAL_H264_DB_MODE_DISABLE:
+			hfi->mode = HFI_H264_DB_MODE_DISABLE;
+			break;
+		case HAL_H264_DB_MODE_SKIP_SLICE_BOUNDARY:
+			hfi->mode = HFI_H264_DB_MODE_SKIP_SLICE_BOUNDARY;
+			break;
+		case HAL_H264_DB_MODE_ALL_BOUNDARY:
+			hfi->mode = HFI_H264_DB_MODE_ALL_BOUNDARY;
+			break;
+		default:
+			HAL_MSG_ERROR("Invalid deblocking mode: 0x%x",
+						  prop->mode);
+			break;
+		}
+		hfi->slice_alpha_offset = prop->slice_alpha_offset;
+		hfi->slice_beta_offset = prop->slice_beta_offset;
 		pkt->size += sizeof(u32) +
 			sizeof(struct hfi_h264_db_control);
 		break;
@@ -1090,11 +1278,10 @@
 		pkt->rg_property_data[0] =
 			HFI_PROPERTY_PARAM_VENC_TEMPORAL_SPATIAL_TRADEOFF;
 		hfi = (struct hfi_temporal_spatial_tradeoff *)
-		&pkt->rg_property_data[1];
+			&pkt->rg_property_data[1];
 		hfi->ts_factor = ((struct hfi_temporal_spatial_tradeoff *)
 					pdata)->ts_factor;
-		pkt->size += sizeof(u32) +
-			sizeof(struct hfi_temporal_spatial_tradeoff);
+		pkt->size += sizeof(u32) * 2;
 		break;
 	}
 	case HAL_PARAM_VENC_SESSION_QP:
@@ -1125,7 +1312,7 @@
 		pkt->rg_property_data[0] = HFI_PROPERTY_CONFIG_VENC_IDR_PERIOD;
 		hfi = (struct hfi_idr_period *) &pkt->rg_property_data[1];
 		hfi->idr_period = ((struct hfi_idr_period *) pdata)->idr_period;
-		pkt->size += sizeof(u32) + sizeof(struct hfi_idr_period);
+		pkt->size += sizeof(u32) * 2;
 		break;
 	}
 	case HAL_CONFIG_VPE_OPERATIONS:
@@ -1133,25 +1320,67 @@
 	case HAL_PARAM_VENC_INTRA_REFRESH:
 	{
 		struct hfi_intra_refresh *hfi;
+		struct hal_intra_refresh *prop =
+			(struct hal_intra_refresh *) pdata;
 		pkt->rg_property_data[0] =
 			HFI_PROPERTY_PARAM_VENC_INTRA_REFRESH;
 		hfi = (struct hfi_intra_refresh *) &pkt->rg_property_data[1];
-		memcpy(hfi, (struct hfi_intra_refresh *) pdata,
-				sizeof(struct hfi_intra_refresh));
+		switch (prop->mode) {
+		case HAL_INTRA_REFRESH_NONE:
+			hfi->mode = HFI_INTRA_REFRESH_NONE;
+			break;
+		case HAL_INTRA_REFRESH_ADAPTIVE:
+			hfi->mode = HFI_INTRA_REFRESH_ADAPTIVE;
+			break;
+		case HAL_INTRA_REFRESH_CYCLIC:
+			hfi->mode = HFI_INTRA_REFRESH_CYCLIC;
+			break;
+		case HAL_INTRA_REFRESH_CYCLIC_ADAPTIVE:
+			hfi->mode = HFI_INTRA_REFRESH_CYCLIC_ADAPTIVE;
+			break;
+		case HAL_INTRA_REFRESH_RANDOM:
+			hfi->mode = HFI_INTRA_REFRESH_RANDOM;
+			break;
+		default:
+			HAL_MSG_ERROR("Invalid intra refresh setting: 0x%x",
+				prop->mode);
+			break;
+		}
+		hfi->air_mbs = prop->air_mbs;
+		hfi->air_ref = prop->air_ref;
+		hfi->cir_mbs = prop->cir_mbs;
 		pkt->size += sizeof(u32) + sizeof(struct hfi_intra_refresh);
 		break;
 	}
 	case HAL_PARAM_VENC_MULTI_SLICE_CONTROL:
 	{
 		struct hfi_multi_slice_control *hfi;
+		struct hal_multi_slice_control *prop =
+			(struct hal_multi_slice_control *) pdata;
 		pkt->rg_property_data[0] =
 			HFI_PROPERTY_PARAM_VENC_MULTI_SLICE_CONTROL;
 		hfi = (struct hfi_multi_slice_control *)
-				&pkt->rg_property_data[1];
-		memcpy(hfi, (struct hfi_multi_slice_control *) pdata,
-				sizeof(struct hfi_multi_slice_control));
+			&pkt->rg_property_data[1];
+		switch (prop->multi_slice) {
+		case HAL_MULTI_SLICE_OFF:
+			hfi->multi_slice = HFI_MULTI_SLICE_OFF;
+			break;
+		case HAL_MULTI_SLICE_GOB:
+			hfi->multi_slice = HFI_MULTI_SLICE_GOB;
+			break;
+		case HAL_MULTI_SLICE_BY_MB_COUNT:
+			hfi->multi_slice = HFI_MULTI_SLICE_BY_MB_COUNT;
+			break;
+		case HAL_MULTI_SLICE_BY_BYTE_COUNT:
+			hfi->multi_slice = HFI_MULTI_SLICE_BY_BYTE_COUNT;
+			break;
+		default:
+			HAL_MSG_ERROR("Invalid slice settings: 0x%x",
+				prop->multi_slice);
+			break;
+		}
 		pkt->size += sizeof(u32) + sizeof(struct
-						hfi_multi_slice_control);
+					hfi_multi_slice_control);
 		break;
 	}
 	case HAL_CONFIG_VPE_DEINTERLACE:
@@ -1161,8 +1390,8 @@
 		struct hfi_debug_config *hfi;
 		pkt->rg_property_data[0] = HFI_PROPERTY_SYS_DEBUG_CONFIG;
 		hfi = (struct hfi_debug_config *) &pkt->rg_property_data[1];
-		memcpy(hfi, (struct hfi_debug_config *) pdata,
-				sizeof(struct hfi_debug_config));
+		hfi->debug_config = ((struct hal_debug_config *)
+					pdata)->debug_config;
 		pkt->size = sizeof(struct hfi_cmd_sys_set_property_packet) +
 			sizeof(struct hfi_debug_config);
 		break;
@@ -1353,7 +1582,7 @@
 	new_session->device = dev;
 	list_add_tail(&new_session->list, &dev->sess_head);
 	pkt.size = sizeof(struct hfi_cmd_sys_session_init_packet);
-	pkt.packet = HFI_CMD_SYS_SESSION_INIT;
+	pkt.packet_type = HFI_CMD_SYS_SESSION_INIT;
 	pkt.session_id = (u32) new_session;
 	pkt.session_domain = session_type;
 	pkt.session_codec = codec_type;
@@ -1363,7 +1592,7 @@
 }
 
 static int vidc_hal_send_session_cmd(void *session_id,
-	 enum HFI_COMMAND pkt_type)
+	 int pkt_type)
 {
 	struct vidc_hal_session_cmd_pkt pkt;
 	int rc = 0;
@@ -1400,6 +1629,7 @@
 int vidc_hal_session_set_buffers(void *sess,
 	struct vidc_buffer_addr_info *buffer_info)
 {
+	u32 buffer;
 	struct hfi_cmd_session_set_buffers_packet *pkt;
 	u8 packet[VIDC_IFACEQ_VAR_LARGE_PKT_SIZE];
 	int rc = 0;
@@ -1430,7 +1660,7 @@
 	if ((buffer_info->buffer_type == HAL_BUFFER_OUTPUT) ||
 		(buffer_info->buffer_type == HAL_BUFFER_OUTPUT2)) {
 		struct hfi_buffer_info *buff;
-		pkt->extradata_size = buffer_info->extradata_size;
+		pkt->extra_data_size = buffer_info->extradata_size;
 		pkt->size = sizeof(struct hfi_cmd_session_set_buffers_packet) -
 			sizeof(u32) + ((buffer_info->num_buffers) *
 			sizeof(struct hfi_buffer_info));
@@ -1438,25 +1668,23 @@
 		for (i = 0; i < pkt->num_buffers; i++) {
 			buff->buffer_addr =
 				buffer_info->align_device_addr;
-			buff->extradata_addr =
+			buff->extra_data_addr =
 				buffer_info->extradata_addr;
 		}
 	} else {
-		pkt->extradata_size = 0;
+		pkt->extra_data_size = 0;
 		pkt->size = sizeof(struct hfi_cmd_session_set_buffers_packet) +
 			((buffer_info->num_buffers - 1) * sizeof(u32));
 		for (i = 0; i < pkt->num_buffers; i++)
 			pkt->rg_buffer_info[i] =
 			buffer_info->align_device_addr;
 	}
-
-	if (buffer_info->buffer_type == HAL_BUFFER_INTERNAL_SCRATCH)
-		pkt->buffer_type = HFI_BUFFER_INTERNAL_SCRATCH;
-	else if (buffer_info->buffer_type == HAL_BUFFER_INTERNAL_PERSIST)
-		pkt->buffer_type = HFI_BUFFER_INTERNAL_PERSIST;
+	buffer = get_hfi_buffer(buffer_info->buffer_type);
+	if (buffer)
+		pkt->buffer_type = buffer;
 	else
-		pkt->buffer_type = (enum HFI_BUFFER) buffer_info->buffer_type;
-
+		return -EINVAL;
+	HAL_MSG_INFO("set buffers: 0x%x", buffer_info->buffer_type);
 	if (vidc_hal_iface_cmdq_write(session->device, pkt))
 		rc = -ENOTEMPTY;
 	return rc;
@@ -1465,6 +1693,7 @@
 int vidc_hal_session_release_buffers(void *sess,
 	struct vidc_buffer_addr_info *buffer_info)
 {
+	u32 buffer;
 	struct hfi_cmd_session_release_buffer_packet *pkt;
 	u8 packet[VIDC_IFACEQ_VAR_LARGE_PKT_SIZE];
 	int rc = 0;
@@ -1486,7 +1715,6 @@
 		((buffer_info->num_buffers - 1) * sizeof(u32));
 	pkt->packet_type = HFI_CMD_SESSION_RELEASE_BUFFERS;
 	pkt->session_id = (u32) session;
-	pkt->buffer_type = (enum HFI_BUFFER) buffer_info->buffer_type;
 	pkt->buffer_size = buffer_info->buffer_size;
 	pkt->num_buffers = buffer_info->num_buffers;
 
@@ -1497,10 +1725,10 @@
 		for (i = 0; i < pkt->num_buffers; i++) {
 			buff->buffer_addr =
 				buffer_info->align_device_addr;
-			buff->extradata_addr =
+			buff->extra_data_addr =
 				buffer_info->extradata_addr;
 		}
-		pkt->extradata_size = buffer_info->extradata_size;
+		pkt->extra_data_size = buffer_info->extradata_size;
 		pkt->size = sizeof(struct hfi_cmd_session_set_buffers_packet) -
 			sizeof(u32) + ((buffer_info->num_buffers) *
 			sizeof(struct hfi_buffer_info));
@@ -1508,11 +1736,16 @@
 		for (i = 0; i < pkt->num_buffers; i++)
 			pkt->rg_buffer_info[i] =
 			buffer_info->align_device_addr;
-		pkt->extradata_size = 0;
+		pkt->extra_data_size = 0;
 		pkt->size = sizeof(struct hfi_cmd_session_set_buffers_packet) +
 			((buffer_info->num_buffers - 1) * sizeof(u32));
 	}
-
+	buffer = get_hfi_buffer(buffer_info->buffer_type);
+	if (buffer)
+		pkt->buffer_type = buffer;
+	else
+		return -EINVAL;
+	HAL_MSG_INFO("Release buffers: 0x%x", buffer_info->buffer_type);
 	if (vidc_hal_iface_cmdq_write(session->device, pkt))
 		rc = -ENOTEMPTY;
 	return rc;
@@ -1572,8 +1805,8 @@
 			struct hfi_cmd_session_empty_buffer_compressed_packet);
 		pkt.packet_type = HFI_CMD_SESSION_EMPTY_BUFFER;
 		pkt.session_id = (u32) session;
-		pkt.timestamp_hi = (int) (((u64)input_frame->timestamp) >> 32);
-		pkt.timestamp_lo = (int) input_frame->timestamp;
+		pkt.time_stamp_hi = (int) (((u64)input_frame->timestamp) >> 32);
+		pkt.time_stamp_lo = (int) input_frame->timestamp;
 		pkt.flags = input_frame->flags;
 		pkt.mark_target = input_frame->mark_target;
 		pkt.mark_data = input_frame->mark_data;
@@ -1590,11 +1823,11 @@
 			pkt;
 		pkt.size = sizeof(struct
 		hfi_cmd_session_empty_buffer_uncompressed_plane0_packet);
-		pkt.packet = HFI_CMD_SESSION_EMPTY_BUFFER;
+		pkt.packet_type = HFI_CMD_SESSION_EMPTY_BUFFER;
 		pkt.session_id = (u32) session;
 		pkt.view_id = 0;
-		pkt.timestamp_hi = (u32) (((u64)input_frame->timestamp) >> 32);
-		pkt.timestamp_lo = (u32) input_frame->timestamp;
+		pkt.time_stamp_hi = (u32) (((u64)input_frame->timestamp) >> 32);
+		pkt.time_stamp_lo = (u32) input_frame->timestamp;
 		pkt.flags = input_frame->flags;
 		pkt.mark_target = input_frame->mark_target;
 		pkt.mark_data = input_frame->mark_data;
@@ -1734,8 +1967,23 @@
 	pkt.size = sizeof(struct hfi_cmd_session_flush_packet);
 	pkt.packet_type = HFI_CMD_SESSION_FLUSH;
 	pkt.session_id = (u32) session;
-	pkt.flush_type = flush_mode;
-
+	switch (flush_mode) {
+	case HAL_FLUSH_INPUT:
+		pkt.flush_type = HFI_FLUSH_INPUT;
+		break;
+	case HAL_FLUSH_OUTPUT:
+		pkt.flush_type = HFI_FLUSH_OUTPUT;
+		break;
+	case HAL_FLUSH_OUTPUT2:
+		pkt.flush_type = HFI_FLUSH_OUTPUT2;
+		break;
+	case HAL_FLUSH_ALL:
+		pkt.flush_type = HFI_FLUSH_ALL;
+		break;
+	default:
+		HAL_MSG_ERROR("Invalid flush mode: 0x%x\n", flush_mode);
+		break;
+	}
 	if (vidc_hal_iface_cmdq_write(session->device, &pkt))
 		rc = -ENOTEMPTY;
 	return rc;
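
A hypothetical sketch of get_hfi_buffer(), which this patch adds elsewhere and only calls above: a zero return makes both callers fail with -EINVAL, so the HAL-to-HFI buffer-type mapping presumably looks roughly like the following. The exact case list and the HFI_BUFFER_INPUT/OUTPUT/OUTPUT2/INTERNAL_PERSIST defines (assumed to live in the new vidc_hal_helper.h) are assumptions, not taken from the patch.

/* Hypothetical sketch only; the real helper is added elsewhere in this
 * patch.  Maps a HAL buffer type to the corresponding HFI_BUFFER_*
 * define and returns 0 for anything it does not recognise, which the
 * callers above turn into -EINVAL.
 */
static u32 get_hfi_buffer(int hal_buffer)
{
	u32 buffer;

	switch (hal_buffer) {
	case HAL_BUFFER_INPUT:
		buffer = HFI_BUFFER_INPUT;
		break;
	case HAL_BUFFER_OUTPUT:
		buffer = HFI_BUFFER_OUTPUT;
		break;
	case HAL_BUFFER_OUTPUT2:
		buffer = HFI_BUFFER_OUTPUT2;
		break;
	case HAL_BUFFER_INTERNAL_SCRATCH:
		buffer = HFI_BUFFER_INTERNAL_SCRATCH;
		break;
	case HAL_BUFFER_INTERNAL_PERSIST:
		buffer = HFI_BUFFER_INTERNAL_PERSIST;
		break;
	default:
		buffer = 0;
		break;
	}
	return buffer;
}
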
diff --git a/drivers/media/video/msm_vidc/vidc_hal.h b/drivers/media/video/msm_vidc/vidc_hal.h
index 15441f4..a36d7f3 100644
--- a/drivers/media/video/msm_vidc/vidc_hal.h
+++ b/drivers/media/video/msm_vidc/vidc_hal.h
@@ -11,27 +11,28 @@
  *
  */
 
-#ifndef __VIDC_HAL_H__
-#define __VIDC_HAL_H__
+#ifndef __H_VIDC_HAL_H__
+#define __H_VIDC_HAL_H__
 
 #include <linux/spinlock.h>
 #include <linux/mutex.h>
 #include "vidc_hal_api.h"
 #include "msm_smem.h"
+#include "vidc_hal_helper.h"
 
 #ifdef HAL_MSG_LOG
-#define HAL_MSG_LOW(x...) pr_debug(KERN_INFO x)
-#define HAL_MSG_MEDIUM(x...) pr_debug(KERN_INFO x)
-#define HAL_MSG_HIGH(x...) pr_debug(KERN_INFO x)
+#define HAL_MSG_LOW(x...)		pr_info(x)
+#define HAL_MSG_MEDIUM(x...)	pr_info(x)
+#define HAL_MSG_HIGH(x...)		pr_info(x)
 #else
 #define HAL_MSG_LOW(x...)
 #define HAL_MSG_MEDIUM(x...)
 #define HAL_MSG_HIGH(x...)
 #endif
 
-#define HAL_MSG_ERROR(x...) pr_err(KERN_INFO x)
-#define HAL_MSG_FATAL(x...) pr_err(KERN_INFO x)
-#define HAL_MSG_INFO(x...) pr_info(KERN_INFO x)
+#define HAL_MSG_ERROR(x...)		pr_err(x)
+#define HAL_MSG_FATAL(x...)		pr_err(x)
+#define HAL_MSG_INFO(x...)		pr_info(x)
 
 #define HFI_MASK_QHDR_TX_TYPE			0xFF000000
 #define HFI_MASK_QHDR_RX_TYPE			0x00FF0000
@@ -87,7 +88,7 @@
 #define VIDC_IFACEQ_TABLE_SIZE (sizeof(struct hfi_queue_table_header) \
 	+ sizeof(struct hfi_queue_header) * VIDC_IFACEQ_NUMQ)
 
-#define VIDC_IFACEQ_QUEUE_SIZE		(VIDC_IFACEQ_MAX_PKT_SIZE *  \
+#define VIDC_IFACEQ_QUEUE_SIZE	(VIDC_IFACEQ_MAX_PKT_SIZE *  \
 	VIDC_IFACEQ_MAX_BUF_COUNT * VIDC_IFACE_MAX_PARALLEL_CLNTS)
 
 #define VIDC_IFACEQ_GET_QHDR_START_ADDR(ptr, i)     \
@@ -107,409 +108,162 @@
 	VIDC_HWREG_HVI_SOFTINTEN =  0xA,
 };
 
-enum HFI_EVENT {
-	HFI_EVENT_SYS_ERROR,
-	HFI_EVENT_SESSION_ERROR,
-	HFI_EVENT_SESSION_SEQUENCE_CHANGED,
-	HFI_EVENT_SESSION_PROPERTY_CHANGED,
-	HFI_UNUSED_EVENT = 0x10000000,
-};
+#define HFI_EVENT_SESSION_SEQUENCE_CHANGED (HFI_OX_BASE + 0x3)
+#define HFI_EVENT_SESSION_PROPERTY_CHANGED (HFI_OX_BASE + 0x4)
 
-enum HFI_EVENT_DATA_SEQUENCE_CHANGED {
-	HFI_EVENT_DATA_SEQUENCE_CHANGED_SUFFICIENT_BUFFER_RESOURCES,
-	HFI_EVENT_DATA_SEQUENCE_CHANGED_INSUFFICIENT_BUFFER_RESOURCES,
-	HFI_UNUSED_SEQCHG = 0x10000000,
-};
+#define HFI_EVENT_DATA_SEQUENCE_CHANGED_SUFFICIENT_BUFFER_RESOURCES	\
+	(HFI_OX_BASE + 0x1)
+#define HFI_EVENT_DATA_SEQUENCE_CHANGED_INSUFFICIENT_BUFFER_RESOURCES	\
+	(HFI_OX_BASE + 0x2)
 
-#define HFI_BUFFERFLAG_EOS              0x00000001
-#define HFI_BUFFERFLAG_STARTTIME        0x00000002
-#define HFI_BUFFERFLAG_DECODEONLY       0x00000004
-#define HFI_BUFFERFLAG_DATACORRUPT      0x00000008
-#define HFI_BUFFERFLAG_ENDOFFRAME       0x00000010
-#define HFI_BUFFERFLAG_SYNCFRAME        0x00000020
-#define HFI_BUFFERFLAG_EXTRADATA        0x00000040
-#define HFI_BUFFERFLAG_CODECCONFIG      0x00000080
-#define HFI_BUFFERFLAG_TIMESTAMPINVALID 0x00000100
-#define HFI_BUFFERFLAG_READONLY         0x00000200
-#define HFI_BUFFERFLAG_ENDOFSUBFRAME    0x00000400
+#define HFI_BUFFERFLAG_EOS				0x00000001
+#define HFI_BUFFERFLAG_STARTTIME		0x00000002
+#define HFI_BUFFERFLAG_DECODEONLY		0x00000004
+#define HFI_BUFFERFLAG_DATACORRUPT		0x00000008
+#define HFI_BUFFERFLAG_ENDOFFRAME		0x00000010
+#define HFI_BUFFERFLAG_SYNCFRAME		0x00000020
+#define HFI_BUFFERFLAG_EXTRADATA		0x00000040
+#define HFI_BUFFERFLAG_CODECCONFIG		0x00000080
+#define HFI_BUFFERFLAG_TIMESTAMPINVALID	0x00000100
+#define HFI_BUFFERFLAG_READONLY			0x00000200
+#define HFI_BUFFERFLAG_ENDOFSUBFRAME	0x00000400
 
-enum HFI_ERROR {
-	HFI_ERR_NONE                              = 0,
-	HFI_ERR_SYS_UNKNOWN                       = 0x80000001,
-	HFI_ERR_SYS_FATAL                         = 0x80000002,
-	HFI_ERR_SYS_INVALID_PARAMETER             = 0x80000003,
-	HFI_ERR_SYS_VERSION_MISMATCH              = 0x80000004,
-	HFI_ERR_SYS_INSUFFICIENT_RESOURCES        = 0x80000005,
-	HFI_ERR_SYS_MAX_SESSIONS_REACHED          = 0x80000006,
-	HFI_ERR_SYS_UNSUPPORTED_CODEC             = 0x80000007,
-	HFI_ERR_SYS_SESSION_IN_USE                = 0x80000008,
-	HFI_ERR_SYS_SESSION_ID_OUT_OF_RANGE       = 0x80000009,
-	HFI_ERR_SYS_UNSUPPORTED_DOMAIN            = 0x8000000A,
-	HFI_ERR_SESSION_START_UNUSED              = 0x80001000,
-	HFI_ERR_SESSION_UNKNOWN                   = 0x80001001,
-	HFI_ERR_SESSION_FATAL                     = 0x80001002,
-	HFI_ERR_SESSION_INVALID_PARAMETER         = 0x80001003,
-	HFI_ERR_SESSION_BAD_POINTER               = 0x80001004,
-	HFI_ERR_SESSION_INVALID_SESSION_ID        = 0x80001005,
-	HFI_ERR_SESSION_INVALID_STREAM_ID         = 0x80001006,
-	HFI_ERR_SESSION_INCORRECT_STATE_OPERATION = 0x80001007,
-	HFI_ERR_SESSION_UNSUPPORTED_PROPERTY      = 0x80001008,
-	HFI_ERR_SESSION_UNSUPPORTED_SETTING       = 0x80001009,
-	HFI_ERR_SESSION_INSUFFICIENT_RESOURCES    = 0x8000100A,
-	HFI_ERR_SESSION_STREAM_CORRUPT            = 0x8000100B,
-	HFI_ERR_SESSION_STREAM_CORRUPT_OUTPUT_STALLED    =  0x8000100C,
-	HFI_ERR_SESSION_SYNC_FRAME_NOT_DETECTED          =  0x8000100D,
-	HFI_ERR_SESSION_EMPTY_BUFFER_DONE_OUTPUT_PENDING =  0x8000100E,
-	HFI_ERR_SESSION_SAME_STATE_OPERATION		= 0x8000100F,
-	HFI_UNUSED_ERR = 0x10000000,
-};
+#define HFI_ERR_SESSION_EMPTY_BUFFER_DONE_OUTPUT_PENDING	\
+	(HFI_OX_BASE + 0x1001)
+#define HFI_ERR_SESSION_SAME_STATE_OPERATION		\
+	(HFI_OX_BASE + 0x1002)
+#define HFI_ERR_SESSION_SYNC_FRAME_NOT_DETECTED		\
+	(HFI_OX_BASE + 0x1003)
 
-enum HFI_DOMAIN {
-	HFI_VIDEO_DOMAIN_VPE,
-	HFI_VIDEO_DOMAIN_ENCODER,
-	HFI_VIDEO_DOMAIN_DECODER,
-	HFI_UNUSED_DOMAIN = 0x10000000,
-};
+#define HFI_BUFFER_INTERNAL_SCRATCH (HFI_OX_BASE + 0x1)
+#define HFI_BUFFER_EXTRADATA_INPUT (HFI_OX_BASE + 0x2)
+#define HFI_BUFFER_EXTRADATA_OUTPUT (HFI_OX_BASE + 0x3)
+#define HFI_BUFFER_EXTRADATA_OUTPUT2 (HFI_OX_BASE + 0x4)
 
-enum HFI_VIDEO_CODEC {
-	HFI_VIDEO_CODEC_UNKNOWN  = 0x00000000,
-	HFI_VIDEO_CODEC_H264     = 0x00000002,
-	HFI_VIDEO_CODEC_H263     = 0x00000004,
-	HFI_VIDEO_CODEC_MPEG1    = 0x00000008,
-	HFI_VIDEO_CODEC_MPEG2    = 0x00000010,
-	HFI_VIDEO_CODEC_MPEG4    = 0x00000020,
-	HFI_VIDEO_CODEC_DIVX_311 = 0x00000040,
-	HFI_VIDEO_CODEC_DIVX     = 0x00000080,
-	HFI_VIDEO_CODEC_VC1      = 0x00000100,
-	HFI_VIDEO_CODEC_SPARK    = 0x00000200,
-	HFI_VIDEO_CODEC_VP6      = 0x00000400,
-	HFI_VIDEO_CODEC_VP7		 = 0x00000800,
-	HFI_VIDEO_CODEC_VP8		 = 0x00001000,
-	HFI_UNUSED_CODEC		 = 0x10000000,
-};
+#define HFI_BUFFER_MODE_STATIC (HFI_OX_BASE + 0x1)
+#define HFI_BUFFER_MODE_RING (HFI_OX_BASE + 0x2)
 
-enum HFI_H263_PROFILE {
-	HFI_H263_PROFILE_BASELINE           = 0x00000001,
-	HFI_H263_PROFILE_H320CODING         = 0x00000002,
-	HFI_H263_PROFILE_BACKWARDCOMPATIBLE = 0x00000004,
-	HFI_H263_PROFILE_ISWV2              = 0x00000008,
-	HFI_H263_PROFILE_ISWV3              = 0x00000010,
-	HFI_H263_PROFILE_HIGHCOMPRESSION    = 0x00000020,
-	HFI_H263_PROFILE_INTERNET           = 0x00000040,
-	HFI_H263_PROFILE_INTERLACE          = 0x00000080,
-	HFI_H263_PROFILE_HIGHLATENCY        = 0x00000100,
-	HFI_UNUSED_H263_PROFILE = 0x10000000,
-};
+#define HFI_FLUSH_INPUT (HFI_OX_BASE + 0x1)
+#define HFI_FLUSH_OUTPUT (HFI_OX_BASE + 0x2)
+#define HFI_FLUSH_OUTPUT2 (HFI_OX_BASE + 0x3)
+#define HFI_FLUSH_ALL (HFI_OX_BASE + 0x4)
 
-enum HFI_H263_LEVEL {
-	HFI_H263_LEVEL_10 = 0x00000001,
-	HFI_H263_LEVEL_20 = 0x00000002,
-	HFI_H263_LEVEL_30 = 0x00000004,
-	HFI_H263_LEVEL_40 = 0x00000008,
-	HFI_H263_LEVEL_45 = 0x00000010,
-	HFI_H263_LEVEL_50 = 0x00000020,
-	HFI_H263_LEVEL_60 = 0x00000040,
-	HFI_H263_LEVEL_70 = 0x00000080,
-	HFI_UNUSED_H263_LEVEL = 0x10000000,
-};
+#define HFI_EXTRADATA_NONE					0x00000000
+#define HFI_EXTRADATA_MB_QUANTIZATION		0x00000001
+#define HFI_EXTRADATA_INTERLACE_VIDEO		0x00000002
+#define HFI_EXTRADATA_VC1_FRAMEDISP			0x00000003
+#define HFI_EXTRADATA_VC1_SEQDISP			0x00000004
+#define HFI_EXTRADATA_TIMESTAMP				0x00000005
+#define HFI_EXTRADATA_S3D_FRAME_PACKING		0x00000006
+#define HFI_EXTRADATA_MULTISLICE_INFO		0x7F100000
+#define HFI_EXTRADATA_NUM_CONCEALED_MB		0x7F100001
+#define HFI_EXTRADATA_INDEX					0x7F100002
+#define HFI_EXTRADATA_METADATA_FILLER		0x7FE00002
 
-enum HFI_MPEG2_PROFILE {
-	HFI_MPEG2_PROFILE_SIMPLE  = 0x00000001,
-	HFI_MPEG2_PROFILE_MAIN    = 0x00000002,
-	HFI_MPEG2_PROFILE_422     = 0x00000004,
-	HFI_MPEG2_PROFILE_SNR     = 0x00000008,
-	HFI_MPEG2_PROFILE_SPATIAL = 0x00000010,
-	HFI_MPEG2_PROFILE_HIGH    = 0x00000020,
-	HFI_UNUSED_MPEG2_PROFILE = 0x10000000,
-};
-
-enum HFI_MPEG2_LEVEL {
-	HFI_MPEG2_LEVEL_LL  = 0x00000001,
-	HFI_MPEG2_LEVEL_ML  = 0x00000002,
-	HFI_MPEG2_LEVEL_H14 = 0x00000004,
-	HFI_MPEG2_LEVEL_HL  = 0x00000008,
-	HFI_UNUSED_MEPG2_LEVEL = 0x10000000,
-};
-
-enum HFI_MPEG4_PROFILE {
-	HFI_MPEG4_PROFILE_SIMPLE           = 0x00000001,
-	HFI_MPEG4_PROFILE_SIMPLESCALABLE   = 0x00000002,
-	HFI_MPEG4_PROFILE_CORE             = 0x00000004,
-	HFI_MPEG4_PROFILE_MAIN             = 0x00000008,
-	HFI_MPEG4_PROFILE_NBIT             = 0x00000010,
-	HFI_MPEG4_PROFILE_SCALABLETEXTURE  = 0x00000020,
-	HFI_MPEG4_PROFILE_SIMPLEFACE       = 0x00000040,
-	HFI_MPEG4_PROFILE_SIMPLEFBA        = 0x00000080,
-	HFI_MPEG4_PROFILE_BASICANIMATED    = 0x00000100,
-	HFI_MPEG4_PROFILE_HYBRID           = 0x00000200,
-	HFI_MPEG4_PROFILE_ADVANCEDREALTIME = 0x00000400,
-	HFI_MPEG4_PROFILE_CORESCALABLE     = 0x00000800,
-	HFI_MPEG4_PROFILE_ADVANCEDCODING   = 0x00001000,
-	HFI_MPEG4_PROFILE_ADVANCEDCORE     = 0x00002000,
-	HFI_MPEG4_PROFILE_ADVANCEDSCALABLE = 0x00004000,
-	HFI_MPEG4_PROFILE_ADVANCEDSIMPLE   = 0x00008000,
-	HFI_UNUSED_MPEG4_PROFILE = 0x10000000,
-};
-
-enum HFI_MPEG4_LEVEL {
-	HFI_MPEG4_LEVEL_0  = 0x00000001,
-	HFI_MPEG4_LEVEL_0b = 0x00000002,
-	HFI_MPEG4_LEVEL_1  = 0x00000004,
-	HFI_MPEG4_LEVEL_2  = 0x00000008,
-	HFI_MPEG4_LEVEL_3  = 0x00000010,
-	HFI_MPEG4_LEVEL_4  = 0x00000020,
-	HFI_MPEG4_LEVEL_4a = 0x00000040,
-	HFI_MPEG4_LEVEL_5  = 0x00000080,
-	HFI_MPEG4_LEVEL_VENDOR_START_UNUSED = 0x7F000000,
-	HFI_MPEG4_LEVEL_6  = 0x7F000001,
-	HFI_MPEG4_LEVEL_7  = 0x7F000002,
-	HFI_MPEG4_LEVEL_8  = 0x7F000003,
-	HFI_MPEG4_LEVEL_9  = 0x7F000004,
-	HFI_MPEG4_LEVEL_3b = 0x7F000005,
-	HFI_UNUSED_MPEG4_LEVEL = 0x10000000,
-};
-
-enum HFI_H264_PROFILE {
-	HFI_H264_PROFILE_BASELINE = 0x00000001,
-	HFI_H264_PROFILE_MAIN     = 0x00000002,
-	HFI_H264_PROFILE_EXTENDED = 0x00000004,
-	HFI_H264_PROFILE_HIGH     = 0x00000008,
-	HFI_H264_PROFILE_HIGH10   = 0x00000010,
-	HFI_H264_PROFILE_HIGH422  = 0x00000020,
-	HFI_H264_PROFILE_HIGH444  = 0x00000040,
-	HFI_H264_PROFILE_STEREO_HIGH = 0x00000080,
-	HFI_H264_PROFILE_MV_HIGH  = 0x00000100,
-	HFI_UNUSED_H264_PROFILE   = 0x10000000,
-};
-
-enum HFI_H264_LEVEL {
-	HFI_H264_LEVEL_1  = 0x00000001,
-	HFI_H264_LEVEL_1b = 0x00000002,
-	HFI_H264_LEVEL_11 = 0x00000004,
-	HFI_H264_LEVEL_12 = 0x00000008,
-	HFI_H264_LEVEL_13 = 0x00000010,
-	HFI_H264_LEVEL_2  = 0x00000020,
-	HFI_H264_LEVEL_21 = 0x00000040,
-	HFI_H264_LEVEL_22 = 0x00000080,
-	HFI_H264_LEVEL_3  = 0x00000100,
-	HFI_H264_LEVEL_31 = 0x00000200,
-	HFI_H264_LEVEL_32 = 0x00000400,
-	HFI_H264_LEVEL_4  = 0x00000800,
-	HFI_H264_LEVEL_41 = 0x00001000,
-	HFI_H264_LEVEL_42 = 0x00002000,
-	HFI_H264_LEVEL_5  = 0x00004000,
-	HFI_H264_LEVEL_51 = 0x00008000,
-	HFI_UNUSED_H264_LEVEL = 0x10000000,
-};
-
-enum HFI_VPX_PROFILE {
-	HFI_VPX_PROFILE_SIMPLE    = 0x00000001,
-	HFI_VPX_PROFILE_ADVANCED  = 0x00000002,
-	HFI_VPX_PROFILE_VERSION_0 = 0x00000004,
-	HFI_VPX_PROFILE_VERSION_1 = 0x00000008,
-	HFI_VPX_PROFILE_VERSION_2 = 0x00000010,
-	HFI_VPX_PROFILE_VERSION_3 = 0x00000020,
-	HFI_VPX_PROFILE_UNUSED = 0x10000000,
-};
-
-enum HFI_VC1_PROFILE {
-	HFI_VC1_PROFILE_SIMPLE   = 0x00000001,
-	HFI_VC1_PROFILE_MAIN     = 0x00000002,
-	HFI_VC1_PROFILE_ADVANCED = 0x00000004,
-	HFI_UNUSED_VC1_PROFILE = 0x10000000,
-};
-
-enum HFI_VC1_LEVEL {
-	HFI_VC1_LEVEL_LOW    = 0x00000001,
-	HFI_VC1_LEVEL_MEDIUM = 0x00000002,
-	HFI_VC1_LEVEL_HIGH   = 0x00000004,
-	HFI_VC1_LEVEL_0      = 0x00000008,
-	HFI_VC1_LEVEL_1      = 0x00000010,
-	HFI_VC1_LEVEL_2      = 0x00000020,
-	HFI_VC1_LEVEL_3      = 0x00000040,
-	HFI_VC1_LEVEL_4      = 0x00000080,
-	HFI_UNUSED_VC1_LEVEL = 0x10000000,
-};
-
-enum HFI_DIVX_FORMAT {
-	HFI_DIVX_FORMAT_4,
-	HFI_DIVX_FORMAT_5,
-	HFI_DIVX_FORMAT_6,
-	HFI_UNUSED_DIVX_FORMAT = 0x10000000,
-};
-
-enum HFI_DIVX_PROFILE {
-	HFI_DIVX_PROFILE_QMOBILE  = 0x00000001,
-	HFI_DIVX_PROFILE_MOBILE   = 0x00000002,
-	HFI_DIVX_PROFILE_MT       = 0x00000004,
-	HFI_DIVX_PROFILE_HT       = 0x00000008,
-	HFI_DIVX_PROFILE_HD       = 0x00000010,
-	HFI_UNUSED_DIVX_PROFILE = 0x10000000,
-};
-
-enum HFI_BUFFER {
-	HFI_BUFFER_INPUT,
-	HFI_BUFFER_OUTPUT,
-	HFI_BUFFER_OUTPUT2,
-	HFI_BUFFER_EXTRADATA_INPUT,
-	HFI_BUFFER_EXTRADATA_OUTPUT,
-	HFI_BUFFER_EXTRADATA_OUTPUT2,
-	HFI_BUFFER_INTERNAL_SCRATCH = 0x7F000001,
-	HFI_BUFFER_INTERNAL_PERSIST = 0x7F000002,
-	HFI_UNUSED_BUFFER = 0x10000000,
-};
-
-enum HFI_BUFFER_MODE {
-	HFI_BUFFER_MODE_STATIC,
-	HFI_BUFFER_MODE_RING,
-	HFI_UNUSED_BUFFER_MODE = 0x10000000,
-};
-
-enum HFI_FLUSH {
-	HFI_FLUSH_INPUT,
-	HFI_FLUSH_OUTPUT,
-	HFI_FLUSH_OUTPUT2,
-	HFI_FLUSH_ALL,
-	HFI_UNUSED_FLUSH = 0x10000000,
-};
-
-enum HFI_EXTRADATA {
-	HFI_EXTRADATA_NONE                 = 0x00000000,
-	HFI_EXTRADATA_MB_QUANTIZATION      = 0x00000001,
-	HFI_EXTRADATA_INTERLACE_VIDEO      = 0x00000002,
-	HFI_EXTRADATA_VC1_FRAMEDISP        = 0x00000003,
-	HFI_EXTRADATA_VC1_SEQDISP          = 0x00000004,
-	HFI_EXTRADATA_TIMESTAMP            = 0x00000005,
-	HFI_EXTRADATA_MULTISLICE_INFO      = 0x7F100000,
-	HFI_EXTRADATA_NUM_CONCEALED_MB     = 0x7F100001,
-	HFI_EXTRADATA_INDEX                = 0x7F100002,
-	HFI_EXTRADATA_METADATA_FILLER      = 0x7FE00002,
-	HFI_UNUSED_EXTRADATA = 0x10000000,
-};
-
-enum HFI_EXTRADATA_INDEX_TYPE {
-	HFI_INDEX_EXTRADATA_INPUT_CROP    = 0x0700000E,
-	HFI_INDEX_EXTRADATA_DIGITAL_ZOOM  = 0x07000010,
-	HFI_INDEX_EXTRADATA_ASPECT_RATIO  = 0x7F100003,
-};
+#define HFI_INDEX_EXTRADATA_INPUT_CROP		0x0700000E
+#define HFI_INDEX_EXTRADATA_DIGITAL_ZOOM	0x07000010
+#define HFI_INDEX_EXTRADATA_ASPECT_RATIO	0x7F100003
 
 struct hfi_extradata_header {
 	u32 size;
 	u32 version;
-	u32 port_tndex;
-	enum HFI_EXTRADATA type;
+	u32 port_index;
+	u32 type;
 	u32 data_size;
 	u8 rg_data[1];
 };
 
-enum HFI_INTERLACE_FORMAT {
-	HFI_INTERLACE_FRAME_PROGRESSIVE                 = 0x01,
-	HFI_INTERLACE_INTERLEAVE_FRAME_TOPFIELDFIRST    = 0x02,
-	HFI_INTERLACE_INTERLEAVE_FRAME_BOTTOMFIELDFIRST = 0x04,
-	HFI_INTERLACE_FRAME_TOPFIELDFIRST               = 0x08,
-	HFI_INTERLACE_FRAME_BOTTOMFIELDFIRST            = 0x10,
-	HFI_UNUSED_INTERLACE = 0x10000000,
-};
+#define HFI_INTERLACE_FRAME_PROGRESSIVE					0x01
+#define HFI_INTERLACE_INTERLEAVE_FRAME_TOPFIELDFIRST	0x02
+#define HFI_INTERLACE_INTERLEAVE_FRAME_BOTTOMFIELDFIRST	0x04
+#define HFI_INTERLACE_FRAME_TOPFIELDFIRST				0x08
+#define HFI_INTERLACE_FRAME_BOTTOMFIELDFIRST			0x10
 
-enum HFI_PROPERTY {
-	HFI_PROPERTY_SYS_UNUSED = 0x08000000,
-	HFI_PROPERTY_SYS_IDLE_INDICATOR,
-	HFI_PROPERTY_SYS_DEBUG_CONFIG,
-	HFI_PROPERTY_SYS_RESOURCE_OCMEM_REQUIREMENT_INFO,
-	HFI_PROPERTY_PARAM_UNUSED = 0x04000000,
-	HFI_PROPERTY_PARAM_BUFFER_COUNT_ACTUAL,
-	HFI_PROPERTY_PARAM_FRAME_SIZE,
-	HFI_PROPERTY_PARAM_UNCOMPRESSED_FORMAT_SELECT,
-	HFI_PROPERTY_PARAM_UNCOMPRESSED_FORMAT_SUPPORTED,
-	HFI_PROPERTY_PARAM_UNCOMPRESSED_PLANE_ACTUAL_INFO,
-	HFI_PROPERTY_PARAM_UNCOMPRESSED_PLANE_ACTUAL_CONSTRAINTS_INFO,
-	HFI_PROPERTY_PARAM_INTERLACE_FORMAT_SUPPORTED,
-	HFI_PROPERTY_PARAM_CHROMA_SITE,
-	HFI_PROPERTY_PARAM_EXTRA_DATA_HEADER_CONFIG,
-	HFI_PROPERTY_PARAM_PROFILE_LEVEL_CURRENT,
-	HFI_PROPERTY_PARAM_PROFILE_LEVEL_SUPPORTED,
-	HFI_PROPERTY_PARAM_CAPABILITY_SUPPORTED,
-	HFI_PROPERTY_PARAM_NAL_STREAM_FORMAT_SUPPORTED,
-	HFI_PROPERTY_PARAM_NAL_STREAM_FORMAT_SELECT,
-	HFI_PROPERTY_PARAM_MULTI_VIEW_FORMAT,
-	HFI_PROPERTY_PARAM_PROPERTIES_SUPPORTED,
-	HFI_PROPERTY_PARAM_MAX_SEQUENCE_HEADER_SIZE,
-	HFI_PROPERTY_PARAM_CODEC_SUPPORTED,
-	HFI_PROPERTY_PARAM_DIVX_FORMAT,
+#define HFI_PROPERTY_SYS_OX_START			\
+	(HFI_DOMAIN_BASE_COMMON + HFI_ARCH_OX_OFFSET + 0x0000)
+#define HFI_PROPERTY_SYS_IDLE_INDICATOR		\
+	(HFI_PROPERTY_SYS_OX_START + 0x001)
 
-	HFI_PROPERTY_CONFIG_UNUSED = 0x02000000,
-	HFI_PROPERTY_CONFIG_BUFFER_REQUIREMENTS,
-	HFI_PROPERTY_CONFIG_REALTIME,
-	HFI_PROPERTY_CONFIG_PRIORITY,
-	HFI_PROPERTY_CONFIG_BATCH_INFO,
-	HFI_PROPERTY_CONFIG_FRAME_RATE,
+#define HFI_PROPERTY_PARAM_OX_START				\
+	(HFI_DOMAIN_BASE_COMMON + HFI_ARCH_OX_OFFSET + 0x1000)
+#define HFI_PROPERTY_PARAM_BUFFER_COUNT_ACTUAL			\
+	(HFI_PROPERTY_PARAM_OX_START + 0x001)
+#define HFI_PROPERTY_PARAM_UNCOMPRESSED_PLANE_ACTUAL_CONSTRAINTS_INFO	\
+	(HFI_PROPERTY_PARAM_OX_START + 0x002)
+#define HFI_PROPERTY_PARAM_INTERLACE_FORMAT_SUPPORTED	\
+	(HFI_PROPERTY_PARAM_OX_START + 0x003)
+#define HFI_PROPERTY_PARAM_CHROMA_SITE					\
+	(HFI_PROPERTY_PARAM_OX_START + 0x004)
+#define HFI_PROPERTY_PARAM_EXTRA_DATA_HEADER_CONFIG		\
+	(HFI_PROPERTY_PARAM_OX_START + 0x005)
+#define HFI_PROPERTY_PARAM_MAX_SEQUENCE_HEADER_SIZE		\
+	(HFI_PROPERTY_PARAM_OX_START + 0x006)
+#define HFI_PROPERTY_PARAM_DIVX_FORMAT					\
+	(HFI_PROPERTY_PARAM_OX_START + 0x007)
 
-	HFI_PROPERTY_PARAM_VDEC_UNUSED = 0x01000000,
-	HFI_PROPERTY_PARAM_VDEC_CONTINUE_DATA_TRANSFER,
-	HFI_PROPERTY_PARAM_VDEC_DISPLAY_PICTURE_BUFFER_COUNT,
-	HFI_PROPERTY_PARAM_VDEC_MULTI_VIEW_SELECT,
-	HFI_PROPERTY_PARAM_VDEC_PICTURE_TYPE_DECODE,
-	HFI_PROPERTY_PARAM_VDEC_MULTI_STREAM,
-	HFI_PROPERTY_PARAM_VDEC_OUTPUT_ORDER,
-	HFI_PROPERTY_PARAM_VDEC_MB_QUANTIZATION,
-	HFI_PROPERTY_PARAM_VDEC_NUM_CONCEALED_MB,
-	HFI_PROPERTY_PARAM_VDEC_H264_ENTROPY_SWITCHING,
-	HFI_PROPERTY_PARAM_VDEC_OUTPUT2_KEEP_ASPECT_RATIO,
+#define HFI_PROPERTY_CONFIG_OX_START					\
+	(HFI_DOMAIN_BASE_COMMON + HFI_ARCH_OX_OFFSET + 0x02000)
+#define HFI_PROPERTY_CONFIG_BUFFER_REQUIREMENTS			\
+	(HFI_PROPERTY_CONFIG_OX_START + 0x001)
+#define HFI_PROPERTY_CONFIG_REALTIME					\
+	(HFI_PROPERTY_CONFIG_OX_START + 0x002)
+#define HFI_PROPERTY_CONFIG_PRIORITY					\
+	(HFI_PROPERTY_CONFIG_OX_START + 0x003)
+#define HFI_PROPERTY_CONFIG_BATCH_INFO					\
+	(HFI_PROPERTY_CONFIG_OX_START + 0x004)
 
-	HFI_PROPERTY_CONFIG_VDEC_UNUSED = 0x00800000,
-	HFI_PROPERTY_CONFIG_VDEC_POST_LOOP_DEBLOCKER,
-	HFI_PROPERTY_CONFIG_VDEC_MB_ERROR_MAP_REPORTING,
-	HFI_PROPERTY_CONFIG_VDEC_MB_ERROR_MAP,
+#define HFI_PROPERTY_PARAM_VDEC_OX_START				\
+	(HFI_DOMAIN_BASE_VDEC + HFI_ARCH_OX_OFFSET + 0x3000)
+#define HFI_PROPERTY_PARAM_VDEC_CONTINUE_DATA_TRANSFER	\
+	(HFI_PROPERTY_PARAM_VDEC_OX_START + 0x001)
+#define HFI_PROPERTY_PARAM_VDEC_DISPLAY_PICTURE_BUFFER_COUNT\
+	(HFI_PROPERTY_PARAM_VDEC_OX_START + 0x002)
+#define HFI_PROPERTY_PARAM_VDEC_MULTI_VIEW_SELECT		\
+	(HFI_PROPERTY_PARAM_VDEC_OX_START + 0x003)
+#define HFI_PROPERTY_PARAM_VDEC_PICTURE_TYPE_DECODE		\
+	(HFI_PROPERTY_PARAM_VDEC_OX_START + 0x004)
+#define HFI_PROPERTY_PARAM_VDEC_OUTPUT_ORDER			\
+	(HFI_PROPERTY_PARAM_VDEC_OX_START + 0x005)
+#define HFI_PROPERTY_PARAM_VDEC_MB_QUANTIZATION			\
+	(HFI_PROPERTY_PARAM_VDEC_OX_START + 0x006)
+#define HFI_PROPERTY_PARAM_VDEC_NUM_CONCEALED_MB		\
+	(HFI_PROPERTY_PARAM_VDEC_OX_START + 0x007)
+#define HFI_PROPERTY_PARAM_VDEC_H264_ENTROPY_SWITCHING	\
+	(HFI_PROPERTY_PARAM_VDEC_OX_START + 0x008)
+#define HFI_PROPERTY_PARAM_VDEC_OUTPUT2_KEEP_ASPECT_RATIO\
+	(HFI_PROPERTY_PARAM_VDEC_OX_START + 0x009)
 
-	HFI_PROPERTY_PARAM_VENC_UNUSED = 0x00400000,
-	HFI_PROPERTY_PARAM_VENC_SLICE_DELIVERY_MODE,
-	HFI_PROPERTY_PARAM_VENC_H264_ENTROPY_CONTROL,
-	HFI_PROPERTY_PARAM_VENC_H264_DEBLOCK_CONTROL,
-	HFI_PROPERTY_PARAM_VENC_RATE_CONTROL,
-	HFI_PROPERTY_PARAM_VENC_TEMPORAL_SPATIAL_TRADEOFF,
-	HFI_PROPERTY_PARAM_VENC_SESSION_QP,
-	HFI_PROPERTY_PARAM_VENC_MPEG4_AC_PREDICTION,
-	HFI_PROPERTY_PARAM_VENC_MPEG4_DATA_PARTITIONING,
-	HFI_PROPERTY_PARAM_VENC_MPEG4_TIME_RESOLUTION,
-	HFI_PROPERTY_PARAM_VENC_MPEG4_SHORT_HEADER,
-	HFI_PROPERTY_PARAM_VENC_MPEG4_HEADER_EXTENSION,
-	HFI_PROPERTY_PARAM_VENC_MULTI_SLICE_INFO,
-	HFI_PROPERTY_PARAM_VENC_INTRA_REFRESH,
-	HFI_PROPERTY_PARAM_VENC_MULTI_SLICE_CONTROL,
+#define HFI_PROPERTY_CONFIG_VDEC_OX_START				\
+	(HFI_DOMAIN_BASE_VDEC + HFI_ARCH_OX_OFFSET + 0x0000)
+#define HFI_PROPERTY_CONFIG_VDEC_POST_LOOP_DEBLOCKER	\
+	(HFI_PROPERTY_CONFIG_VDEC_OX_START + 0x001)
+#define HFI_PROPERTY_CONFIG_VDEC_MB_ERROR_MAP_REPORTING	\
+	(HFI_PROPERTY_CONFIG_VDEC_OX_START + 0x002)
+#define HFI_PROPERTY_CONFIG_VDEC_MB_ERROR_MAP			\
+	(HFI_PROPERTY_CONFIG_VDEC_OX_START + 0x003)
 
-	HFI_PROPERTY_CONFIG_VENC_UNUSED = 0x00200000,
-	HFI_PROPERTY_CONFIG_VENC_TARGET_BITRATE,
-	HFI_PROPERTY_CONFIG_VENC_IDR_PERIOD,
-	HFI_PROPERTY_CONFIG_VENC_INTRA_PERIOD,
-	HFI_PROPERTY_CONFIG_VENC_REQUEST_IFRAME,
-	HFI_PROPERTY_CONFIG_VENC_TIMESTAMP_SCALE,
-	HFI_PROPERTY_PARAM_VENC_MPEG4_QPEL,
-	HFI_PROPERTY_PARAM_VENC_ADVANCED,
+#define HFI_PROPERTY_PARAM_VENC_OX_START				\
+	(HFI_DOMAIN_BASE_VENC + HFI_ARCH_OX_OFFSET + 0x5000)
+#define HFI_PROPERTY_CONFIG_VENC_OX_START				\
+	(HFI_DOMAIN_BASE_VENC + HFI_ARCH_OX_OFFSET + 0x6000)
 
-	HFI_PROPERTY_PARAM_VPE_UNUSED = 0x00100000,
-
-	HFI_PROPERTY_CONFIG_VPE_UNUSED = 0x00080000,
-	HFI_PROPERTY_CONFIG_VPE_DEINTERLACE,
-	HFI_PROPERTY_CONFIG_VPE_OPERATIONS,
-	HFI_PROPERTY_UNUSED = 0x10000000,
-};
+#define HFI_PROPERTY_PARAM_VPE_OX_START					\
+	(HFI_DOMAIN_BASE_VPE + HFI_ARCH_OX_OFFSET + 0x7000)
+#define HFI_PROPERTY_CONFIG_VPE_OX_START				\
+	(HFI_DOMAIN_BASE_VPE + HFI_ARCH_OX_OFFSET + 0x8000)
 
 struct hfi_batch_info {
 	u32 input_batch_count;
 	u32 output_batch_count;
 };
 
-struct hfi_bitrate {
-	u32 bit_rate;
-};
-
 struct hfi_buffer_count_actual {
-	enum HFI_BUFFER buffer;
+	u32 buffer_type;
 	u32 buffer_count_actual;
 };
 
 struct hfi_buffer_requirements {
-	enum HFI_BUFFER buffer;
+	u32 buffer_type;
 	u32 buffer_size;
 	u32 buffer_region_size;
 	u32 buffer_hold_count;
@@ -519,35 +273,12 @@
 	u32 buffer_alignment;
 };
 
-enum HFI_CAPABILITY {
-	HFI_CAPABILITY_FRAME_WIDTH,
-	HFI_CAPABILITY_FRAME_HEIGHT,
-	HFI_CAPABILITY_MBS_PER_FRAME,
-	HFI_CAPABILITY_MBS_PER_SECOND,
-	HFI_CAPABILITY_FRAMERATE,
-	HFI_CAPABILITY_SCALE_X,
-	HFI_CAPABILITY_SCALE_Y,
-	HFI_CAPABILITY_BITRATE,
-	HFI_UNUSED_CAPABILITY = 0x10000000,
-};
-
-struct hfi_capability_supported {
-	enum HFI_CAPABILITY eCapabilityType;
-	u32 min;
-	u32 max;
-	u32 step_size;
-};
-
-struct hfi_capability_supported_INFO {
-	u32 num_capabilities;
-	struct hfi_capability_supported rg_data[1];
-};
-
-enum HFI_CHROMA_SITE {
-	HFI_CHROMA_SITE_0,
-	HFI_CHROMA_SITE_1,
-	HFI_UNUSED_CHROMA = 0x10000000,
-};
+#define HFI_CHROMA_SITE_0			(HFI_OX_BASE + 0x1)
+#define HFI_CHROMA_SITE_1			(HFI_OX_BASE + 0x2)
+#define HFI_CHROMA_SITE_2			(HFI_OX_BASE + 0x3)
+#define HFI_CHROMA_SITE_3			(HFI_OX_BASE + 0x4)
+#define HFI_CHROMA_SITE_4			(HFI_OX_BASE + 0x5)
+#define HFI_CHROMA_SITE_5			(HFI_OX_BASE + 0x6)
 
 struct hfi_data_payload {
 	u32 size;
@@ -567,86 +298,17 @@
 	u32 count;
 };
 
-struct hfi_enable {
-	int enable;
-};
-
-enum HFI_H264_DB_MODE {
-	HFI_H264_DB_MODE_DISABLE,
-	HFI_H264_DB_MODE_SKIP_SLICE_BOUNDARY,
-	HFI_H264_DB_MODE_ALL_BOUNDARY,
-	HFI_UNUSED_H264_DB = 0x10000000,
-};
-
-struct hfi_h264_db_control {
-	enum HFI_H264_DB_MODE mode;
-	int slice_alpha_offset;
-	int slice_beta_offset;
-};
-
-enum HFI_H264_ENTROPY {
-	HFI_H264_ENTROPY_CAVLC,
-	HFI_H264_ENTROPY_CABAC,
-	HFI_UNUSED_ENTROPY = 0x10000000,
-};
-
-enum HFI_H264_CABAC_MODEL {
-	HFI_H264_CABAC_MODEL_0,
-	HFI_H264_CABAC_MODEL_1,
-	HFI_H264_CABAC_MODEL_2,
-	HFI_UNUSED_CABAC = 0x10000000,
-};
-
-struct hfi_h264_entropy_control {
-	enum HFI_H264_ENTROPY entropy_mode;
-	enum HFI_H264_CABAC_MODEL cabac_model;
-};
-
 struct hfi_extra_data_header_config {
 	u32 type;
-	enum HFI_BUFFER buffer_type;
+	u32 buffer_type;
 	u32 version;
 	u32 port_index;
-	u32 client_extradata_id;
-};
-
-struct hfi_frame_rate {
-	enum HFI_BUFFER buffer_type;
-	u32 frame_rate;
+	u32 client_extra_data_id;
 };
 
 struct hfi_interlace_format_supported {
-	enum HFI_BUFFER buffer;
-	enum HFI_INTERLACE_FORMAT format;
-};
-
-enum hfi_intra_refresh_mode {
-	HFI_INTRA_REFRESH_NONE,
-	HFI_INTRA_REFRESH_CYCLIC,
-	HFI_INTRA_REFRESH_ADAPTIVE,
-	HFI_INTRA_REFRESH_CYCLIC_ADAPTIVE,
-	HFI_INTRA_REFRESH_RANDOM,
-	HFI_UNUSED_INTRA = 0x10000000,
-};
-
-struct hfi_intra_refresh {
-	enum hfi_intra_refresh_mode mode;
-	u32 air_mbs;
-	u32 air_ref;
-	u32 cir_mbs;
-};
-
-struct hfi_idr_period {
-	u32 idr_period;
-};
-
-struct hfi_intra_period {
-	u32 pframes;
-	u32 bframes;
-};
-
-struct hfi_timestamp_scale {
-	u32 time_stamp_scale;
+	u32 buffer_type;
+	u32 format;
 };
 
 struct hfi_mb_error_map {
@@ -659,424 +321,110 @@
 	u32 size;
 };
 
-struct hfi_mpeg4_header_extension {
-	u32 header_extension;
-};
-
-struct hfi_mpeg4_time_resolution {
-	u32 time_increment_resolution;
-};
-
-enum HFI_MULTI_SLICE {
-	HFI_MULTI_SLICE_OFF,
-	HFI_MULTI_SLICE_BY_MB_COUNT,
-	HFI_MULTI_SLICE_BY_BYTE_COUNT,
-	HFI_MULTI_SLICE_GOB,
-	HFI_UNUSED_SLICE = 0x10000000,
-};
-
-struct hfi_multi_slice_control {
-	enum HFI_MULTI_SLICE multi_slice;
-	u32 slice_size;
-};
-
-struct hfi_multi_stream {
-	enum HFI_BUFFER buffer;
-	u32 enable;
-	u32 width;
-	u32 height;
-};
-
-struct hfi_multi_view_format {
-	u32 views;
-	u32 rg_view_order[1];
-};
-
 struct hfi_multi_view_select {
 	u32 view_index;
 };
 
-enum HFI_NAL_STREAM_FORMAT {
-	HFI_NAL_FORMAT_STARTCODES         = 0x00000001,
-	HFI_NAL_FORMAT_ONE_NAL_PER_BUFFER = 0x00000002,
-	HFI_NAL_FORMAT_ONE_BYTE_LENGTH    = 0x00000004,
-	HFI_NAL_FORMAT_TWO_BYTE_LENGTH    = 0x00000008,
-	HFI_NAL_FORMAT_FOUR_BYTE_LENGTH   = 0x00000010,
-	HFI_UNUSED_NAL = 0x10000000,
-};
+#define HFI_PRIORITY_LOW		10
+#define HFI_PRIOIRTY_MEDIUM		20
+#define HFI_PRIORITY_HIGH		30
 
-struct hfi_nal_stream_format_supported {
-	u32 nal_stream_format_supported;
-};
+#define HFI_OUTPUT_ORDER_DISPLAY	(HFI_OX_BASE + 0x1)
+#define HFI_OUTPUT_ORDER_DECODE		(HFI_OX_BASE + 0x2)
 
-enum HFI_PICTURE {
-	HFI_PICTURE_I   = 0x01,
-	HFI_PICTURE_P   = 0x02,
-	HFI_PICTURE_B   = 0x04,
-	HFI_PICTURE_IDR = 0x7F001000,
-	HFI_UNUSED_PICT = 0x10000000,
-};
-
-enum HFI_PRIORITY {
-	HFI_PRIORITY_LOW = 10,
-	HFI_PRIOIRTY_MEDIUM = 20,
-	HFI_PRIORITY_HIGH = 30,
-	HFI_UNUSED_PRIORITY = 0x10000000,
-};
-
-struct hfi_profile_level {
-	u32 profile;
-	u32 level;
-};
-
-struct hfi_profile_level_supported {
-	u32 profile_count;
-	struct hfi_profile_level rg_profile_level[1];
-};
-
-enum HFI_ROTATE {
-	HFI_ROTATE_NONE,
-	HFI_ROTATE_90,
-	HFI_ROTATE_180,
-	HFI_ROTATE_270,
-	HFI_UNUSED_ROTATE = 0x10000000,
-};
-
-enum HFI_FLIP {
-	HFI_FLIP_NONE,
-	HFI_FLIP_HORIZONTAL,
-	HFI_FLIP_VERTICAL,
-	HFI_UNUSED_FLIP = 0x10000000,
-};
-
-struct hfi_operations {
-	enum HFI_ROTATE rotate;
-	enum HFI_FLIP flip;
-};
-
-enum HFI_OUTPUT_ORDER {
-	HFI_OUTPUT_ORDER_DISPLAY,
-	HFI_OUTPUT_ORDER_DECODE,
-	HFI_UNUSED_OUTPUT = 0x10000000,
-};
-
-struct hfi_quantization {
-	u32 qp_i;
-	u32 qp_p;
-	u32 qp_b;
-};
-
-enum HFI_RATE_CONTROL {
-	HFI_RATE_CONTROL_OFF,
-	HFI_RATE_CONTROL_VBR_VFR,
-	HFI_RATE_CONTROL_VBR_CFR,
-	HFI_RATE_CONTROL_CBR_VFR,
-	HFI_RATE_CONTROL_CBR_CFR,
-	HFI_UNUSED_RC = 0x10000000,
-};
-
-struct hfi_slice_delivery_mode {
-	int enable;
-};
-
-struct hfi_temporal_spatial_tradeoff {
-	u32 ts_factor;
-};
-
-struct hfi_frame_size {
-	enum HFI_BUFFER buffer;
-	u32 width;
-	u32 height;
-};
-
-enum HFI_UNCOMPRESSED_FORMAT {
-	HFI_COLOR_FORMAT_MONOCHROME,
-	HFI_COLOR_FORMAT_NV12,
-	HFI_COLOR_FORMAT_NV21,
-	HFI_COLOR_FORMAT_NV12_4x4TILE,
-	HFI_COLOR_FORMAT_NV21_4x4TILE,
-	HFI_COLOR_FORMAT_YUYV,
-	HFI_COLOR_FORMAT_YVYU,
-	HFI_COLOR_FORMAT_UYVY,
-	HFI_COLOR_FORMAT_VYUY,
-	HFI_COLOR_FORMAT_RGB565,
-	HFI_COLOR_FORMAT_BGR565,
-	HFI_COLOR_FORMAT_RGB888,
-	HFI_COLOR_FORMAT_BGR888,
-	HFI_UNUSED_COLOR = 0x10000000,
-};
-
-struct hfi_uncompressed_format_select {
-	enum HFI_BUFFER buffer;
-	enum HFI_UNCOMPRESSED_FORMAT format;
-};
-
-struct hfi_uncompressed_format_supported {
-	enum HFI_BUFFER buffer;
-	u32 format_entries;
-	u32 rg_format_info[1];
-};
-
-struct hfi_uncompressed_plane_actual {
-	int actual_stride;
-	u32 actual_plane_buffer_height;
-};
-
-struct hfi_uncompressed_plane_actual_info {
-	enum HFI_BUFFER buffer;
-	u32 num_planes;
-	struct hfi_uncompressed_plane_actual rg_plane_format[1];
-};
-
-struct hfi_uncompressed_plane_constraints {
-	u32 stride_multiples;
-	u32 max_stride;
-	u32 min_plane_buffer_height_multiple;
-	u32 buffer_alignment;
-};
-
-struct hfi_uncompressed_plane_info {
-	enum HFI_UNCOMPRESSED_FORMAT format;
-	u32 num_planes;
-	struct hfi_uncompressed_plane_constraints rg_plane_format[1];
-};
+#define HFI_RATE_CONTROL_OFF		(HFI_OX_BASE + 0x1)
+#define HFI_RATE_CONTROL_VBR_VFR	(HFI_OX_BASE + 0x2)
+#define HFI_RATE_CONTROL_VBR_CFR	(HFI_OX_BASE + 0x3)
+#define HFI_RATE_CONTROL_CBR_VFR	(HFI_OX_BASE + 0x4)
+#define HFI_RATE_CONTROL_CBR_CFR	(HFI_OX_BASE + 0x5)
 
 struct hfi_uncompressed_plane_actual_constraints_info {
-	enum HFI_BUFFER buffer;
+	u32 buffer_type;
 	u32 num_planes;
 	struct hfi_uncompressed_plane_constraints rg_plane_format[1];
 };
 
-struct hfi_codec_supported {
-	u32 decoder_codec_supported;
-	u32 encoder_codec_supported;
-};
+#define HFI_CMD_SYS_OX_START		\
+	(HFI_DOMAIN_BASE_COMMON + HFI_ARCH_OX_OFFSET + 0x0000)
+#define HFI_CMD_SYS_SESSION_ABORT	(HFI_CMD_SYS_OX_START + 0x001)
+#define HFI_CMD_SYS_PING		(HFI_CMD_SYS_OX_START + 0x002)
 
-enum HFI_DEBUG_MSG {
-	HFI_DEBUG_MSG_LOW     = 0x00000001,
-	HFI_DEBUG_MSG_MEDIUM  = 0x00000002,
-	HFI_DEBUG_MSG_HIGH    = 0x00000004,
-	HFI_DEBUG_MSG_ERROR   = 0x00000008,
-	HFI_DEBUG_MSG_FATAL   = 0x00000010,
-	HFI_UNUSED_DEBUG_MSG = 0x10000000,
-};
+#define HFI_CMD_SESSION_OX_START	\
+	(HFI_DOMAIN_BASE_COMMON + HFI_ARCH_OX_OFFSET + 0x1000)
+#define HFI_CMD_SESSION_LOAD_RESOURCES	(HFI_CMD_SESSION_OX_START + 0x001)
+#define HFI_CMD_SESSION_START		(HFI_CMD_SESSION_OX_START + 0x002)
+#define HFI_CMD_SESSION_STOP		(HFI_CMD_SESSION_OX_START + 0x003)
+#define HFI_CMD_SESSION_EMPTY_BUFFER	(HFI_CMD_SESSION_OX_START + 0x004)
+#define HFI_CMD_SESSION_FILL_BUFFER	(HFI_CMD_SESSION_OX_START + 0x005)
+#define HFI_CMD_SESSION_SUSPEND		(HFI_CMD_SESSION_OX_START + 0x006)
+#define HFI_CMD_SESSION_RESUME		(HFI_CMD_SESSION_OX_START + 0x007)
+#define HFI_CMD_SESSION_FLUSH		(HFI_CMD_SESSION_OX_START + 0x008)
+#define HFI_CMD_SESSION_GET_PROPERTY	(HFI_CMD_SESSION_OX_START + 0x009)
+#define HFI_CMD_SESSION_PARSE_SEQUENCE_HEADER	\
+	(HFI_CMD_SESSION_OX_START + 0x00A)
+#define HFI_CMD_SESSION_RELEASE_BUFFERS		\
+	(HFI_CMD_SESSION_OX_START + 0x00B)
+#define HFI_CMD_SESSION_RELEASE_RESOURCES	\
+	(HFI_CMD_SESSION_OX_START + 0x00C)
 
-struct hfi_debug_config {
-	u32 debug_config;
-};
+#define HFI_MSG_SYS_OX_START			\
+	(HFI_DOMAIN_BASE_COMMON + HFI_ARCH_OX_OFFSET + 0x0000)
+#define HFI_MSG_SYS_IDLE		(HFI_MSG_SYS_OX_START + 0x1)
+#define HFI_MSG_SYS_PING_ACK	(HFI_MSG_SYS_OX_START + 0x2)
+#define HFI_MSG_SYS_PROPERTY_INFO	(HFI_MSG_SYS_OX_START + 0x3)
+#define HFI_MSG_SYS_SESSION_ABORT_DONE	(HFI_MSG_SYS_OX_START + 0x4)
 
-struct hfi_properties_supported {
-	u32 num_properties;
-	u32 rg_properties[1];
-};
-
-enum HFI_RESOURCE {
-	HFI_RESOURCE_OCMEM    = 0x00000001,
-	HFI_UNUSED_RESOURCE = 0x10000000,
-};
-
-struct hfi_resource_ocmem_type {
-	u32 size;
-	u8 *mem;
-};
-
-struct hfi_resource_ocmem_requirement {
-	enum HFI_DOMAIN session_domain;
-	u32 width;
-	u32 height;
-	u32 size;
-};
-
-struct hfi_resource_ocmem_requirement_info {
-	u32 num_entries;
-	struct hfi_resource_ocmem_requirement rg_requirements[1];
-};
-
-struct hfi_venc_config_advanced {
-	u8 pipe2d;
-	u8 hw_mode;
-	u8 low_delay_enforce;
-	int h264_constrain_intra_pred;
-	int h264_transform_8x8_flag;
-	int mpeg4_qpel_enable;
-	int multi_refP_en;
-	int qmatrix_en;
-	u8 vpp_info_packet_mode;
-	u8 ref_tile_mode;
-	u8 bitstream_flush_mode;
-	u32 ds_display_frame_width;
-	u32 ds_display_frame_height;
-	u32 perf_tune_param_ptr;
-};
-
-enum HFI_COMMAND {
-	HFI_CMD_SYS_UNUSED = 0x01000000,
-	HFI_CMD_SYS_INIT,
-	HFI_CMD_SYS_SESSION_INIT,
-	HFI_CMD_SYS_SESSION_END,
-	HFI_CMD_SYS_SESSION_ABORT,
-	HFI_CMD_SYS_SET_RESOURCE,
-	HFI_CMD_SYS_RELEASE_RESOURCE,
-	HFI_CMD_SYS_PING,
-	HFI_CMD_SYS_PC_PREP,
-	HFI_CMD_SYS_SET_PROPERTY,
-	HFI_CMD_SYS_GET_PROPERTY,
-
-	HFI_CMD_SESSION_UNUSED = 0x02000000,
-	HFI_CMD_SESSION_LOAD_RESOURCES,
-	HFI_CMD_SESSION_START,
-	HFI_CMD_SESSION_STOP,
-	HFI_CMD_SESSION_EMPTY_BUFFER,
-	HFI_CMD_SESSION_FILL_BUFFER,
-	HFI_CMD_SESSION_FLUSH,
-	HFI_CMD_SESSION_SUSPEND,
-	HFI_CMD_SESSION_RESUME,
-	HFI_CMD_SESSION_SET_PROPERTY,
-	HFI_CMD_SESSION_GET_PROPERTY,
-	HFI_CMD_SESSION_PARSE_SEQUENCE_HEADER,
-	HFI_CMD_SESSION_GET_SEQUENCE_HEADER,
-	HFI_CMD_SESSION_SET_BUFFERS,
-	HFI_CMD_SESSION_RELEASE_BUFFERS,
-	HFI_CMD_SESSION_RELEASE_RESOURCES,
-
-	HFI_CMD_UNUSED = 0x10000000,
-};
-
-enum HFI_MESSAGE {
-	HFI_MSG_SYS_UNUSED = 0x01000000,
-	HFI_MSG_SYS_IDLE,
-	HFI_MSG_SYS_PC_PREP_DONE,
-	HFI_MSG_SYS_RELEASE_RESOURCE,
-	HFI_MSG_SYS_PING_ACK,
-	HFI_MSG_SYS_DEBUG,
-	HFI_MSG_SYS_INIT_DONE,
-	HFI_MSG_SYS_PROPERTY_INFO,
-	HFI_MSG_SESSION_UNUSED = 0x02000000,
-	HFI_MSG_EVENT_NOTIFY,
-	HFI_MSG_SYS_SESSION_INIT_DONE,
-	HFI_MSG_SYS_SESSION_END_DONE,
-	HFI_MSG_SYS_SESSION_ABORT_DONE,
-	HFI_MSG_SESSION_LOAD_RESOURCES_DONE,
-	HFI_MSG_SESSION_START_DONE,
-	HFI_MSG_SESSION_STOP_DONE,
-	HFI_MSG_SESSION_SUSPEND_DONE,
-	HFI_MSG_SESSION_RESUME_DONE,
-	HFI_MSG_SESSION_EMPTY_BUFFER_DONE,
-	HFI_MSG_SESSION_FILL_BUFFER_DONE,
-	HFI_MSG_SESSION_FLUSH_DONE,
-	HFI_MSG_SESSION_PROPERTY_INFO,
-	HFI_MSG_SESSION_RELEASE_RESOURCES_DONE,
-	HFI_MSG_SESSION_PARSE_SEQUENCE_HEADER_DONE,
-	HFI_MSG_SESSION_GET_SEQUENCE_HEADER_DONE,
-	HFI_MSG_UNUSED = 0x10000000,
-};
-
-struct vidc_hal_msg_pkt_hdr {
-	u32 size;
-	enum HFI_MESSAGE packet;
-};
-
-struct vidc_hal_session_cmd_pkt {
-	u32 size;
-	enum HFI_COMMAND packet_type;
-	u32 session_id;
-};
-
-enum HFI_STATUS {
-	HFI_FAIL = 0,
-	HFI_SUCCESS,
-	HFI_UNUSED_STATUS = 0x10000000,
-};
-
-struct hfi_cmd_sys_init_packet {
-	u32 size;
-	enum HFI_COMMAND packet;
-};
-
-struct hfi_cmd_sys_session_init_packet {
-	u32 size;
-	enum HFI_COMMAND packet;
-	u32 session_id;
-	enum HFI_DOMAIN session_domain;
-	enum HFI_VIDEO_CODEC session_codec;
-};
-
-struct hfi_cmd_sys_session_end_packet {
-	u32 size;
-	enum HFI_COMMAND packet_type;
-	u32 session_id;
-};
+#define HFI_MSG_SESSION_OX_START		\
+	(HFI_DOMAIN_BASE_COMMON + HFI_ARCH_OX_OFFSET + 0x1000)
+#define HFI_MSG_SESSION_LOAD_RESOURCES_DONE	(HFI_MSG_SESSION_OX_START + 0x1)
+#define HFI_MSG_SESSION_START_DONE		(HFI_MSG_SESSION_OX_START + 0x2)
+#define HFI_MSG_SESSION_STOP_DONE		(HFI_MSG_SESSION_OX_START + 0x3)
+#define HFI_MSG_SESSION_SUSPEND_DONE	(HFI_MSG_SESSION_OX_START + 0x4)
+#define HFI_MSG_SESSION_RESUME_DONE		(HFI_MSG_SESSION_OX_START + 0x5)
+#define HFI_MSG_SESSION_FLUSH_DONE		(HFI_MSG_SESSION_OX_START + 0x6)
+#define HFI_MSG_SESSION_EMPTY_BUFFER_DONE	(HFI_MSG_SESSION_OX_START + 0x7)
+#define HFI_MSG_SESSION_FILL_BUFFER_DONE	(HFI_MSG_SESSION_OX_START + 0x8)
+#define HFI_MSG_SESSION_PROPERTY_INFO		(HFI_MSG_SESSION_OX_START + 0x9)
+#define HFI_MSG_SESSION_RELEASE_RESOURCES_DONE	(HFI_MSG_SESSION_OX_START + 0xA)
+#define HFI_MSG_SESSION_PARSE_SEQUENCE_HEADER_DONE		\
+	(HFI_MSG_SESSION_OX_START + 0xB)
 
 struct hfi_cmd_sys_session_abort_packet {
 	u32 size;
-	enum HFI_COMMAND packet_type;
+	u32 packet_type;
 	u32 session_id;
 };
 
-struct hfi_cmd_sys_pc_prep_packet {
-	u32 size;
-	enum HFI_COMMAND packet_type;
-};
-
-struct hfi_cmd_sys_set_resource_packet {
-	u32 size;
-	enum HFI_COMMAND packet_type;
-	u32 resource_handle;
-	enum HFI_RESOURCE resource_type;
-	u32 rg_resource_data[1];
-};
-
-struct hfi_cmd_sys_release_resource_packet {
-	u32 size;
-	enum HFI_COMMAND packet_type;
-	enum HFI_RESOURCE resource_type;
-	u32 resource_handle;
-};
-
 struct hfi_cmd_sys_ping_packet {
 	u32 size;
-	enum HFI_COMMAND packet_type;
+	u32 packet_type;
 	u32 client_data;
 };
 
-struct hfi_cmd_sys_set_property_packet {
-	u32 size;
-	enum HFI_COMMAND packet_type;
-	u32 num_properties;
-	u32 rg_property_data[1];
-};
-
-struct hfi_cmd_sys_get_property_packet {
-	u32 size;
-	enum HFI_COMMAND packet_type;
-	u32 num_properties;
-	enum HFI_PROPERTY rg_property_data[1];
-};
-
 struct hfi_cmd_session_load_resources_packet {
 	u32 size;
-	enum HFI_COMMAND packet_type;
+	u32 packet_type;
 	u32 session_id;
 };
 
 struct hfi_cmd_session_start_packet {
 	u32 size;
-	enum HFI_COMMAND packet_type;
+	u32 packet_type;
 	u32 session_id;
 };
 
 struct hfi_cmd_session_stop_packet {
 	u32 size;
-	enum HFI_COMMAND packet_type;
+	u32 packet_type;
 	u32 session_id;
 };
 
 struct hfi_cmd_session_empty_buffer_compressed_packet {
 	u32 size;
-	enum HFI_COMMAND packet_type;
+	u32 packet_type;
 	u32 session_id;
-	u32 timestamp_hi;
-	u32 timestamp_lo;
+	u32 time_stamp_hi;
+	u32 time_stamp_lo;
 	u32 flags;
 	u32 mark_target;
 	u32 mark_data;
@@ -1085,15 +433,16 @@
 	u32 filled_len;
 	u32 input_tag;
 	u8 *packet_buffer;
+	u8 *extra_data_buffer;
 };
 
 struct hfi_cmd_session_empty_buffer_uncompressed_plane0_packet {
 	u32 size;
-	enum HFI_COMMAND packet;
+	u32 packet_type;
 	u32 session_id;
 	u32 view_id;
-	u32 timestamp_hi;
-	u32 timestamp_lo;
+	u32 time_stamp_hi;
+	u32 time_stamp_lo;
 	u32 flags;
 	u32 mark_target;
 	u32 mark_data;
@@ -1102,6 +451,7 @@
 	u32 offset;
 	u32 input_tag;
 	u8 *packet_buffer;
+	u8 *extra_data_buffer;
 };
 
 struct hfi_cmd_session_empty_buffer_uncompressed_plane1_packet {
@@ -1122,234 +472,153 @@
 
 struct hfi_cmd_session_fill_buffer_packet {
 	u32 size;
-	enum HFI_COMMAND packet_type;
+	u32 packet_type;
 	u32 session_id;
 	u32 stream_id;
+	u32 output_tag;
 	u8 *packet_buffer;
 	u8 *extra_data_buffer;
 };
 
 struct hfi_cmd_session_flush_packet {
 	u32 size;
-	enum HFI_COMMAND packet_type;
+	u32 packet_type;
 	u32 session_id;
-	enum HFI_FLUSH flush_type;
+	u32 flush_type;
 };
 
 struct hfi_cmd_session_suspend_packet {
 	u32 size;
-	enum HFI_COMMAND packet;
+	u32 packet_type;
 	u32 session_id;
 };
 
 struct hfi_cmd_session_resume_packet {
 	u32 size;
-	enum HFI_COMMAND packet_type;
+	u32 packet_type;
 	u32 session_id;
 };
 
-struct hfi_cmd_session_set_property_packet {
-	u32 size;
-	enum HFI_COMMAND packet_type;
-	u32 session_id;
-	u32 num_properties;
-	u32 rg_property_data[0];
-};
-
 struct hfi_cmd_session_get_property_packet {
 	u32 size;
-	enum HFI_COMMAND packet_type;
+	u32 packet_type;
 	u32 session_id;
 	u32 num_properties;
-	enum HFI_PROPERTY rg_property_data[1];
-};
-
-struct hfi_buffer_info {
-	u32 buffer_addr;
-	u32 extradata_addr;
-};
-
-struct hfi_cmd_session_set_buffers_packet {
-	u32 size;
-	enum HFI_COMMAND packet_type;
-	u32 session_id;
-	enum HFI_BUFFER buffer_type;
-	enum HFI_BUFFER_MODE buffer_mode;
-	u32 buffer_size;
-	u32 extradata_size;
-	u32 min_buffer_size;
-	u32 num_buffers;
-	u32 rg_buffer_info[1];
+	u32 rg_property_data[1];
 };
 
 struct hfi_cmd_session_release_buffer_packet {
 	u32 size;
-	enum HFI_COMMAND packet_type;
+	u32 packet_type;
 	u32 session_id;
-	enum HFI_BUFFER buffer_type;
+	u32 buffer_type;
 	u32 buffer_size;
-	u32 extradata_size;
+	u32 extra_data_size;
 	u32 num_buffers;
 	u32 rg_buffer_info[1];
 };
 
 struct hfi_cmd_session_release_resources_packet {
 	u32 size;
-	enum HFI_COMMAND packet_type;
+	u32 packet_type;
 	u32 session_id;
 };
 
 struct hfi_cmd_session_parse_sequence_header_packet {
 	u32 size;
-	enum HFI_COMMAND packet_type;
+	u32 packet_type;
 	u32 session_id;
 	u32 header_len;
 	u8 *packet_buffer;
 };
 
-struct hfi_cmd_session_get_sequence_header_packet {
-	u32 size;
-	enum HFI_COMMAND packet_type;
-	u32 session_id;
-	u32 buffer_len;
-	u8 *packet_buffer;
-};
-
-struct hfi_msg_event_notify_packet {
-	u32 size;
-	enum HFI_MESSAGE packet_type;
-	u32 session_id;
-	enum HFI_EVENT event_id;
-	u32 event_data1;
-	u32 event_data2;
-	u32 rg_ext_event_data[1];
-};
-
-struct hfi_msg_sys_init_done_packet {
-	u32 size;
-	enum HFI_MESSAGE packet_type;
-	enum HFI_ERROR error_type;
-	u32 num_properties;
-	u32 rg_property_data[1];
-};
-
-struct hfi_msg_sys_session_init_done_packet {
-	u32 size;
-	enum HFI_MESSAGE packet_type;
-	u32 session_id;
-	enum HFI_ERROR error_type;
-	u32 num_properties;
-	u32 rg_property_data[1];
-};
-
-struct hfi_msg_sys_session_end_done_packet {
-	u32 size;
-	enum HFI_MESSAGE packet_type;
-	u32 session_id;
-	enum HFI_ERROR error_type;
-};
-
 struct hfi_msg_sys_session_abort_done_packet {
 	u32 size;
-	enum HFI_MESSAGE packet_type;
+	u32 packet_type;
 	u32 session_id;
-	enum HFI_ERROR error_type;
+	u32 error_type;
 };
 
 struct hfi_msg_sys_idle_packet {
 	u32 size;
-	enum HFI_MESSAGE packet_type;
-};
-
-struct hfi_msg_sys_pc_prep_done_packet {
-	u32 size;
-	enum HFI_MESSAGE packet_type;
-	enum HFI_ERROR error_type;
-};
-
-struct hfi_msg_sys_release_resource_done_packet {
-	u32 size;
-	enum HFI_MESSAGE packet_type;
-	u32 resource_handle;
-	enum HFI_ERROR error_type;
+	u32 packet_type;
 };
 
 struct hfi_msg_sys_ping_ack_packet {
 	u32 size;
-	enum HFI_MESSAGE packet_type;
+	u32 packet_type;
 	u32 client_data;
 };
 
-struct hfi_msg_sys_debug_packet {
-	u32 size;
-	enum HFI_MESSAGE packet_type;
-	enum HFI_DEBUG_MSG msg_type;
-	u32 msg_size;
-	u32 timestamp_hi;
-	u32 timestamp_lo;
-	u8 rg_msg_data[1];
-};
-
 struct hfi_msg_sys_property_info_packet {
-	u32 nsize;
-	enum HFI_MESSAGE packet_type;
+	u32 size;
+	u32 packet_type;
 	u32 num_properties;
 	u32 rg_property_data[1];
 };
 
 struct hfi_msg_session_load_resources_done_packet {
 	u32 size;
-	enum HFI_MESSAGE packet_type;
+	u32 packet_type;
 	u32 session_id;
-	enum HFI_ERROR error_type;
+	u32 error_type;
 };
 
 struct hfi_msg_session_start_done_packet {
 	u32 size;
-	enum HFI_MESSAGE packet_type;
+	u32 packet_type;
 	u32 session_id;
-	enum HFI_ERROR error_type;
+	u32 error_type;
 };
 
 struct hfi_msg_session_stop_done_packet {
 	u32 size;
-	enum HFI_MESSAGE packet_type;
+	u32 packet_type;
 	u32 session_id;
-	enum HFI_ERROR error_type;
+	u32 error_type;
 };
 
 struct hfi_msg_session_suspend_done_packet {
 	u32 size;
-	enum HFI_MESSAGE packet_type;
+	u32 packet_type;
 	u32 session_id;
-	enum HFI_ERROR error_type;
+	u32 error_type;
 };
 
 struct hfi_msg_session_resume_done_packet {
 	u32 size;
-	enum HFI_MESSAGE packet_type;
+	u32 packet_type;
 	u32 session_id;
-	enum HFI_ERROR error_type;
+	u32 error_type;
+};
+
+struct hfi_msg_session_flush_done_packet {
+	u32 size;
+	u32 packet_type;
+	u32 session_id;
+	u32 error_type;
+	u32 flush_type;
 };
 
 struct hfi_msg_session_empty_buffer_done_packet {
 	u32 size;
-	enum HFI_MESSAGE packet_type;
+	u32 packet_type;
 	u32 session_id;
-	enum HFI_ERROR error_type;
+	u32 error_type;
 	u32 offset;
 	u32 filled_len;
 	u32 input_tag;
 	u8 *packet_buffer;
+	u8 *extra_data_buffer;
 };
 
 struct hfi_msg_session_fill_buffer_done_compressed_packet {
 	u32 size;
-	enum HFI_MESSAGE packet_type;
+	u32 packet_type;
 	u32 session_id;
-	u32 timestamp_hi;
-	u32 timestamp_lo;
-	enum HFI_ERROR error_type;
+	u32 time_stamp_hi;
+	u32 time_stamp_lo;
+	u32 error_type;
 	u32 flags;
 	u32 mark_target;
 	u32 mark_data;
@@ -1358,34 +627,36 @@
 	u32 alloc_len;
 	u32 filled_len;
 	u32 input_tag;
-	enum HFI_PICTURE picture_type;
+	u32 output_tag;
+	u32 picture_type;
 	u8 *packet_buffer;
 	u8 *extra_data_buffer;
 };
 
 struct hfi_msg_session_fbd_uncompressed_plane0_packet {
 	u32 size;
-	enum HFI_MESSAGE packet_type;
+	u32 packet_type;
 	u32 session_id;
 	u32 stream_id;
 	u32 view_id;
-	enum HFI_ERROR error_type;
-	u32 timestamp_hi;
-	u32 timestamp_lo;
+	u32 error_type;
+	u32 time_stamp_hi;
+	u32 time_stamp_lo;
 	u32 flags;
 	u32 mark_target;
 	u32 mark_data;
 	u32 stats;
 	u32 alloc_len;
 	u32 filled_len;
-	u32 oofset;
+	u32 offset;
 	u32 frame_width;
 	u32 frame_height;
-	u32 start_xCoord;
-	u32 start_yCoord;
+	u32 start_x_coord;
+	u32 start_y_coord;
 	u32 input_tag;
-	u32 input_tag1;
-	enum HFI_PICTURE picture_type;
+	u32 input_tag2;
+	u32 output_tag;
+	u32 picture_type;
 	u8 *packet_buffer;
 	u8 *extra_data_buffer;
 };
@@ -1395,7 +666,7 @@
 	u32 alloc_len;
 	u32 filled_len;
 	u32 offset;
-	u8 *packet_buffer;
+	u8 *packet_buffer2;
 };
 
 struct hfi_msg_session_fill_buffer_done_uncompressed_plane2_packet {
@@ -1403,38 +674,21 @@
 	u32 alloc_len;
 	u32 filled_len;
 	u32 offset;
-	u8 *packet_buffer;
-};
-
-struct hfi_msg_session_flush_done_packet {
-	u32 size;
-	enum HFI_MESSAGE packet_type;
-	u32 session_id;
-	enum HFI_ERROR error_type;
-	enum HFI_FLUSH flush_type;
+	u8 *packet_buffer3;
 };
 
 struct hfi_msg_session_parse_sequence_header_done_packet {
 	u32 size;
-	enum HFI_MESSAGE packet_type;
+	u32 packet_type;
 	u32 session_id;
-	enum HFI_ERROR error_type;
+	u32 error_type;
 	u32 num_properties;
 	u32 rg_property_data[1];
 };
 
-struct hfi_msg_session_get_sequence_header_done_packet {
-	u32 size;
-	enum HFI_MESSAGE packet_type;
-	u32 session_id;
-	enum HFI_ERROR error_type;
-	u32 header_len;
-	u8 *sequence_header;
-};
-
 struct hfi_msg_session_property_info_packet {
 	u32 size;
-	enum HFI_MESSAGE packet_type;
+	u32 packet_type;
 	u32 session_id;
 	u32 num_properties;
 	u32 rg_property_data[1];
@@ -1442,9 +696,9 @@
 
 struct hfi_msg_session_release_resources_done_packet {
 	u32 size;
-	enum HFI_MESSAGE packet_type;
+	u32 packet_type;
 	u32 session_id;
-	enum HFI_ERROR error_type;
+	u32 error_type;
 };
 
 struct hfi_extradata_mb_quantization_payload {
@@ -1453,7 +707,7 @@
 
 struct hfi_extradata_vc1_pswnd {
 	u32 ps_wnd_h_offset;
-	u32 ps_wndv_offset;
+	u32 ps_wnd_v_offset;
 	u32 ps_wnd_width;
 	u32 ps_wnd_height;
 };
@@ -1481,12 +735,8 @@
 };
 
 struct hfi_extradata_timestamp_payload {
-	u32 timestamp_low;
-	u32 timestamp_high;
-};
-
-struct hfi_extradata_interlace_video_payload {
-	enum HFI_INTERLACE_FORMAT format;
+	u32 time_stamp_low;
+	u32 time_stamp_high;
 };
 
 enum HFI_S3D_FP_LAYOUT {
@@ -1496,14 +746,14 @@
 	HFI_S3D_FP_LAYOUT_INTRLV_ROW,
 	HFI_S3D_FP_LAYOUT_SIDEBYSIDE,
 	HFI_S3D_FP_LAYOUT_TOPBOTTOM,
-	HFI_S3D_FP_LAYOUT_UNUSED = 0x10000000,
+	HFI_S3D_FP_LAYOUT_UNUSED = 0x10000000
 };
 
 enum HFI_S3D_FP_VIEW_ORDER {
 	HFI_S3D_FP_LEFTVIEW_FIRST,
 	HFI_S3D_FP_RIGHTVIEW_FIRST,
 	HFI_S3D_FP_UNKNOWN,
-	HFI_S3D_FP_VIEWORDER_UNUSED = 0x10000000,
+	HFI_S3D_FP_VIEWORDER_UNUSED = 0x10000000
 };
 
 enum HFI_S3D_FP_FLIP {
@@ -1512,18 +762,22 @@
 	HFI_S3D_FP_FLIP_LEFT_VERT,
 	HFI_S3D_FP_FLIP_RIGHT_HORIZ,
 	HFI_S3D_FP_FLIP_RIGHT_VERT,
-	HFI_S3D_FP_FLIP_UNUSED = 0x10000000,
+	HFI_S3D_FP_FLIP_UNUSED = 0x10000000
 };
 
 struct hfi_extradata_s3d_frame_packing_payload {
-	enum HFI_S3D_FP_LAYOUT eLayout;
-	enum HFI_S3D_FP_VIEW_ORDER eOrder;
-	enum HFI_S3D_FP_FLIP eFlip;
-	int bQuinCunx;
-	u32 nLeftViewLumaSiteX;
-	u32 nLeftViewLumaSiteY;
-	u32 nRightViewLumaSiteX;
-	u32 nRightViewLumaSiteY;
+	enum HFI_S3D_FP_LAYOUT layout;
+	enum HFI_S3D_FP_VIEW_ORDER order;
+	enum HFI_S3D_FP_FLIP flip;
+	int quin_cunx;
+	u32 left_view_luma_site_x;
+	u32 left_view_luma_site_y;
+	u32 right_view_luma_site_x;
+	u32 right_view_luma_site_y;
+};
+
+struct hfi_extradata_interlace_video_payload {
+	u32 format;
 };
 
 struct hfi_extradata_num_concealed_mb_payload {
@@ -1615,4 +869,4 @@
 /* Interrupt Processing:*/
 void vidc_hal_response_handler(struct hal_device *device);
 
-#endif /*__VIDC_HAL_H__ */
+#endif
diff --git a/drivers/media/video/msm_vidc/vidc_hal_api.h b/drivers/media/video/msm_vidc/vidc_hal_api.h
index 036091b..b3ea92a 100644
--- a/drivers/media/video/msm_vidc/vidc_hal_api.h
+++ b/drivers/media/video/msm_vidc/vidc_hal_api.h
@@ -40,6 +40,12 @@
 #define HAL_BUFFERFLAG_READONLY         0x00000200
 #define HAL_BUFFERFLAG_ENDOFSUBFRAME    0x00000400
 
+#define HAL_DEBUG_MSG_LOW				0x00000001
+#define HAL_DEBUG_MSG_MEDIUM			0x00000002
+#define HAL_DEBUG_MSG_HIGH				0x00000004
+#define HAL_DEBUG_MSG_ERROR				0x00000008
+#define HAL_DEBUG_MSG_FATAL				0x00000010
+
 enum vidc_status {
 	VIDC_ERR_NONE = 0x0,
 	VIDC_ERR_FAIL = 0x80000000,
@@ -242,11 +248,12 @@
 enum hal_h264_profile {
 	HAL_H264_PROFILE_BASELINE = 0x00000001,
 	HAL_H264_PROFILE_MAIN     = 0x00000002,
-	HAL_H264_PROFILE_EXTENDED = 0x00000004,
-	HAL_H264_PROFILE_HIGH     = 0x00000008,
+	HAL_H264_PROFILE_HIGH     = 0x00000004,
+	HAL_H264_PROFILE_EXTENDED = 0x00000008,
 	HAL_H264_PROFILE_HIGH10   = 0x00000010,
 	HAL_H264_PROFILE_HIGH422  = 0x00000020,
 	HAL_H264_PROFILE_HIGH444  = 0x00000040,
+	HAL_H264_PROFILE_CONSTRAINED_HIGH  = 0x00000080,
 	HAL_UNUSED_H264_PROFILE = 0x10000000,
 };
 
@@ -541,7 +548,7 @@
 struct hal_h264_db_control {
 	enum hal_h264_db_mode mode;
 	int slice_alpha_offset;
-	int slicebeta_offset;
+	int slice_beta_offset;
 };
 
 struct hal_temporal_spatial_tradeoff {
diff --git a/drivers/media/video/msm_vidc/vidc_hal_helper.h b/drivers/media/video/msm_vidc/vidc_hal_helper.h
new file mode 100644
index 0000000..d4e2619
--- /dev/null
+++ b/drivers/media/video/msm_vidc/vidc_hal_helper.h
@@ -0,0 +1,832 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __H_VIDC_HAL_HELPER_H__
+#define __H_VIDC_HAL_HELPER_H__
+
+#define HFI_NV12_IL_CALC_Y_STRIDE(stride, frame_width, stride_multiple) \
+	{ stride = (frame_width + stride_multiple - 1) & \
+	(0xffffffff - (stride_multiple - 1)) }
+
+#define HFI_NV12_IL_CALC_Y_BUFHEIGHT(buf_height, frame_height,\
+	min_buf_height_multiple) \
+	{ buf_height = (frame_height + min_buf_height_multiple - 1) & \
+	(0xffffffff - (min_buf_height_multiple - 1)) }
+
+#define HFI_NV12_IL_CALC_UV_STRIDE(stride, frame_width, stride_multiple) \
+	{ stride = ((((frame_width + 1) >> 1) + stride_multiple - 1) & \
+	(0xffffffff - (stride_multiple - 1))) << 1 }
+
+#define HFI_NV12_IL_CALC_UV_BUFHEIGHT(buf_height, frame_height,\
+	min_buf_height_multiple) \
+	{ buf_height = ((((frame_height + 1) >> 1) + \
+	min_buf_height_multiple - 1) & (0xffffffff - \
+	(min_buf_height_multiple - 1))) }
+
+#define HFI_NV12_IL_CALC_BUF_SIZE(buf_size, y_buf_size, y_stride, \
+	y_buf_height, uv_buf_size, uv_stride, uv_buf_height, uv_alignment) \
+	{ y_buf_size = (y_stride * y_buf_height); \
+	uv_buf_size = (uv_stride * uv_buf_height) + uv_alignment; \
+	buf_size = y_buf_size + uv_buf_size }
+
+#define HFI_YUYV_CALC_STRIDE(stride, frame_width, stride_multiple) \
+	{ stride = ((frame_width << 1) + stride_multiple - 1) & \
+	(0xffffffff - (stride_multiple - 1)) }
+
+#define HFI_YUYV_CALC_BUFHEIGHT(buf_height, frame_height,\
+	min_buf_height_multiple) \
+	{ buf_height = ((frame_height + min_buf_height_multiple - 1) & \
+	(0xffffffff - (min_buf_height_multiple - 1))) }
+
+#define HFI_YUYV_CALC_BUF_SIZE(buf_size, stride, buf_height) \
+	{ buf_size = stride * buf_height }
+
+#define HFI_RGB888_CALC_STRIDE(stride, frame_width, stride_multiple) \
+	{ stride = ((frame_width * 3) + stride_multiple - 1) & \
+	(0xffffffff - (stride_multiple - 1)) }
+
+#define HFI_RGB888_CALC_BUFHEIGHT(buf_height, frame_height,\
+	min_buf_height_multiple) \
+	{ buf_height = ((frame_height + min_buf_height_multiple - 1) & \
+	(0xffffffff - (min_buf_height_multiple - 1))) }
+
+#define HFI_RGB888_CALC_BUF_SIZE(buf_size, stride, buf_height) \
+	{ buf_size = (stride * buf_height) }
+
+#define HFI_COMMON_BASE				(0)
+#define HFI_OX_BASE					(0x01000000)
+
+#define HFI_VIDEO_DOMAIN_ENCODER	(HFI_COMMON_BASE + 0x1)
+#define HFI_VIDEO_DOMAIN_DECODER	(HFI_COMMON_BASE + 0x2)
+#define HFI_VIDEO_DOMAIN_VPE		(HFI_COMMON_BASE + 0x3)
+#define HFI_VIDEO_DOMAIN_MBI		(HFI_COMMON_BASE + 0x4)
+
+#define HFI_DOMAIN_BASE_COMMON		(HFI_COMMON_BASE + 0)
+#define HFI_DOMAIN_BASE_VDEC		(HFI_COMMON_BASE + 0x01000000)
+#define HFI_DOMAIN_BASE_VENC		(HFI_COMMON_BASE + 0x02000000)
+#define HFI_DOMAIN_BASE_VPE			(HFI_COMMON_BASE + 0x03000000)
+
+#define HFI_VIDEO_ARCH_OX			(HFI_COMMON_BASE + 0x1)
+
+#define HFI_ARCH_COMMON_OFFSET		(0)
+#define HFI_ARCH_OX_OFFSET			(0x00200000)
+
+#define HFI_ERR_NONE						HFI_COMMON_BASE
+#define HFI_ERR_SYS_FATAL				(HFI_COMMON_BASE + 0x1)
+#define HFI_ERR_SYS_INVALID_PARAMETER		(HFI_COMMON_BASE + 0x2)
+#define HFI_ERR_SYS_VERSION_MISMATCH		(HFI_COMMON_BASE + 0x3)
+#define HFI_ERR_SYS_INSUFFICIENT_RESOURCES	(HFI_COMMON_BASE + 0x4)
+#define HFI_ERR_SYS_MAX_SESSIONS_REACHED	(HFI_COMMON_BASE + 0x5)
+#define HFI_ERR_SYS_UNSUPPORTED_CODEC		(HFI_COMMON_BASE + 0x6)
+#define HFI_ERR_SYS_SESSION_IN_USE			(HFI_COMMON_BASE + 0x7)
+#define HFI_ERR_SYS_SESSION_ID_OUT_OF_RANGE	(HFI_COMMON_BASE + 0x8)
+#define HFI_ERR_SYS_UNSUPPORTED_DOMAIN		(HFI_COMMON_BASE + 0x9)
+
+#define HFI_ERR_SESSION_FATAL			(HFI_COMMON_BASE + 0x1001)
+#define HFI_ERR_SESSION_INVALID_PARAMETER	(HFI_COMMON_BASE + 0x1002)
+#define HFI_ERR_SESSION_BAD_POINTER		(HFI_COMMON_BASE + 0x1003)
+#define HFI_ERR_SESSION_INVALID_SESSION_ID	(HFI_COMMON_BASE + 0x1004)
+#define HFI_ERR_SESSION_INVALID_STREAM_ID	(HFI_COMMON_BASE + 0x1005)
+#define HFI_ERR_SESSION_INCORRECT_STATE_OPERATION		\
+	(HFI_COMMON_BASE + 0x1006)
+#define HFI_ERR_SESSION_UNSUPPORTED_PROPERTY	(HFI_COMMON_BASE + 0x1007)
+
+#define HFI_ERR_SESSION_UNSUPPORTED_SETTING	(HFI_COMMON_BASE + 0x1008)
+
+#define HFI_ERR_SESSION_INSUFFICIENT_RESOURCES	(HFI_COMMON_BASE + 0x1009)
+
+#define HFI_ERR_SESSION_STREAM_CORRUPT_OUTPUT_STALLED	\
+	(HFI_COMMON_BASE + 0x100A)
+
+#define HFI_ERR_SESSION_STREAM_CORRUPT		(HFI_COMMON_BASE + 0x100B)
+#define HFI_ERR_SESSION_ENC_OVERFLOW		(HFI_COMMON_BASE + 0x100C)
+
+#define HFI_EVENT_SYS_ERROR				(HFI_COMMON_BASE + 0x1)
+#define HFI_EVENT_SESSION_ERROR			(HFI_COMMON_BASE + 0x2)
+
+#define HFI_VIDEO_CODEC_H264				0x00000002
+#define HFI_VIDEO_CODEC_H263				0x00000004
+#define HFI_VIDEO_CODEC_MPEG1				0x00000008
+#define HFI_VIDEO_CODEC_MPEG2				0x00000010
+#define HFI_VIDEO_CODEC_MPEG4				0x00000020
+#define HFI_VIDEO_CODEC_DIVX_311			0x00000040
+#define HFI_VIDEO_CODEC_DIVX				0x00000080
+#define HFI_VIDEO_CODEC_VC1					0x00000100
+#define HFI_VIDEO_CODEC_SPARK				0x00000200
+#define HFI_VIDEO_CODEC_VP8					0x00001000
+
+#define HFI_H264_PROFILE_BASELINE			0x00000001
+#define HFI_H264_PROFILE_MAIN				0x00000002
+#define HFI_H264_PROFILE_HIGH				0x00000004
+#define HFI_H264_PROFILE_STEREO_HIGH		0x00000008
+#define HFI_H264_PROFILE_MULTIVIEW_HIGH		0x00000010
+#define HFI_H264_PROFILE_CONSTRAINED_HIGH	0x00000020
+
+#define HFI_H264_LEVEL_1					0x00000001
+#define HFI_H264_LEVEL_1b					0x00000002
+#define HFI_H264_LEVEL_11					0x00000004
+#define HFI_H264_LEVEL_12					0x00000008
+#define HFI_H264_LEVEL_13					0x00000010
+#define HFI_H264_LEVEL_2					0x00000020
+#define HFI_H264_LEVEL_21					0x00000040
+#define HFI_H264_LEVEL_22					0x00000080
+#define HFI_H264_LEVEL_3					0x00000100
+#define HFI_H264_LEVEL_31					0x00000200
+#define HFI_H264_LEVEL_32					0x00000400
+#define HFI_H264_LEVEL_4					0x00000800
+#define HFI_H264_LEVEL_41					0x00001000
+#define HFI_H264_LEVEL_42					0x00002000
+#define HFI_H264_LEVEL_5					0x00004000
+#define HFI_H264_LEVEL_51					0x00008000
+
+#define HFI_H263_PROFILE_BASELINE			0x00000001
+
+#define HFI_H263_LEVEL_10					0x00000001
+#define HFI_H263_LEVEL_20					0x00000002
+#define HFI_H263_LEVEL_30					0x00000004
+#define HFI_H263_LEVEL_40					0x00000008
+#define HFI_H263_LEVEL_45					0x00000010
+#define HFI_H263_LEVEL_50					0x00000020
+#define HFI_H263_LEVEL_60					0x00000040
+#define HFI_H263_LEVEL_70					0x00000080
+
+#define HFI_MPEG2_PROFILE_SIMPLE			0x00000001
+#define HFI_MPEG2_PROFILE_MAIN				0x00000002
+#define HFI_MPEG2_PROFILE_422				0x00000004
+#define HFI_MPEG2_PROFILE_SNR				0x00000008
+#define HFI_MPEG2_PROFILE_SPATIAL			0x00000010
+#define HFI_MPEG2_PROFILE_HIGH				0x00000020
+
+#define HFI_MPEG2_LEVEL_LL					0x00000001
+#define HFI_MPEG2_LEVEL_ML					0x00000002
+#define HFI_MPEG2_LEVEL_H14					0x00000004
+#define HFI_MPEG2_LEVEL_HL					0x00000008
+
+#define HFI_MPEG4_PROFILE_SIMPLE			0x00000001
+#define HFI_MPEG4_PROFILE_ADVANCEDSIMPLE	0x00000002
+
+#define HFI_MPEG4_LEVEL_0					0x00000001
+#define HFI_MPEG4_LEVEL_0b					0x00000002
+#define HFI_MPEG4_LEVEL_1					0x00000004
+#define HFI_MPEG4_LEVEL_2					0x00000008
+#define HFI_MPEG4_LEVEL_3					0x00000010
+#define HFI_MPEG4_LEVEL_4					0x00000020
+#define HFI_MPEG4_LEVEL_4a					0x00000040
+#define HFI_MPEG4_LEVEL_5					0x00000080
+#define HFI_MPEG4_LEVEL_6					0x00000100
+#define HFI_MPEG4_LEVEL_7					0x00000200
+#define HFI_MPEG4_LEVEL_8					0x00000400
+#define HFI_MPEG4_LEVEL_9					0x00000800
+#define HFI_MPEG4_LEVEL_3b					0x00001000
+
+#define HFI_VC1_PROFILE_SIMPLE				0x00000001
+#define HFI_VC1_PROFILE_MAIN				0x00000002
+#define HFI_VC1_PROFILE_ADVANCED			0x00000004
+
+#define HFI_VC1_LEVEL_LOW					0x00000001
+#define HFI_VC1_LEVEL_MEDIUM				0x00000002
+#define HFI_VC1_LEVEL_HIGH					0x00000004
+#define HFI_VC1_LEVEL_0						0x00000008
+#define HFI_VC1_LEVEL_1						0x00000010
+#define HFI_VC1_LEVEL_2						0x00000020
+#define HFI_VC1_LEVEL_3						0x00000040
+#define HFI_VC1_LEVEL_4						0x00000080
+
+#define HFI_VPX_PROFILE_SIMPLE				0x00000001
+#define HFI_VPX_PROFILE_ADVANCED			0x00000002
+#define HFI_VPX_PROFILE_VERSION_0			0x00000004
+#define HFI_VPX_PROFILE_VERSION_1			0x00000008
+#define HFI_VPX_PROFILE_VERSION_2			0x00000010
+#define HFI_VPX_PROFILE_VERSION_3			0x00000020
+
+#define HFI_DIVX_FORMAT_4				(HFI_COMMON_BASE + 0x1)
+#define HFI_DIVX_FORMAT_5				(HFI_COMMON_BASE + 0x2)
+#define HFI_DIVX_FORMAT_6				(HFI_COMMON_BASE + 0x3)
+
+#define HFI_DIVX_PROFILE_QMOBILE		0x00000001
+#define HFI_DIVX_PROFILE_MOBILE			0x00000002
+#define HFI_DIVX_PROFILE_MT				0x00000004
+#define HFI_DIVX_PROFILE_HT				0x00000008
+#define HFI_DIVX_PROFILE_HD				0x00000010
+
+#define HFI_BUFFER_INPUT				(HFI_COMMON_BASE + 0x1)
+#define HFI_BUFFER_OUTPUT				(HFI_COMMON_BASE + 0x2)
+#define HFI_BUFFER_OUTPUT2				(HFI_COMMON_BASE + 0x3)
+#define HFI_BUFFER_INTERNAL_PERSIST		(HFI_COMMON_BASE + 0x4)
+
+struct hfi_buffer_info {
+	u32 buffer_addr;
+	u32 extra_data_addr;
+};
+
+#define HFI_PROPERTY_SYS_COMMON_START		\
+	(HFI_DOMAIN_BASE_COMMON + HFI_ARCH_COMMON_OFFSET + 0x0000)
+#define HFI_PROPERTY_SYS_DEBUG_CONFIG		\
+	(HFI_PROPERTY_SYS_COMMON_START + 0x001)
+#define HFI_PROPERTY_SYS_RESOURCE_OCMEM_REQUIREMENT_INFO	\
+	(HFI_PROPERTY_SYS_COMMON_START + 0x002)
+#define HFI_PROPERTY_PARAM_COMMON_START		\
+	(HFI_DOMAIN_BASE_COMMON + HFI_ARCH_COMMON_OFFSET + 0x1000)
+#define HFI_PROPERTY_PARAM_FRAME_SIZE		\
+	(HFI_PROPERTY_PARAM_COMMON_START + 0x001)
+#define HFI_PROPERTY_PARAM_UNCOMPRESSED_PLANE_ACTUAL_INFO	\
+	(HFI_PROPERTY_PARAM_COMMON_START + 0x002)
+#define HFI_PROPERTY_PARAM_UNCOMPRESSED_FORMAT_SELECT		\
+	(HFI_PROPERTY_PARAM_COMMON_START + 0x003)
+#define HFI_PROPERTY_PARAM_UNCOMPRESSED_FORMAT_SUPPORTED	\
+	(HFI_PROPERTY_PARAM_COMMON_START + 0x004)
+#define HFI_PROPERTY_PARAM_PROFILE_LEVEL_CURRENT			\
+	(HFI_PROPERTY_PARAM_COMMON_START + 0x005)
+#define HFI_PROPERTY_PARAM_PROFILE_LEVEL_SUPPORTED			\
+	(HFI_PROPERTY_PARAM_COMMON_START + 0x006)
+#define HFI_PROPERTY_PARAM_CAPABILITY_SUPPORTED				\
+	(HFI_PROPERTY_PARAM_COMMON_START + 0x007)
+#define HFI_PROPERTY_PARAM_PROPERTIES_SUPPORTED				\
+	(HFI_PROPERTY_PARAM_COMMON_START + 0x008)
+#define HFI_PROPERTY_PARAM_CODEC_SUPPORTED			\
+	(HFI_PROPERTY_PARAM_COMMON_START + 0x009)
+#define HFI_PROPERTY_PARAM_NAL_STREAM_FORMAT_SUPPORTED		\
+	(HFI_PROPERTY_PARAM_COMMON_START + 0x00A)
+#define HFI_PROPERTY_PARAM_NAL_STREAM_FORMAT_SELECT			\
+	(HFI_PROPERTY_PARAM_COMMON_START + 0x00B)
+#define HFI_PROPERTY_PARAM_MULTI_VIEW_FORMAT				\
+	(HFI_PROPERTY_PARAM_COMMON_START + 0x00C)
+
+#define HFI_PROPERTY_CONFIG_COMMON_START				\
+	(HFI_DOMAIN_BASE_COMMON + HFI_ARCH_COMMON_OFFSET + 0x2000)
+#define HFI_PROPERTY_CONFIG_FRAME_RATE					\
+	(HFI_PROPERTY_CONFIG_COMMON_START + 0x001)
+
+#define HFI_PROPERTY_PARAM_VDEC_COMMON_START				\
+	(HFI_DOMAIN_BASE_VDEC + HFI_ARCH_COMMON_OFFSET + 0x3000)
+#define HFI_PROPERTY_PARAM_VDEC_MULTI_STREAM				\
+	(HFI_PROPERTY_PARAM_VDEC_COMMON_START + 0x001)
+
+#define HFI_PROPERTY_CONFIG_VDEC_COMMON_START				\
+	(HFI_DOMAIN_BASE_VDEC + HFI_ARCH_COMMON_OFFSET + 0x4000)
+
+#define HFI_PROPERTY_PARAM_VENC_COMMON_START				\
+	(HFI_DOMAIN_BASE_VENC + HFI_ARCH_COMMON_OFFSET + 0x5000)
+#define HFI_PROPERTY_PARAM_VENC_SLICE_DELIVERY_MODE			\
+	(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x001)
+#define HFI_PROPERTY_PARAM_VENC_H264_ENTROPY_CONTROL		\
+	(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x002)
+#define HFI_PROPERTY_PARAM_VENC_H264_DEBLOCK_CONTROL		\
+	(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x003)
+#define HFI_PROPERTY_PARAM_VENC_RATE_CONTROL				\
+	(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x004)
+#define HFI_PROPERTY_PARAM_VENC_TEMPORAL_SPATIAL_TRADEOFF	\
+	(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x005)
+#define HFI_PROPERTY_PARAM_VENC_QUALITY_VS_SPEED			\
+	(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x010)
+#define HFI_PROPERTY_PARAM_VENC_SESSION_QP				\
+	(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x006)
+#define HFI_PROPERTY_PARAM_VENC_MPEG4_AC_PREDICTION			\
+	(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x007)
+#define HFI_PROPERTY_PARAM_VENC_MPEG4_DATA_PARTITIONING		\
+	(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x008)
+#define HFI_PROPERTY_PARAM_VENC_MPEG4_TIME_RESOLUTION		\
+	(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x009)
+#define HFI_PROPERTY_PARAM_VENC_MPEG4_SHORT_HEADER			\
+	(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x00A)
+#define HFI_PROPERTY_PARAM_VENC_MPEG4_HEADER_EXTENSION		\
+	(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x00B)
+#define HFI_PROPERTY_PARAM_VENC_MULTI_SLICE_INFO			\
+	(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x00C)
+#define HFI_PROPERTY_PARAM_VENC_INTRA_REFRESH				\
+	(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x00D)
+#define HFI_PROPERTY_PARAM_VENC_MULTI_SLICE_CONTROL			\
+	(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x00E)
+#define HFI_PROPERTY_PARAM_VENC_VBVBUFFER_SIZE				\
+	(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x00F)
+#define HFI_PROPERTY_PARAM_VENC_MPEG4_QPEL				\
+	(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x011)
+#define HFI_PROPERTY_PARAM_VENC_ADVANCED				\
+	(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x012)
+#define HFI_PROPERTY_PARAM_VENC_SYNC_FRAME_SEQUENCE_HEADER	\
+	(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x013)
+#define HFI_PROPERTY_PARAM_VENC_H264_IDR_S3D_FRAME_PACKING_NAL	\
+	(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x014)
+
+#define HFI_PROPERTY_CONFIG_VENC_COMMON_START				\
+	(HFI_DOMAIN_BASE_VENC + HFI_ARCH_COMMON_OFFSET + 0x6000)
+#define HFI_PROPERTY_CONFIG_VENC_TARGET_BITRATE				\
+	(HFI_PROPERTY_CONFIG_VENC_COMMON_START + 0x001)
+#define HFI_PROPERTY_CONFIG_VENC_IDR_PERIOD				\
+	(HFI_PROPERTY_CONFIG_VENC_COMMON_START + 0x002)
+#define HFI_PROPERTY_CONFIG_VENC_INTRA_PERIOD				\
+	(HFI_PROPERTY_CONFIG_VENC_COMMON_START + 0x003)
+#define HFI_PROPERTY_CONFIG_VENC_REQUEST_SYNC_FRAME			\
+	(HFI_PROPERTY_CONFIG_VENC_COMMON_START + 0x004)
+#define HFI_PROPERTY_CONFIG_VENC_TIMESTAMP_SCALE			\
+	(HFI_PROPERTY_CONFIG_VENC_COMMON_START + 0x005)
+#define HFI_PROPERTY_CONFIG_VENC_FRAME_QP				\
+	(HFI_PROPERTY_CONFIG_VENC_COMMON_START + 0x006)
+#define HFI_PROPERTY_CONFIG_VENC_MAX_BITRATE				\
+	(HFI_PROPERTY_CONFIG_VENC_COMMON_START + 0x007)
+
+#define HFI_PROPERTY_PARAM_VPE_COMMON_START				\
+	(HFI_DOMAIN_BASE_VPE + HFI_ARCH_COMMON_OFFSET + 0x7000)
+
+#define HFI_PROPERTY_CONFIG_VPE_COMMON_START				\
+	(HFI_DOMAIN_BASE_VPE + HFI_ARCH_COMMON_OFFSET + 0x8000)
+#define HFI_PROPERTY_CONFIG_VPE_DEINTERLACE				\
+	(HFI_PROPERTY_CONFIG_VPE_COMMON_START + 0x001)
+#define HFI_PROPERTY_CONFIG_VPE_OPERATIONS				\
+	(HFI_PROPERTY_CONFIG_VPE_COMMON_START + 0x002)
+
+struct hfi_bitrate {
+	u32 bit_rate;
+};
+
+#define HFI_CAPABILITY_FRAME_WIDTH			(HFI_COMMON_BASE + 0x1)
+#define HFI_CAPABILITY_FRAME_HEIGHT			(HFI_COMMON_BASE + 0x2)
+#define HFI_CAPABILITY_MBS_PER_FRAME		(HFI_COMMON_BASE + 0x3)
+#define HFI_CAPABILITY_MBS_PER_SECOND		(HFI_COMMON_BASE + 0x4)
+#define HFI_CAPABILITY_FRAMERATE			(HFI_COMMON_BASE + 0x5)
+#define HFI_CAPABILITY_SCALE_X				(HFI_COMMON_BASE + 0x6)
+#define HFI_CAPABILITY_SCALE_Y				(HFI_COMMON_BASE + 0x7)
+#define HFI_CAPABILITY_BITRATE				(HFI_COMMON_BASE + 0x8)
+
+struct hfi_capability_supported {
+	u32 capability_type;
+	u32 min;
+	u32 max;
+	u32 step_size;
+};
+
+struct hfi_capability_supported_info {
+	u32 num_capabilities;
+	struct hfi_capability_supported rg_data[1];
+};
+
+#define HFI_DEBUG_MSG_LOW					0x00000001
+#define HFI_DEBUG_MSG_MEDIUM				0x00000002
+#define HFI_DEBUG_MSG_HIGH					0x00000004
+#define HFI_DEBUG_MSG_ERROR					0x00000008
+#define HFI_DEBUG_MSG_FATAL					0x00000010
+
+struct hfi_debug_config {
+	u32 debug_config;
+};
+
+struct hfi_enable {
+	int enable;
+};
+
+#define HFI_H264_DB_MODE_DISABLE			(HFI_COMMON_BASE + 0x1)
+#define HFI_H264_DB_MODE_SKIP_SLICE_BOUNDARY	(HFI_COMMON_BASE + 0x2)
+#define HFI_H264_DB_MODE_ALL_BOUNDARY			(HFI_COMMON_BASE + 0x3)
+
+struct hfi_h264_db_control {
+	u32 mode;
+	int slice_alpha_offset;
+	int slice_beta_offset;
+};
+
+#define HFI_H264_ENTROPY_CAVLC				(HFI_COMMON_BASE + 0x1)
+#define HFI_H264_ENTROPY_CABAC				(HFI_COMMON_BASE + 0x2)
+
+#define HFI_H264_CABAC_MODEL_0				(HFI_COMMON_BASE + 0x1)
+#define HFI_H264_CABAC_MODEL_1				(HFI_COMMON_BASE + 0x2)
+#define HFI_H264_CABAC_MODEL_2				(HFI_COMMON_BASE + 0x3)
+
+struct hfi_h264_entropy_control {
+	u32 entropy_mode;
+	u32 cabac_model;
+};
+
+struct hfi_frame_rate {
+	u32 buffer_type;
+	u32 frame_rate;
+};
+
+#define HFI_INTRA_REFRESH_NONE				(HFI_COMMON_BASE + 0x1)
+#define HFI_INTRA_REFRESH_CYCLIC			(HFI_COMMON_BASE + 0x2)
+#define HFI_INTRA_REFRESH_ADAPTIVE			(HFI_COMMON_BASE + 0x3)
+#define HFI_INTRA_REFRESH_CYCLIC_ADAPTIVE	(HFI_COMMON_BASE + 0x4)
+#define HFI_INTRA_REFRESH_RANDOM			(HFI_COMMON_BASE + 0x5)
+
+struct hfi_intra_refresh {
+	u32 mode;
+	u32 air_mbs;
+	u32 air_ref;
+	u32 cir_mbs;
+};
+
+struct hfi_idr_period {
+	u32 idr_period;
+};
+
+struct hfi_intra_period {
+	u32 pframes;
+	u32 bframes;
+};
+
+struct hfi_timestamp_scale {
+	u32 time_stamp_scale;
+};
+
+struct hfi_mpeg4_header_extension {
+	u32 header_extension;
+};
+
+struct hfi_mpeg4_time_resolution {
+	u32 time_increment_resolution;
+};
+
+struct hfi_multi_stream {
+	u32 buffer_type;
+	u32 enable;
+	u32 width;
+	u32 height;
+};
+
+struct hfi_multi_view_format {
+	u32 views;
+	u32 rg_view_order[1];
+};
+
+#define HFI_MULTI_SLICE_OFF				(HFI_COMMON_BASE + 0x1)
+#define HFI_MULTI_SLICE_BY_MB_COUNT			(HFI_COMMON_BASE + 0x2)
+#define HFI_MULTI_SLICE_BY_BYTE_COUNT		(HFI_COMMON_BASE + 0x3)
+#define HFI_MULTI_SLICE_GOB				(HFI_COMMON_BASE + 0x4)
+
+struct hfi_multi_slice_control {
+	u32 multi_slice;
+	u32 slice_size;
+};
+
+#define HFI_NAL_FORMAT_STARTCODES			0x00000001
+#define HFI_NAL_FORMAT_ONE_NAL_PER_BUFFER	0x00000002
+#define HFI_NAL_FORMAT_ONE_BYTE_LENGTH		0x00000004
+#define HFI_NAL_FORMAT_TWO_BYTE_LENGTH		0x00000008
+#define HFI_NAL_FORMAT_FOUR_BYTE_LENGTH		0x00000010
+
+struct hfi_nal_stream_format_supported {
+	u32 nal_stream_format_supported;
+};
+
+#define HFI_PICTURE_TYPE_I					0x01
+#define HFI_PICTURE_TYPE_P					0x02
+#define HFI_PICTURE_TYPE_B					0x04
+#define HFI_PICTURE_TYPE_IDR				0x08
+
+struct hfi_profile_level {
+	u32 profile;
+	u32 level;
+};
+
+struct hfi_profile_level_supported {
+	u32 profile_count;
+	struct hfi_profile_level rg_profile_level[1];
+};
+
+struct hfi_quantization {
+	u32 qp_i;
+	u32 qp_p;
+	u32 qp_b;
+	u32 layer_id;
+};
+
+struct hfi_temporal_spatial_tradeoff {
+	u32 ts_factor;
+};
+
+struct hfi_frame_size {
+	u32 buffer_type;
+	u32 width;
+	u32 height;
+};
+
+#define HFI_COLOR_FORMAT_MONOCHROME			(HFI_COMMON_BASE + 0x1)
+#define HFI_COLOR_FORMAT_NV12				(HFI_COMMON_BASE + 0x2)
+#define HFI_COLOR_FORMAT_NV21				(HFI_COMMON_BASE + 0x3)
+#define HFI_COLOR_FORMAT_NV12_4x4TILE		(HFI_COMMON_BASE + 0x4)
+#define HFI_COLOR_FORMAT_NV21_4x4TILE		(HFI_COMMON_BASE + 0x5)
+#define HFI_COLOR_FORMAT_YUYV				(HFI_COMMON_BASE + 0x6)
+#define HFI_COLOR_FORMAT_YVYU				(HFI_COMMON_BASE + 0x7)
+#define HFI_COLOR_FORMAT_UYVY				(HFI_COMMON_BASE + 0x8)
+#define HFI_COLOR_FORMAT_VYUY				(HFI_COMMON_BASE + 0x9)
+#define HFI_COLOR_FORMAT_RGB565				(HFI_COMMON_BASE + 0xA)
+#define HFI_COLOR_FORMAT_BGR565				(HFI_COMMON_BASE + 0xB)
+#define HFI_COLOR_FORMAT_RGB888				(HFI_COMMON_BASE + 0xC)
+#define HFI_COLOR_FORMAT_BGR888				(HFI_COMMON_BASE + 0xD)
+
+struct hfi_uncompressed_format_select {
+	u32 buffer_type;
+	u32 format;
+};
+
+struct hfi_uncompressed_format_supported {
+	u32 buffer_type;
+	u32 format_entries;
+	u32 rg_format_info[1];
+};
+
+struct hfi_uncompressed_plane_actual {
+	int actual_stride;
+	u32 actual_plane_buffer_height;
+};
+
+struct hfi_uncompressed_plane_actual_info {
+	u32 buffer_type;
+	u32 num_planes;
+	struct hfi_uncompressed_plane_actual rg_plane_format[1];
+};
+
+struct hfi_uncompressed_plane_constraints {
+	u32 stride_multiples;
+	u32 max_stride;
+	u32 min_plane_buffer_height_multiple;
+	u32 buffer_alignment;
+};
+
+struct hfi_uncompressed_plane_info {
+	u32 format;
+	u32 num_planes;
+	struct hfi_uncompressed_plane_constraints rg_plane_format[1];
+};
+
+struct hfi_codec_supported {
+	u32 decoder_codec_supported;
+	u32 encoder_codec_supported;
+};
+
+struct hfi_properties_supported {
+	u32 num_properties;
+	u32 rg_properties[1];
+};
+
+#define HFI_ROTATE_NONE					(HFI_COMMON_BASE + 0x1)
+#define HFI_ROTATE_90					(HFI_COMMON_BASE + 0x2)
+#define HFI_ROTATE_180					(HFI_COMMON_BASE + 0x3)
+#define HFI_ROTATE_270					(HFI_COMMON_BASE + 0x4)
+
+#define HFI_FLIP_NONE					(HFI_COMMON_BASE + 0x1)
+#define HFI_FLIP_HORIZONTAL				(HFI_COMMON_BASE + 0x2)
+#define HFI_FLIP_VERTICAL				(HFI_COMMON_BASE + 0x3)
+
+struct hfi_operations {
+	u32 rotate;
+	u32 flip;
+};
+
+#define HFI_RESOURCE_OCMEM 0x00000001
+
+struct hfi_resource_ocmem {
+	u32 size;
+	u8 *mem;
+};
+
+struct hfi_resource_ocmem_requirement {
+	u32 session_domain;
+	u32 width;
+	u32 height;
+	u32 size;
+};
+
+struct hfi_resource_ocmem_requirement_info {
+	u32 num_entries;
+	struct hfi_resource_ocmem_requirement rg_requirements[1];
+};
+
+struct hfi_venc_config_advanced {
+	u8 pipe2d;
+	u8 hw_mode;
+	u8 low_delay_enforce;
+	int h264_constrain_intra_pred;
+	int h264_transform_8x8_flag;
+	int mpeg4_qpel_enable;
+	int multi_refp_en;
+	int qmatrix_en;
+	u8 vpp_info_packet_mode;
+	u8 ref_tile_mode;
+	u8 bitstream_flush_mode;
+	u32 ds_display_frame_width;
+	u32 ds_display_frame_height;
+	u32 perf_tune_param_ptr;
+	u32 input_x_offset;
+	u32 input_y_offset;
+	u32 input_roi_width;
+	u32 input_roi_height;
+	u32 vsp_fifo_dma_sel;
+	u32 h264_num_ref_frames;
+};
+
+#define HFI_CMD_SYS_COMMON_START			\
+	(HFI_DOMAIN_BASE_COMMON + HFI_ARCH_COMMON_OFFSET + 0x0000)
+#define HFI_CMD_SYS_INIT		(HFI_CMD_SYS_COMMON_START + 0x001)
+#define HFI_CMD_SYS_PC_PREP		(HFI_CMD_SYS_COMMON_START + 0x002)
+#define HFI_CMD_SYS_SET_RESOURCE	(HFI_CMD_SYS_COMMON_START + 0x003)
+#define HFI_CMD_SYS_RELEASE_RESOURCE (HFI_CMD_SYS_COMMON_START + 0x004)
+#define HFI_CMD_SYS_SET_PROPERTY	(HFI_CMD_SYS_COMMON_START + 0x005)
+#define HFI_CMD_SYS_GET_PROPERTY	(HFI_CMD_SYS_COMMON_START + 0x006)
+#define HFI_CMD_SYS_SESSION_INIT	(HFI_CMD_SYS_COMMON_START + 0x007)
+#define HFI_CMD_SYS_SESSION_END		(HFI_CMD_SYS_COMMON_START + 0x008)
+#define HFI_CMD_SYS_SET_BUFFERS		(HFI_CMD_SYS_COMMON_START + 0x009)
+
+#define HFI_CMD_SESSION_COMMON_START		\
+	(HFI_DOMAIN_BASE_COMMON + HFI_ARCH_COMMON_OFFSET + 0x1000)
+#define HFI_CMD_SESSION_SET_PROPERTY		\
+	(HFI_CMD_SESSION_COMMON_START + 0x001)
+#define HFI_CMD_SESSION_SET_BUFFERS			\
+	(HFI_CMD_SESSION_COMMON_START + 0x002)
+#define HFI_CMD_SESSION_GET_SEQUENCE_HEADER	\
+	(HFI_CMD_SESSION_COMMON_START + 0x003)
+
+#define HFI_MSG_SYS_COMMON_START			\
+	(HFI_DOMAIN_BASE_COMMON + HFI_ARCH_COMMON_OFFSET + 0x0000)
+#define HFI_MSG_SYS_INIT_DONE			(HFI_MSG_SYS_COMMON_START + 0x1)
+#define HFI_MSG_SYS_PC_PREP_DONE		(HFI_MSG_SYS_COMMON_START + 0x2)
+#define HFI_MSG_SYS_RELEASE_RESOURCE	(HFI_MSG_SYS_COMMON_START + 0x3)
+#define HFI_MSG_SYS_DEBUG			(HFI_MSG_SYS_COMMON_START + 0x4)
+#define HFI_MSG_SYS_SESSION_INIT_DONE	(HFI_MSG_SYS_COMMON_START + 0x6)
+#define HFI_MSG_SYS_SESSION_END_DONE	(HFI_MSG_SYS_COMMON_START + 0x7)
+
+#define HFI_MSG_SESSION_COMMON_START		\
+	(HFI_DOMAIN_BASE_COMMON + HFI_ARCH_COMMON_OFFSET + 0x1000)
+#define HFI_MSG_EVENT_NOTIFY	(HFI_MSG_SESSION_COMMON_START + 0x1)
+#define HFI_MSG_SESSION_GET_SEQUENCE_HEADER_DONE	\
+	(HFI_MSG_SESSION_COMMON_START + 0x2)
+
+struct vidc_hal_msg_pkt_hdr {
+	u32 size;
+	u32 packet;
+};
+
+struct vidc_hal_session_cmd_pkt {
+	u32 size;
+	u32 packet_type;
+	u32 session_id;
+};
+
+struct hfi_cmd_sys_init_packet {
+	u32 size;
+	u32 packet_type;
+	u32 arch_type;
+};
+
+struct hfi_cmd_sys_pc_prep_packet {
+	u32 size;
+	u32 packet_type;
+};
+
+struct hfi_cmd_sys_set_resource_packet {
+	u32 size;
+	u32 packet_type;
+	u32 resource_handle;
+	u32 resource_type;
+	u32 rg_resource_data[1];
+};
+
+struct hfi_cmd_sys_release_resource_packet {
+	u32 size;
+	u32 packet_type;
+	u32 resource_type;
+	u32 resource_handle;
+};
+
+struct hfi_cmd_sys_set_property_packet {
+	u32 size;
+	u32 packet_type;
+	u32 num_properties;
+	u32 rg_property_data[1];
+};
+
+struct hfi_cmd_sys_get_property_packet {
+	u32 size;
+	u32 packet_type;
+	u32 num_properties;
+	u32 rg_property_data[1];
+};
+
+struct hfi_cmd_sys_session_init_packet {
+	u32 size;
+	u32 packet_type;
+	u32 session_id;
+	u32 session_domain;
+	u32 session_codec;
+};
+
+struct hfi_cmd_sys_session_end_packet {
+	u32 size;
+	u32 packet_type;
+	u32 session_id;
+};
+
+struct hfi_cmd_sys_set_buffers_packet {
+	u32 size;
+	u32 packet_type;
+	u32 buffer_type;
+	u32 buffer_size;
+	u32 num_buffers;
+	u32 rg_buffer_addr[1];
+};
+
+struct hfi_cmd_session_set_property_packet {
+	u32 size;
+	u32 packet_type;
+	u32 session_id;
+	u32 num_properties;
+	u32 rg_property_data[0];
+};
+
+struct hfi_cmd_session_set_buffers_packet {
+	u32 size;
+	u32 packet_type;
+	u32 session_id;
+	u32 buffer_type;
+	u32 buffer_mode;
+	u32 buffer_size;
+	u32 extra_data_size;
+	u32 min_buffer_size;
+	u32 num_buffers;
+	u32 rg_buffer_info[1];
+};
+
+struct hfi_cmd_session_get_sequence_header_packet {
+	u32 size;
+	u32 packet_type;
+	u32 session_id;
+	u32 buffer_len;
+	u8 *packet_buffer;
+};
+
+struct hfi_msg_event_notify_packet {
+	u32 size;
+	u32 packet_type;
+	u32 session_id;
+	u32 event_id;
+	u32 event_data1;
+	u32 event_data2;
+	u32 rg_ext_event_data[1];
+};
+
+struct hfi_msg_sys_init_done_packet {
+	u32 size;
+	u32 packet_type;
+	u32 error_type;
+	u32 num_properties;
+	u32 rg_property_data[1];
+};
+
+struct hfi_msg_sys_pc_prep_done_packet {
+	u32 size;
+	u32 packet_type;
+	u32 error_type;
+};
+
+struct hfi_msg_sys_release_resource_done_packet {
+	u32 size;
+	u32 packet_type;
+	u32 resource_handle;
+	u32 error_type;
+};
+
+struct hfi_msg_sys_session_init_done_packet {
+	u32 size;
+	u32 packet_type;
+	u32 session_id;
+	u32 error_type;
+	u32 num_properties;
+	u32 rg_property_data[1];
+};
+
+struct hfi_msg_sys_session_end_done_packet {
+	u32 size;
+	u32 packet_type;
+	u32 session_id;
+	u32 error_type;
+};
+
+struct hfi_msg_session_get_sequence_header_done_packet {
+	u32 size;
+	u32 packet_type;
+	u32 session_id;
+	u32 error_type;
+	u32 header_len;
+	u8 *sequence_header;
+};
+
+struct hfi_msg_sys_debug_packet {
+	u32 size;
+	u32 packet_type;
+	u32 msg_type;
+	u32 msg_size;
+	u32 time_stamp_hi;
+	u32 time_stamp_lo;
+	u8 rg_msg_data[1];
+};
+
+#endif
diff --git a/drivers/media/video/msm_vidc/vidc_hal_interrupt_handler.c b/drivers/media/video/msm_vidc/vidc_hal_interrupt_handler.c
index 02b9699..b604d0a 100644
--- a/drivers/media/video/msm_vidc/vidc_hal_interrupt_handler.c
+++ b/drivers/media/video/msm_vidc/vidc_hal_interrupt_handler.c
@@ -15,7 +15,7 @@
 #include <linux/list.h>
 #include "vidc_hal.h"
 
-static enum vidc_status vidc_map_hal_err_status(enum HFI_ERROR hfi_err)
+static enum vidc_status vidc_map_hal_err_status(int hfi_err)
 {
 	enum vidc_status vidc_err;
 	switch (hfi_err) {
@@ -64,8 +64,6 @@
 	case HFI_ERR_SESSION_SYNC_FRAME_NOT_DETECTED:
 		vidc_err = VIDC_ERR_IFRAME_EXPECTED;
 		break;
-	case HFI_ERR_SYS_UNKNOWN:
-	case HFI_ERR_SESSION_UNKNOWN:
 	case HFI_ERR_SESSION_EMPTY_BUFFER_DONE_OUTPUT_PENDING:
 	default:
 		vidc_err = VIDC_ERR_FAIL;
@@ -82,7 +80,7 @@
 	int num_properties_changed;
 	struct hfi_frame_size frame_sz;
 	u8 *data_ptr;
-	enum HFI_PROPERTY prop_id;
+	int prop_id;
 	HAL_MSG_LOW("RECEIVED:EVENT_NOTIFY");
 	if (sizeof(struct hfi_msg_event_notify_packet)
 		> pkt->size) {
@@ -103,12 +101,11 @@
 	if (num_properties_changed) {
 		data_ptr = (u8 *) &pkt->rg_ext_event_data[0];
 		do {
-			prop_id = (enum HFI_PROPERTY) *((u32 *)data_ptr);
+			prop_id = (int) *((u32 *)data_ptr);
 			switch (prop_id) {
 			case HFI_PROPERTY_PARAM_FRAME_SIZE:
-				frame_sz.buffer =
-					(enum HFI_BUFFER)
-						*((((u32 *)data_ptr)+1));
+				frame_sz.buffer_type =
+					(int) *((((u32 *)data_ptr)+1));
 				frame_sz.width =
 					event_notify.width =
 						*((((u32 *)data_ptr)+2));
@@ -165,7 +162,7 @@
 	struct vidc_hal_sys_init_done sys_init_done;
 	u32 rem_bytes, bytes_read = 0, num_properties;
 	u8 *data_ptr;
-	enum HFI_PROPERTY prop_id;
+	int prop_id;
 	enum vidc_status status = VIDC_ERR_NONE;
 
 	HAL_MSG_LOW("RECEIVED:SYS_INIT_DONE");
@@ -202,7 +199,7 @@
 		num_properties = pkt->num_properties;
 
 		while ((num_properties != 0) && (rem_bytes >= sizeof(u32))) {
-			prop_id = (enum HFI_PROPERTY) *((u32 *)data_ptr);
+			prop_id = *((u32 *)data_ptr);
 			data_ptr = data_ptr + 4;
 
 			switch (prop_id) {
@@ -282,8 +279,8 @@
 			rc = VIDC_ERR_FAIL;
 		}
 		HAL_MSG_LOW("got buffer requirements for: %d",
-					hfi_buf_req->buffer);
-		switch (hfi_buf_req->buffer) {
+					hfi_buf_req->buffer_type);
+		switch (hfi_buf_req->buffer_type) {
 		case HFI_BUFFER_INPUT:
 			memcpy(&buffreq->buffer[0], hfi_buf_req,
 				sizeof(struct hfi_buffer_requirements));
@@ -330,8 +327,8 @@
 				HAL_BUFFER_INTERNAL_PERSIST;
 			break;
 		default:
-			HAL_MSG_ERROR("hal_process_sess_get_prop_buf_req:"
-			"bad_buffer_type: %d", hfi_buf_req->buffer);
+			HAL_MSG_ERROR("%s: bad_buffer_type: %d",
+				__func__, hfi_buf_req->buffer_type);
 			break;
 		}
 		req_bytes -= sizeof(struct hfi_buffer_requirements);
@@ -525,8 +522,8 @@
 		data_done.size = sizeof(struct msm_vidc_cb_data_done);
 		data_done.clnt_data = (void *) pkt->input_tag;
 
-		data_done.output_done.timestamp_hi = pkt->timestamp_hi;
-		data_done.output_done.timestamp_lo = pkt->timestamp_lo;
+		data_done.output_done.timestamp_hi = pkt->time_stamp_hi;
+		data_done.output_done.timestamp_lo = pkt->time_stamp_lo;
 		data_done.output_done.flags1 = pkt->flags;
 		data_done.output_done.mark_target = pkt->mark_target;
 		data_done.output_done.mark_data = pkt->mark_data;
@@ -559,20 +556,20 @@
 
 		data_done.output_done.stream_id = pkt->stream_id;
 		data_done.output_done.view_id = pkt->view_id;
-		data_done.output_done.timestamp_hi = pkt->timestamp_hi;
-		data_done.output_done.timestamp_lo = pkt->timestamp_lo;
+		data_done.output_done.timestamp_hi = pkt->time_stamp_hi;
+		data_done.output_done.timestamp_lo = pkt->time_stamp_lo;
 		data_done.output_done.flags1 = pkt->flags;
 		data_done.output_done.mark_target = pkt->mark_target;
 		data_done.output_done.mark_data = pkt->mark_data;
 		data_done.output_done.stats = pkt->stats;
 		data_done.output_done.alloc_len1 = pkt->alloc_len;
 		data_done.output_done.filled_len1 = pkt->filled_len;
-		data_done.output_done.offset1 = pkt->oofset;
+		data_done.output_done.offset1 = pkt->offset;
 		data_done.output_done.frame_width = pkt->frame_width;
 		data_done.output_done.frame_height = pkt->frame_height;
-		data_done.output_done.start_xCoord = pkt->start_xCoord;
-		data_done.output_done.start_yCoord = pkt->start_yCoord;
-		data_done.output_done.input_tag1 = pkt->input_tag1;
+		data_done.output_done.start_xCoord = pkt->start_x_coord;
+		data_done.output_done.start_yCoord = pkt->start_y_coord;
+		data_done.output_done.input_tag1 = pkt->input_tag;
 		data_done.output_done.picture_type = pkt->picture_type;
 		data_done.output_done.packet_buffer1 = pkt->packet_buffer;
 		data_done.output_done.extra_data_buffer =
diff --git a/drivers/mfd/pm8038-core.c b/drivers/mfd/pm8038-core.c
index 8fef786..b32932b 100644
--- a/drivers/mfd/pm8038-core.c
+++ b/drivers/mfd/pm8038-core.c
@@ -327,6 +327,17 @@
 	.pdata_size	= sizeof(struct pm8xxx_tm_core_data),
 };
 
+static const struct resource ccadc_cell_resources[] __devinitconst = {
+	SINGLE_IRQ_RESOURCE("PM8921_BMS_CCADC_EOC", PM8921_BMS_CCADC_EOC),
+};
+
+static struct mfd_cell ccadc_cell __devinitdata = {
+	.name		= PM8XXX_CCADC_DEV_NAME,
+	.id		= -1,
+	.resources	= ccadc_cell_resources,
+	.num_resources	= ARRAY_SIZE(ccadc_cell_resources),
+};
+
 static struct pm8xxx_vreg regulator_data[] = {
 	/*   name	     pc_name	    ctrl   test   hpm_min */
 	NLDO1200("8038_l1",		    0x0AE, 0x0AF, LDO_1200),
@@ -641,6 +652,19 @@
 		goto bail;
 	}
 
+	if (pdata->ccadc_pdata) {
+		ccadc_cell.platform_data = pdata->ccadc_pdata;
+		ccadc_cell.pdata_size =
+				sizeof(struct pm8xxx_ccadc_platform_data);
+
+		ret = mfd_add_devices(pmic->dev, 0, &ccadc_cell, 1, NULL,
+					irq_base);
+		if (ret) {
+			pr_err("Failed to add ccadc subdevice ret=%d\n", ret);
+			goto bail;
+		}
+	}
+
 	return 0;
 bail:
 	if (pmic->irq_chip) {
diff --git a/drivers/misc/isa1200.c b/drivers/misc/isa1200.c
index 555dfdd..6c3e787 100644
--- a/drivers/misc/isa1200.c
+++ b/drivers/misc/isa1200.c
@@ -20,6 +20,7 @@
 #include <linux/workqueue.h>
 #include <linux/slab.h>
 #include <linux/regulator/consumer.h>
+#include <linux/clk.h>
 #include <linux/i2c/isa1200.h>
 #include "../staging/android/timed_output.h"
 
@@ -48,6 +49,7 @@
 	struct regulator **regs;
 	bool clk_on;
 	u8 hctrl0_val;
+	struct clk *pwm_clk;
 };
 
 static int isa1200_read_reg(struct i2c_client *client, int reg)
@@ -107,13 +109,23 @@
 				goto chip_dwn;
 			}
 		} else if (haptic->pdata->mode_ctrl == PWM_GEN_MODE) {
-			/* vote for clock */
-			if (haptic->pdata->clk_enable && !haptic->clk_on) {
+			/* check for board-specific clk callback */
+			if (haptic->pdata->clk_enable) {
 				rc = haptic->pdata->clk_enable(true);
 				if (rc < 0) {
+					pr_err("%s: clk enable cb failed\n",
+								__func__);
+					goto chip_dwn;
+				}
+			}
+
+			/* vote for clock */
+			if (haptic->pdata->need_pwm_clk && !haptic->clk_on) {
+				rc = clk_enable(haptic->pwm_clk);
+				if (rc < 0) {
 					pr_err("%s: clk enable failed\n",
 								__func__);
-					goto chip_dwn;
+					goto dis_clk_cb;
 				}
 				haptic->clk_on = true;
 			}
@@ -150,29 +162,35 @@
 				pr_err("%s: stop vibartion fail\n", __func__);
 
 			/* de-vote clock */
-			if (haptic->pdata->clk_enable && haptic->clk_on) {
-				rc = haptic->pdata->clk_enable(false);
-				if (rc < 0) {
-					pr_err("%s: clk disable failed\n",
-								__func__);
-					return;
-				}
+			if (haptic->pdata->need_pwm_clk && haptic->clk_on) {
+				clk_disable(haptic->pwm_clk);
 				haptic->clk_on = false;
 			}
+			/* check for board-specific clk callback */
+			if (haptic->pdata->clk_enable) {
+				rc = haptic->pdata->clk_enable(false);
+				if (rc < 0)
+					pr_err("%s: clk disable cb failed\n",
+								__func__);
+			}
 		}
 	}
 
 	return;
 
 dis_clk:
-	if (haptic->pdata->clk_enable && haptic->clk_on) {
-		rc = haptic->pdata->clk_enable(false);
-		if (rc < 0) {
-			pr_err("%s: clk disable failed\n", __func__);
-			return;
-		}
+	if (haptic->pdata->need_pwm_clk && haptic->clk_on) {
+		clk_disable(haptic->pwm_clk);
 		haptic->clk_on = false;
 	}
+
+dis_clk_cb:
+	if (haptic->pdata->clk_enable) {
+		rc = haptic->pdata->clk_enable(false);
+		if (rc < 0)
+			pr_err("%s: clk disable cb failed\n", __func__);
+	}
+
 chip_dwn:
 	if (haptic->is_len_gpio_valid == true)
 		gpio_set_value_cansleep(haptic->pdata->hap_en_gpio, 0);
@@ -545,6 +563,13 @@
 			ret = PTR_ERR(haptic->pwm);
 			goto reset_hctrl0;
 		}
+	} else if (haptic->pdata->need_pwm_clk) {
+		haptic->pwm_clk = clk_get(&client->dev, "pwm_clk");
+		if (IS_ERR(haptic->pwm_clk)) {
+			dev_err(&client->dev, "pwm_clk get failed\n");
+			ret = PTR_ERR(haptic->pwm_clk);
+			goto reset_hctrl0;
+		}
 	}
 
 	printk(KERN_INFO "%s: %s registered\n", __func__, id->name);
diff --git a/drivers/mmc/host/msm_sdcc.c b/drivers/mmc/host/msm_sdcc.c
index edf4400..c87afab 100644
--- a/drivers/mmc/host/msm_sdcc.c
+++ b/drivers/mmc/host/msm_sdcc.c
@@ -378,8 +378,7 @@
 	host->curr.got_dataend = 0;
 	host->curr.wait_for_auto_prog_done = false;
 	host->curr.got_auto_prog_done = false;
-	writel_relaxed(readl_relaxed(host->base + MMCIDATACTRL) &
-			(~(MCI_DPSM_ENABLE)), host->base + MMCIDATACTRL);
+	writel_relaxed(0, host->base + MMCIDATACTRL);
 	msmsdcc_sync_reg_wr(host); /* Allow the DPSM to be reset */
 }
 
@@ -1683,7 +1682,7 @@
 			msmsdcc_delay(host);
 		}
 
-		if (!host->clks_on) {
+		if (!atomic_read(&host->clks_on)) {
 			pr_debug("%s: %s: SDIO async irq received\n",
 					mmc_hostname(host->mmc), __func__);
 
@@ -1993,7 +1992,8 @@
 	/*
 	 * Don't start the request if SDCC is not in proper state to handle it
 	 */
-	if (!host->pwr || !host->clks_on || host->sdcc_irq_disabled) {
+	if (!host->pwr || !atomic_read(&host->clks_on)
+			|| host->sdcc_irq_disabled) {
 		WARN(1, "%s: %s: SDCC is in bad state. don't process"
 		     " new request (CMD%d)\n", mmc_hostname(host->mmc),
 		     __func__, mrq->cmd->opcode);
@@ -2338,17 +2338,37 @@
  * Any function calling msmsdcc_setup_clocks must
  * acquire clk_mutex. May sleep.
  */
-static inline void msmsdcc_setup_clocks(struct msmsdcc_host *host, bool enable)
+static int msmsdcc_setup_clocks(struct msmsdcc_host *host, bool enable)
 {
-	if (enable) {
-		if (!IS_ERR_OR_NULL(host->bus_clk))
-			clk_prepare_enable(host->bus_clk);
-		if (!IS_ERR(host->pclk))
-			clk_prepare_enable(host->pclk);
-		clk_prepare_enable(host->clk);
+	int rc = 0;
+
+	if (enable && !atomic_read(&host->clks_on)) {
+		if (!IS_ERR_OR_NULL(host->bus_clk)) {
+			rc = clk_prepare_enable(host->bus_clk);
+			if (rc) {
+				pr_err("%s: %s: failed to enable the bus-clock with error %d\n",
+					mmc_hostname(host->mmc), __func__, rc);
+				goto out;
+			}
+		}
+		if (!IS_ERR(host->pclk)) {
+			rc = clk_prepare_enable(host->pclk);
+			if (rc) {
+				pr_err("%s: %s: failed to enable the pclk with error %d\n",
+					mmc_hostname(host->mmc), __func__, rc);
+				goto disable_bus;
+			}
+		}
+		rc = clk_prepare_enable(host->clk);
+		if (rc) {
+			pr_err("%s: %s: failed to enable the host-clk with error %d\n",
+				mmc_hostname(host->mmc), __func__, rc);
+			goto disable_pclk;
+		}
 		mb();
 		msmsdcc_delay(host);
-	} else {
+		atomic_set(&host->clks_on, 1);
+	} else if (!enable && atomic_read(&host->clks_on)) {
 		mb();
 		msmsdcc_delay(host);
 		clk_disable_unprepare(host->clk);
@@ -2356,7 +2376,18 @@
 			clk_disable_unprepare(host->pclk);
 		if (!IS_ERR_OR_NULL(host->bus_clk))
 			clk_disable_unprepare(host->bus_clk);
+		atomic_set(&host->clks_on, 0);
 	}
+	goto out;
+
+disable_pclk:
+	if (!IS_ERR_OR_NULL(host->pclk))
+		clk_disable_unprepare(host->pclk);
+disable_bus:
+	if (!IS_ERR_OR_NULL(host->bus_clk))
+		clk_disable_unprepare(host->bus_clk);
+out:
+	return rc;
 }
 
 static inline unsigned int msmsdcc_get_sup_clk_rate(struct msmsdcc_host *host,
@@ -2889,18 +2920,16 @@
 
 	spin_lock_irqsave(&host->lock, flags);
 	if (ios->clock) {
-		if (!host->clks_on) {
-			spin_unlock_irqrestore(&host->lock, flags);
-			msmsdcc_setup_clocks(host, true);
-			spin_lock_irqsave(&host->lock, flags);
-			host->clks_on = 1;
-			writel_relaxed(host->mci_irqenable,
-					host->base + MMCIMASK0);
-			mb();
-			msmsdcc_cfg_sdio_wakeup(host, false);
-		}
-
+		spin_unlock_irqrestore(&host->lock, flags);
+		rc = msmsdcc_setup_clocks(host, true);
+		if (rc)
+			goto out;
+		spin_lock_irqsave(&host->lock, flags);
+		writel_relaxed(host->mci_irqenable, host->base + MMCIMASK0);
+		mb();
+		msmsdcc_cfg_sdio_wakeup(host, false);
 		clock = msmsdcc_get_sup_clk_rate(host, ios->clock);
+
 		/*
 		 * For DDR50 mode, controller needs clock rate to be
 		 * double than what is required on the SD card CLK pin.
@@ -2943,7 +2972,6 @@
 		msmsdcc_delay(host);
 		clk |= MCI_CLK_ENABLE;
 	}
-
 	if (ios->bus_width == MMC_BUS_WIDTH_8)
 		clk |= MCI_CLK_WIDEBUS_8;
 	else if (ios->bus_width == MMC_BUS_WIDTH_4)
@@ -2982,7 +3010,7 @@
 		clk |= IO_PAD_PWR_SWITCH;
 
 	/* Don't write into registers if clocks are disabled */
-	if (host->clks_on) {
+	if (atomic_read(&host->clks_on)) {
 		if (readl_relaxed(host->base + MMCICLOCK) != clk) {
 			writel_relaxed(clk, host->base + MMCICLOCK);
 			msmsdcc_sync_reg_wr(host);
@@ -2994,7 +3022,7 @@
 		}
 	}
 
-	if (!(clk & MCI_CLK_ENABLE) && host->clks_on) {
+	if (!(clk & MCI_CLK_ENABLE) && atomic_read(&host->clks_on)) {
 		msmsdcc_cfg_sdio_wakeup(host, true);
 		spin_unlock_irqrestore(&host->lock, flags);
 		/*
@@ -3003,11 +3031,10 @@
 		 */
 		msmsdcc_setup_clocks(host, false);
 		spin_lock_irqsave(&host->lock, flags);
-		host->clks_on = 0;
 	}
 
 	if (host->tuning_in_progress)
-		WARN(!host->clks_on,
+		WARN(!atomic_read(&host->clks_on),
 			"tuning_in_progress but SDCC clocks are OFF\n");
 
 	/* Let interrupts be disabled if the host is powered off */
@@ -3015,8 +3042,8 @@
 		enable_irq(host->core_irqres->start);
 		host->sdcc_irq_disabled = 0;
 	}
-
 	spin_unlock_irqrestore(&host->lock, flags);
+out:
 	mutex_unlock(&host->clk_mutex);
 }
 
@@ -3089,14 +3116,14 @@
 	spin_lock_irqsave(&host->lock, flags);
 	if (enable) {
 		host->mci_irqenable |= MCI_SDIOINTOPERMASK;
-		if (host->clks_on) {
+		if (atomic_read(&host->clks_on)) {
 			writel_relaxed(readl_relaxed(host->base + MMCIMASK0) |
 				MCI_SDIOINTOPERMASK, host->base + MMCIMASK0);
 			mb();
 		}
 	} else {
 		host->mci_irqenable &= ~MCI_SDIOINTOPERMASK;
-		if (host->clks_on) {
+		if (atomic_read(&host->clks_on)) {
 			writel_relaxed(readl_relaxed(host->base + MMCIMASK0) &
 				~MCI_SDIOINTOPERMASK, host->base + MMCIMASK0);
 			mb();
@@ -3218,20 +3245,14 @@
 	}
 
 	mutex_lock(&host->clk_mutex);
-	spin_lock_irqsave(&host->lock, flags);
-	if (!host->clks_on) {
-		spin_unlock_irqrestore(&host->lock, flags);
-		msmsdcc_setup_clocks(host, true);
-		spin_lock_irqsave(&host->lock, flags);
-		host->clks_on = 1;
-	}
-	spin_unlock_irqrestore(&host->lock, flags);
+	rc = msmsdcc_setup_clocks(host, true);
 	mutex_unlock(&host->clk_mutex);
 
 out:
 	if (rc < 0) {
 		pr_info("%s: %s: failed with error %d", mmc_hostname(mmc),
 				__func__, rc);
+		msmsdcc_pm_qos_update_latency(host, 0);
 		return rc;
 	}
 	msmsdcc_msm_bus_cancel_work_and_set_vote(host, &mmc->ios);
@@ -3242,6 +3263,7 @@
 {
 	struct msmsdcc_host *host = mmc_priv(mmc);
 	unsigned long flags;
+	int rc = 0;
 
 	msmsdcc_pm_qos_update_latency(host, 0);
 
@@ -3249,19 +3271,16 @@
 		goto out;
 
 	mutex_lock(&host->clk_mutex);
-	spin_lock_irqsave(&host->lock, flags);
-	if (host->clks_on) {
-		spin_unlock_irqrestore(&host->lock, flags);
-		msmsdcc_setup_clocks(host, false);
-		spin_lock_irqsave(&host->lock, flags);
-		host->clks_on = 0;
-	}
-	spin_unlock_irqrestore(&host->lock, flags);
+	rc = msmsdcc_setup_clocks(host, false);
 	mutex_unlock(&host->clk_mutex);
 
+	if (rc) {
+		msmsdcc_pm_qos_update_latency(host, 1);
+		return rc;
+	}
 out:
 	msmsdcc_msm_bus_queue_work(host);
-	return 0;
+	return rc;
 }
 #endif
 
@@ -3718,7 +3737,7 @@
 
 	spin_lock_irqsave(&host->lock, flags);
 	WARN(!host->pwr, "SDCC power is turned off\n");
-	WARN(!host->clks_on, "SDCC clocks are turned off\n");
+	WARN(!atomic_read(&host->clks_on), "SDCC clocks are turned off\n");
 	WARN(host->sdcc_irq_disabled, "SDCC IRQ is disabled\n");
 
 	host->tuning_in_progress = 1;
@@ -4463,13 +4482,14 @@
 	pr_info("%s: SDCC PWR is %s\n", mmc_hostname(host->mmc),
 		(host->pwr ? "ON" : "OFF"));
 	pr_info("%s: SDCC clks are %s, MCLK rate=%d\n",
-		mmc_hostname(host->mmc), (host->clks_on ? "ON" : "OFF"),
+		mmc_hostname(host->mmc),
+		(atomic_read(&host->clks_on) ? "ON" : "OFF"),
 		(u32)clk_get_rate(host->clk));
 	pr_info("%s: SDCC irq is %s\n", mmc_hostname(host->mmc),
 		(host->sdcc_irq_disabled ? "disabled" : "enabled"));
 
 	/* Now dump SDCC registers. Don't print FIFO registers */
-	if (host->clks_on)
+	if (atomic_read(&host->clks_on))
 		msmsdcc_print_regs("SDCC-CORE", host->base,
 				   host->core_memres->start, 28);
 
@@ -4481,7 +4501,7 @@
 				mmc_hostname(host->mmc), host->dma.busy,
 				host->dma.channel, host->dma.crci);
 		else if (host->is_sps_mode) {
-			if (host->sps.busy && host->clks_on)
+			if (host->sps.busy && atomic_read(&host->clks_on))
 				msmsdcc_print_regs("SDCC-DML", host->dml_base,
 						   host->dml_memres->start,
 						   16);
@@ -4892,7 +4912,7 @@
 		(1 + ((3 * USEC_PER_SEC) /
 		      msmsdcc_get_min_sup_clk_rate(host)));
 
-	host->clks_on = 1;
+	atomic_set(&host->clks_on, 1);
 	/* Apply Hard reset to SDCC to put it in power on default state */
 	msmsdcc_hard_reset(host);
 
@@ -5330,6 +5350,7 @@
 {
 	struct msmsdcc_host *host = mmc_priv(mmc);
 	unsigned long flags;
+	int rc = 0;
 
 	mutex_lock(&host->clk_mutex);
 	spin_lock_irqsave(&host->lock, flags);
@@ -5342,13 +5363,9 @@
 			disable_irq_nosync(host->core_irqres->start);
 			host->sdcc_irq_disabled = 1;
 		}
-
-		if (host->clks_on) {
-			spin_unlock_irqrestore(&host->lock, flags);
-			msmsdcc_setup_clocks(host, false);
-			spin_lock_irqsave(&host->lock, flags);
-			host->clks_on = 0;
-		}
+		rc = msmsdcc_setup_clocks(host, false);
+		if (rc)
+			goto out;
 
 		if (host->plat->sdio_lpm_gpio_setup &&
 				!host->sdio_gpio_lpm) {
@@ -5364,6 +5381,10 @@
 			host->sdio_wakeupirq_disabled = 0;
 		}
 	} else {
+		rc = msmsdcc_setup_clocks(host, true);
+		if (rc)
+			goto out;
+
 		if (!host->sdio_wakeupirq_disabled) {
 			disable_irq_nosync(host->plat->sdiowakeup_irq);
 			host->sdio_wakeupirq_disabled = 1;
@@ -5378,14 +5399,7 @@
 			host->sdio_gpio_lpm = 0;
 		}
 
-		if (!host->clks_on) {
-			spin_unlock_irqrestore(&host->lock, flags);
-			msmsdcc_setup_clocks(host, true);
-			spin_lock_irqsave(&host->lock, flags);
-			host->clks_on = 1;
-		}
-
-		if (host->sdcc_irq_disabled) {
+		if (host->sdcc_irq_disabled && atomic_read(&host->clks_on)) {
 			writel_relaxed(host->mci_irqenable,
 				       host->base + MMCIMASK0);
 			mb();
@@ -5393,9 +5407,10 @@
 			host->sdcc_irq_disabled = 0;
 		}
 	}
+out:
 	spin_unlock_irqrestore(&host->lock, flags);
 	mutex_unlock(&host->clk_mutex);
-	return 0;
+	return rc;
 }
 #else
 int msmsdcc_sdio_al_lpm(struct mmc_host *mmc, bool enable)
@@ -5607,7 +5622,7 @@
 	 * during suspend and not allowing TCXO.
 	 */
 
-	if (host->clks_on && !host->plat->is_sdio_al_client) {
+	if (atomic_read(&host->clks_on) && !host->plat->is_sdio_al_client) {
 		pr_warn("%s: clocks are on after suspend, aborting system "
 				"suspend\n", mmc_hostname(mmc));
 		rc = -EAGAIN;
diff --git a/drivers/mmc/host/msm_sdcc.h b/drivers/mmc/host/msm_sdcc.h
index 5531f06..dc32d1c 100644
--- a/drivers/mmc/host/msm_sdcc.h
+++ b/drivers/mmc/host/msm_sdcc.h
@@ -351,7 +351,7 @@
 	struct clk		*clk;		/* main MMC bus clock */
 	struct clk		*pclk;		/* SDCC peripheral bus clock */
 	struct clk		*bus_clk;	/* SDCC bus voter clock */
-	unsigned int		clks_on;	/* set if clocks are enabled */
+	atomic_t		clks_on;	/* set if clocks are enabled */
 
 	unsigned int		eject;		/* eject state */
 
diff --git a/drivers/net/usb/rmnet_usb_ctrl.c b/drivers/net/usb/rmnet_usb_ctrl.c
index c2085c9..a153de1 100644
--- a/drivers/net/usb/rmnet_usb_ctrl.c
+++ b/drivers/net/usb/rmnet_usb_ctrl.c
@@ -111,7 +111,7 @@
 {
 	if (dev) {
 		mutex_lock(&dev->dev_lock);
-		if (!dev->intf) {
+		if (!dev->is_connected) {
 			mutex_unlock(&dev->dev_lock);
 			return 0;
 		}
@@ -761,10 +761,17 @@
 	dev->tx_ctrl_err_cnt = 0;
 	dev->set_ctrl_line_state_cnt = 0;
 
-	ret = rmnet_usb_ctrl_write_cmd(dev);
+	ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
+			USB_CDC_REQ_SET_CONTROL_LINE_STATE,
+			(USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE),
+			dev->cbits_tomdm,
+			dev->intf->cur_altsetting->desc.bInterfaceNumber,
+			NULL, 0, USB_CTRL_SET_TIMEOUT);
 	if (ret < 0)
 		return ret;
 
+	dev->set_ctrl_line_state_cnt++;
+
 	dev->inturb = usb_alloc_urb(0, GFP_KERNEL);
 	if (!dev->inturb) {
 		dev_err(dev->devicep, "Error allocating int urb\n");
@@ -800,7 +807,11 @@
 			 notification_available_cb, dev, interval);
 
 	usb_mark_last_busy(udev);
-	return rmnet_usb_ctrl_start_rx(dev);
+	ret = rmnet_usb_ctrl_start_rx(dev);
+	if (!ret)
+		dev->is_connected = true;
+
+	return ret;
 }
 
 void rmnet_usb_ctrl_disconnect(struct rmnet_ctrl_dev *dev)
@@ -813,7 +824,7 @@
 	dev->cbits_tolocal = ~ACM_CTRL_CD;
 
 	dev->cbits_tomdm = ~ACM_CTRL_DTR;
-	dev->intf = NULL;
+	dev->is_connected = false;
 	mutex_unlock(&dev->dev_lock);
 
 	wake_up(&dev->read_wait_queue);
diff --git a/drivers/net/usb/rmnet_usb_ctrl.h b/drivers/net/usb/rmnet_usb_ctrl.h
index bc07726..3259940 100644
--- a/drivers/net/usb/rmnet_usb_ctrl.h
+++ b/drivers/net/usb/rmnet_usb_ctrl.h
@@ -46,6 +46,8 @@
 
 	unsigned		is_opened;
 
+	bool			is_connected;
+
 	/*input control lines (DSR, CTS, CD, RI)*/
 	unsigned int		cbits_tolocal;
 
diff --git a/drivers/of/of_spmi.c b/drivers/of/of_spmi.c
index 61085c9..0c23db5 100644
--- a/drivers/of/of_spmi.c
+++ b/drivers/of/of_spmi.c
@@ -43,27 +43,12 @@
 }
 
 /*
- * Allocate dev_node array for spmi_device
- */
-static inline int of_spmi_alloc_device_store(struct of_spmi_dev_info *d_info,
-					     uint32_t num_dev_node)
-{
-	d_info->b_info.num_dev_node = num_dev_node;
-	d_info->b_info.dev_node = kzalloc(sizeof(struct spmi_resource) *
-						num_dev_node, GFP_KERNEL);
-	if (!d_info->b_info.dev_node)
-		return -ENOMEM;
-
-	return 0;
-}
-
-/*
  * Calculate the number of resources to allocate
  *
  * The caller is responsible for initializing the of_spmi_res_info structure.
  */
-static void of_spmi_sum_node_resources(struct of_spmi_res_info *r_info,
-				       bool has_reg)
+static void of_spmi_sum_resources(struct of_spmi_res_info *r_info,
+				  bool has_reg)
 {
 	struct of_irq oirq;
 	uint64_t size;
@@ -92,58 +77,48 @@
 }
 
 /*
- * free spmi_resource for the spmi_device
+ * Allocate dev_node array for spmi_device - used with spmi-dev-container
  */
-static void of_spmi_free_device_resources(struct of_spmi_dev_info *d_info)
+static inline int of_spmi_alloc_devnode_store(struct of_spmi_dev_info *d_info,
+					      uint32_t num_dev_node)
 {
-	int i;
+	d_info->b_info.num_dev_node = num_dev_node;
+	d_info->b_info.dev_node = kzalloc(sizeof(struct spmi_resource) *
+						num_dev_node, GFP_KERNEL);
+	if (!d_info->b_info.dev_node)
+		return -ENOMEM;
 
-	for (i = 0; i < d_info->b_info.num_dev_node; i++)
-		kfree(d_info->b_info.dev_node[i].resource);
-
-	kfree(d_info->b_info.dev_node);
-}
-
-/*
- * Gather node resources and populate
- */
-static void of_spmi_populate_node_resources(struct of_spmi_dev_info *d_info,
-					    struct of_spmi_res_info *r_info,
-					    int idx)
-
-{
-	uint32_t num_irq = r_info->num_irq, num_reg = r_info->num_reg;
-	int i;
-	struct resource *res;
-	const  __be32 *addrp;
-	uint64_t size;
-	uint32_t flags;
-
-	res = d_info->b_info.dev_node[idx].resource;
-	d_info->b_info.dev_node[idx].of_node = r_info->node;
-
-	if ((num_irq || num_reg) && (res != NULL)) {
-		for (i = 0; i < num_reg; i++, res++) {
-			/* Addresses are always 16 bits */
-			addrp = of_get_address(r_info->node, i, &size, &flags);
-			BUG_ON(!addrp);
-			res->start = be32_to_cpup(addrp);
-			res->end = res->start + size - 1;
-			res->flags = flags;
-		}
-		WARN_ON(of_irq_to_resource_table(r_info->node, res, num_irq) !=
-								num_irq);
-	}
+	return 0;
 }
 
 /*
  * Allocate enough memory to handle the resources associated with the
- * device_node. The number of device nodes included in this allocation
- * depends on whether the spmi-dev-container flag is specified or not.
+ * primary node.
  */
 static int of_spmi_allocate_node_resources(struct of_spmi_dev_info *d_info,
-					   struct of_spmi_res_info *r_info,
-					   uint32_t idx)
+					   struct of_spmi_res_info *r_info)
+{
+	uint32_t num_irq = r_info->num_irq, num_reg = r_info->num_reg;
+	struct resource *res = NULL;
+
+	if (num_irq || num_reg) {
+		res = kzalloc(sizeof(*res) * (num_irq + num_reg), GFP_KERNEL);
+		if (!res)
+			return -ENOMEM;
+	}
+	d_info->b_info.res.num_resources = num_reg + num_irq;
+	d_info->b_info.res.resource = res;
+
+	return 0;
+}
+
+/*
+ * Allocate enough memory to handle the resources associated with the
+ * spmi-dev-container nodes.
+ */
+static int of_spmi_allocate_devnode_resources(struct of_spmi_dev_info *d_info,
+					      struct of_spmi_res_info *r_info,
+					      uint32_t idx)
 {
 	uint32_t num_irq = r_info->num_irq, num_reg = r_info->num_reg;
 	struct resource *res = NULL;
@@ -160,6 +135,87 @@
 }
 
 /*
+ * free node resources - used with primary node
+ */
+static void of_spmi_free_node_resources(struct of_spmi_dev_info *d_info)
+{
+	kfree(d_info->b_info.res.resource);
+}
+
+/*
+ * free devnode resources - used with spmi-dev-container
+ */
+static void of_spmi_free_devnode_resources(struct of_spmi_dev_info *d_info)
+{
+	int i;
+
+	for (i = 0; i < d_info->b_info.num_dev_node; i++)
+		kfree(d_info->b_info.dev_node[i].resource);
+
+	kfree(d_info->b_info.dev_node);
+}
+
+static void of_spmi_populate_resources(struct of_spmi_dev_info *d_info,
+				       struct of_spmi_res_info *r_info,
+				       struct resource *res)
+
+{
+	uint32_t num_irq = r_info->num_irq, num_reg = r_info->num_reg;
+	int i;
+	const  __be32 *addrp;
+	uint64_t size;
+	uint32_t flags;
+
+	if ((num_irq || num_reg) && (res != NULL)) {
+		for (i = 0; i < num_reg; i++, res++) {
+			/* Addresses are always 16 bits */
+			addrp = of_get_address(r_info->node, i, &size, &flags);
+			BUG_ON(!addrp);
+			res->start = be32_to_cpup(addrp);
+			res->end = res->start + size - 1;
+			res->flags = flags;
+			of_property_read_string_index(r_info->node, "reg-names",
+								i, &res->name);
+		}
+		WARN_ON(of_irq_to_resource_table(r_info->node, res, num_irq) !=
+								num_irq);
+	}
+}
+
+/*
+ * Gather primary node resources and populate.
+ */
+static void of_spmi_populate_node_resources(struct of_spmi_dev_info *d_info,
+					    struct of_spmi_res_info *r_info)
+
+{
+	struct resource *res;
+
+	res = d_info->b_info.res.resource;
+	d_info->b_info.res.of_node = r_info->node;
+	of_property_read_string(r_info->node, "label",
+				&d_info->b_info.res.label);
+	of_spmi_populate_resources(d_info, r_info, res);
+}
+
+/*
+ * Gather devnode resources and populate - used with spmi-dev-container.
+ */
+static void of_spmi_populate_devnode_resources(struct of_spmi_dev_info *d_info,
+					       struct of_spmi_res_info *r_info,
+					       int idx)
+{
+	struct resource *res;
+
+	res = d_info->b_info.dev_node[idx].resource;
+	d_info->b_info.dev_node[idx].of_node = r_info->node;
+	of_property_read_string(r_info->node, "label",
+				&d_info->b_info.dev_node[idx].label);
+	of_spmi_populate_resources(d_info, r_info, res);
+}
+
+/*
  * create a single spmi_device
  */
 static int of_spmi_create_device(struct of_spmi_dev_info *d_info,
@@ -216,10 +272,10 @@
 		num_dev_node++;
 	}
 
-	rc = of_spmi_alloc_device_store(d_info, num_dev_node);
+	rc = of_spmi_alloc_devnode_store(d_info, num_dev_node);
 	if (rc) {
-		dev_err(&ctrl->dev, "%s: unable to allocate"
-				" device resources\n", __func__);
+		dev_err(&ctrl->dev, "%s: unable to allocate devnode resources\n",
+								__func__);
 		return;
 	}
 
@@ -228,23 +284,36 @@
 		if (!of_device_is_available(node))
 			continue;
 		of_spmi_init_resource(&r_info, node);
-		of_spmi_sum_node_resources(&r_info, 1);
-		rc = of_spmi_allocate_node_resources(d_info, &r_info, i);
+		of_spmi_sum_resources(&r_info, true);
+		rc = of_spmi_allocate_devnode_resources(d_info, &r_info, i);
 		if (rc) {
 			dev_err(&ctrl->dev, "%s: unable to allocate"
 					" resources\n", __func__);
-			of_spmi_free_device_resources(d_info);
+			of_spmi_free_devnode_resources(d_info);
 			return;
 		}
-		of_spmi_populate_node_resources(d_info, &r_info, i);
+		of_spmi_populate_devnode_resources(d_info, &r_info, i);
 		i++;
 	}
 
+	of_spmi_init_resource(&r_info, container);
+	of_spmi_sum_resources(&r_info, true);
+
+	rc = of_spmi_allocate_node_resources(d_info, &r_info);
+	if (rc) {
+		dev_err(&ctrl->dev, "%s: unable to allocate resources\n",
+								  __func__);
+		of_spmi_free_node_resources(d_info);
+		return;
+	}
+
+	of_spmi_populate_node_resources(d_info, &r_info);
+
 	rc = of_spmi_create_device(d_info, container);
 	if (rc) {
 		dev_err(&ctrl->dev, "%s: unable to create device for"
 				" node %s\n", __func__, container->full_name);
-		of_spmi_free_device_resources(d_info);
+		of_spmi_free_devnode_resources(d_info);
 		return;
 	}
 }
@@ -255,7 +324,7 @@
  * point all share the same slave_id.
  */
 static void of_spmi_walk_slave_container(struct of_spmi_dev_info *d_info,
-					struct device_node *container)
+					 struct device_node *container)
 {
 	struct spmi_controller *ctrl = d_info->ctrl;
 	struct device_node *node;
@@ -276,24 +345,17 @@
 			continue;
 		}
 
-		rc = of_spmi_alloc_device_store(d_info, 1);
-		if (rc) {
-			dev_err(&ctrl->dev, "%s: unable to allocate"
-					" device resources\n", __func__);
-			goto slave_err;
-		}
-
 		of_spmi_init_resource(&r_info, node);
-		of_spmi_sum_node_resources(&r_info, 1);
+		of_spmi_sum_resources(&r_info, true);
 
-		rc = of_spmi_allocate_node_resources(d_info, &r_info, 0);
+		rc = of_spmi_allocate_node_resources(d_info, &r_info);
 		if (rc) {
 			dev_err(&ctrl->dev, "%s: unable to allocate"
 						" resources\n", __func__);
 			goto slave_err;
 		}
 
-		of_spmi_populate_node_resources(d_info, &r_info, 0);
+		of_spmi_populate_node_resources(d_info, &r_info);
 
 		rc = of_spmi_create_device(d_info, node);
 		if (rc) {
@@ -305,7 +367,7 @@
 	return;
 
 slave_err:
-	of_spmi_free_device_resources(d_info);
+	of_spmi_free_node_resources(d_info);
 }
 
 int of_spmi_register_devices(struct spmi_controller *ctrl)
@@ -370,31 +432,23 @@
 			if (!of_device_is_available(node))
 				continue;
 
-			rc = of_spmi_alloc_device_store(&d_info, 1);
-			if (rc) {
-				dev_err(&ctrl->dev, "%s: unable to allocate"
-					" device resources\n", __func__);
-				continue;
-			}
-
 			of_spmi_init_resource(&r_info, node);
-			of_spmi_sum_node_resources(&r_info, 0);
-			rc = of_spmi_allocate_node_resources(&d_info,
-								&r_info, 0);
+			of_spmi_sum_resources(&r_info, false);
+			rc = of_spmi_allocate_node_resources(&d_info, &r_info);
 			if (rc) {
 				dev_err(&ctrl->dev, "%s: unable to allocate"
 						" resources\n", __func__);
-				of_spmi_free_device_resources(&d_info);
+				of_spmi_free_node_resources(&d_info);
 				continue;
 			}
 
-			of_spmi_populate_node_resources(&d_info, &r_info, 0);
+			of_spmi_populate_node_resources(&d_info, &r_info);
 
 			rc = of_spmi_create_device(&d_info, node);
 			if (rc) {
 				dev_err(&ctrl->dev, "%s: unable to create"
 						" device\n", __func__);
-				of_spmi_free_device_resources(&d_info);
+				of_spmi_free_node_resources(&d_info);
 				continue;
 			}
 		}
@@ -404,4 +458,4 @@
 }
 EXPORT_SYMBOL(of_spmi_register_devices);
 
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
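For reference, the reworked of_spmi flow above always follows the same three-step shape per node: size the reg/interrupt cells, allocate a resource table, then populate it before creating the spmi_device. A minimal sketch of that per-node path, using only the helpers this patch introduces (it assumes it sits inside of_spmi.c where these static helpers and types are visible; error reporting is condensed):

/* Sketch only: the per-node allocate/populate/create sequence. */
static void of_spmi_sketch_one_node(struct of_spmi_dev_info *d_info,
				    struct device_node *node)
{
	struct of_spmi_res_info r_info;
	int rc;

	of_spmi_init_resource(&r_info, node);	/* point r_info at the node */
	of_spmi_sum_resources(&r_info, true);	/* count reg + interrupt entries */

	rc = of_spmi_allocate_node_resources(d_info, &r_info);
	if (rc)
		return;				/* nothing was allocated */

	of_spmi_populate_node_resources(d_info, &r_info);

	rc = of_spmi_create_device(d_info, node);
	if (rc)
		of_spmi_free_node_resources(d_info);
}

The spmi-dev-container walker does the same per child node through the *_devnode_* variants, indexed by devnode.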
diff --git a/drivers/regulator/Kconfig b/drivers/regulator/Kconfig
index 2bbc796..6b0916e 100644
--- a/drivers/regulator/Kconfig
+++ b/drivers/regulator/Kconfig
@@ -419,8 +419,8 @@
 	  constraint checking while the real driver is being developed.
 
 config REGULATOR_QPNP
+	depends on SPMI
 	depends on OF_SPMI
-	depends on MSM_QPNP
 	tristate "Qualcomm QPNP regulator support"
 	help
 	  This driver supports voltage regulators in Qualcomm PMIC chips which
diff --git a/drivers/regulator/qpnp-regulator.c b/drivers/regulator/qpnp-regulator.c
index 120d17e..8d592fb 100644
--- a/drivers/regulator/qpnp-regulator.c
+++ b/drivers/regulator/qpnp-regulator.c
@@ -29,8 +29,6 @@
 #include <linux/regulator/of_regulator.h>
 #include <linux/regulator/qpnp-regulator.h>
 
-#include <mach/qpnp.h>
-
 /* Debug Flag Definitions */
 enum {
 	QPNP_VREG_DEBUG_REQUEST		= BIT(0), /* Show requests */
@@ -1189,7 +1187,7 @@
 	pdata->init_data.constraints.input_uV
 		= pdata->init_data.constraints.max_uV;
 
-	res = qpnp_get_resource(spmi, 0, IORESOURCE_MEM, 0);
+	res = spmi_get_resource(spmi, NULL, IORESOURCE_MEM, 0);
 	if (!res) {
 		dev_err(&spmi->dev, "%s: node is missing base address\n",
 			__func__);
diff --git a/drivers/spmi/Kconfig b/drivers/spmi/Kconfig
index 84fd462..7026ee8 100644
--- a/drivers/spmi/Kconfig
+++ b/drivers/spmi/Kconfig
@@ -19,18 +19,11 @@
 	  This is required for communicating with Qualcomm PMICs and
 	  other devices that have the SPMI interface.
 
-config MSM_QPNP
-	depends on ARCH_MSMCOPPER
-	depends on OF_SPMI
-	bool "MSM QPNP"
-	help
-	  Say 'y' here to include support for the Qualcomm QPNP
-
 config MSM_QPNP_INT
 	depends on SPARSE_IRQ
 	depends on ARCH_MSMCOPPER
+	depends on SPMI
 	depends on OF_SPMI
-	depends on MSM_QPNP
 	bool "MSM QPNP INT"
 	help
 	  Say 'y' here to include support for the Qualcomm QPNP interrupt
diff --git a/drivers/spmi/Makefile b/drivers/spmi/Makefile
index d59a610..becd823 100644
--- a/drivers/spmi/Makefile
+++ b/drivers/spmi/Makefile
@@ -1,7 +1,6 @@
 #
 # Makefile for kernel SPMI framework.
 #
-obj-$(CONFIG_SPMI)			+= spmi.o
+obj-$(CONFIG_SPMI)			+= spmi.o spmi-resources.o
 obj-$(CONFIG_SPMI_MSM_PMIC_ARB)		+= spmi-pmic-arb.o
-obj-$(CONFIG_MSM_QPNP)                  += qpnp.o
 obj-$(CONFIG_MSM_QPNP_INT)		+= qpnp-int.o
diff --git a/drivers/spmi/qpnp-int.c b/drivers/spmi/qpnp-int.c
index 2998c01..b6dfd51 100644
--- a/drivers/spmi/qpnp-int.c
+++ b/drivers/spmi/qpnp-int.c
@@ -31,8 +31,6 @@
 #include <asm/mach/irq.h>
 #include <mach/qpnp-int.h>
 
-#define QPNPINT_MAX_BUSSES 1
-
 /* 16 slave_ids, 256 per_ids per slave, and 8 ints per per_id */
 #define QPNPINT_NR_IRQS (16 * 256 * 8)
 
@@ -66,13 +64,18 @@
 
 struct q_chip_data {
 	int bus_nr;
-	struct irq_domain domain;
+	struct irq_domain *domain;
 	struct qpnp_local_int cb;
 	struct spmi_controller *spmi_ctrl;
 	struct radix_tree_root per_tree;
+	struct list_head list;
 };
 
-static struct q_chip_data chip_data[QPNPINT_MAX_BUSSES] __read_mostly;
+static LIST_HEAD(qpnpint_chips);
+static DEFINE_MUTEX(qpnpint_chips_mutex);
+
+#define QPNPINT_MAX_BUSSES 4
+struct q_chip_data *chip_lookup[QPNPINT_MAX_BUSSES];
 
 /**
  * qpnpint_encode_hwirq - translate between qpnp_irq_spec and
@@ -138,8 +141,7 @@
 	if (chip_d->cb.mask) {
 		rc = qpnpint_decode_hwirq(d->hwirq, &q_spec);
 		if (rc)
-			pr_err("%s: decode failed on hwirq %lu\n",
-						 __func__, d->hwirq);
+			pr_err("decode failed on hwirq %lu\n", d->hwirq);
 		else
 			chip_d->cb.mask(chip_d->spmi_ctrl, &q_spec,
 								irq_d->priv_d);
@@ -150,8 +152,7 @@
 	rc = qpnpint_spmi_write(irq_d, QPNPINT_REG_EN_CLR,
 					(u8 *)&irq_d->mask_shift, 1);
 	if (rc)
-		pr_err("%s: spmi failure on irq %d\n",
-						 __func__, d->irq);
+		pr_err("spmi failure on irq %d\n", d->irq);
 }
 
 static void qpnpint_irq_mask_ack(struct irq_data *d)
@@ -168,8 +169,7 @@
 	if (chip_d->cb.mask) {
 		rc = qpnpint_decode_hwirq(d->hwirq, &q_spec);
 		if (rc)
-			pr_err("%s: decode failed on hwirq %lu\n",
-						 __func__, d->hwirq);
+			pr_err("decode failed on hwirq %lu\n", d->hwirq);
 		else
 			chip_d->cb.mask(chip_d->spmi_ctrl, &q_spec,
 								irq_d->priv_d);
@@ -180,14 +180,12 @@
 	rc = qpnpint_spmi_write(irq_d, QPNPINT_REG_EN_CLR,
 							&irq_d->mask_shift, 1);
 	if (rc)
-		pr_err("%s: spmi failure on irq %d\n",
-						 __func__, d->irq);
+		pr_err("spmi failure on irq %d\n", d->irq);
 
 	rc = qpnpint_spmi_write(irq_d, QPNPINT_REG_LATCHED_CLR,
 							&irq_d->mask_shift, 1);
 	if (rc)
-		pr_err("%s: spmi failure on irq %d\n",
-						 __func__, d->irq);
+		pr_err("spmi failure on irq %d\n", d->irq);
 }
 
 static void qpnpint_irq_unmask(struct irq_data *d)
@@ -203,8 +201,7 @@
 	if (chip_d->cb.unmask) {
 		rc = qpnpint_decode_hwirq(d->hwirq, &q_spec);
 		if (rc)
-			pr_err("%s: decode failed on hwirq %lu\n",
-						 __func__, d->hwirq);
+			pr_err("decode failed on hwirq %lu\n", d->hwirq);
 		else
 			chip_d->cb.unmask(chip_d->spmi_ctrl, &q_spec,
 								irq_d->priv_d);
@@ -214,8 +211,7 @@
 	rc = qpnpint_spmi_write(irq_d, QPNPINT_REG_EN_SET,
 					&irq_d->mask_shift, 1);
 	if (rc)
-		pr_err("%s: spmi failure on irq %d\n",
-						 __func__, d->irq);
+		pr_err("spmi failure on irq %d\n", d->irq);
 }
 
 static int qpnpint_irq_set_type(struct irq_data *d, unsigned int flow_type)
@@ -244,7 +240,7 @@
 		if (flow_type & IRQF_TRIGGER_HIGH)
 			per_d->pol_high |= irq_d->mask_shift;
 		else
-			per_d->pol_high &= ~irq_d->mask_shift;
+			per_d->pol_low |= irq_d->mask_shift;
 	}
 
 	buf[0] = per_d->type;
@@ -253,8 +249,7 @@
 
 	rc = qpnpint_spmi_write(irq_d, QPNPINT_REG_SET_TYPE, &buf, 3);
 	if (rc)
-		pr_err("%s: spmi failure on irq %d\n",
-						 __func__, d->irq);
+		pr_err("spmi failure on irq %d\n", d->irq);
 	return rc;
 }
 
@@ -279,13 +274,16 @@
 		return rc;
 	irq_d->spmi_slave = q_spec.slave;
 	irq_d->spmi_offset = q_spec.per << 8;
-	irq_d->per_d->use_count++;
 	irq_d->chip_d = chip_d;
 
-	if (chip_d->cb.register_priv_data)
+	if (chip_d->cb.register_priv_data) {
 		rc = chip_d->cb.register_priv_data(chip_d->spmi_ctrl, &q_spec,
 							&irq_d->priv_d);
-	return rc;
+		if (rc)
+			return rc;
+	}
+
+	irq_d->per_d->use_count++;
+	return 0;
 }
 
 static struct q_irq_data *qpnpint_alloc_irq_data(
@@ -307,8 +305,10 @@
 	per_d = radix_tree_lookup(&chip_d->per_tree, (hwirq & ~0x7));
 	if (!per_d) {
 		per_d = kzalloc(sizeof(struct q_perip_data), GFP_KERNEL);
-		if (!per_d)
+		if (!per_d) {
+			kfree(irq_d);
 			return ERR_PTR(-ENOMEM);
+		}
 		radix_tree_insert(&chip_d->per_tree,
 				  (hwirq & ~0x7), per_d);
 	}
@@ -317,74 +317,6 @@
 	return irq_d;
 }
 
-static int qpnpint_register_int(uint32_t busno, unsigned long hwirq)
-{
-	int irq, rc;
-	struct irq_domain *domain;
-	struct q_irq_data *irq_d;
-
-	pr_debug("busno = %u hwirq = %lu\n", busno, hwirq);
-
-	if (hwirq < 0 || hwirq >= 32768) {
-		pr_err("%s: hwirq %lu out of qpnp interrupt bounds\n",
-							__func__, hwirq);
-		return -EINVAL;
-	}
-
-	if (busno < 0 || busno > QPNPINT_MAX_BUSSES) {
-		pr_err("%s: invalid bus number %d\n", __func__, busno);
-		return -EINVAL;
-	}
-
-	domain = &chip_data[busno].domain;
-	irq = irq_domain_to_irq(domain, hwirq);
-
-	rc = irq_alloc_desc_at(irq, numa_node_id());
-	if (rc < 0) {
-		if (rc != -EEXIST)
-			pr_err("%s: failed to alloc irq at %d with "
-					"rc %d\n", __func__, irq, rc);
-		return rc;
-	}
-	irq_d = qpnpint_alloc_irq_data(&chip_data[busno], hwirq);
-	if (IS_ERR(irq_d)) {
-		pr_err("%s: failed to alloc irq data %d with "
-					"rc %d\n", __func__, irq, rc);
-		rc = PTR_ERR(irq_d);
-		goto register_err_cleanup;
-	}
-	rc = qpnpint_init_irq_data(&chip_data[busno], irq_d, hwirq);
-	if (rc) {
-		pr_err("%s: failed to init irq data %d with "
-					"rc %d\n", __func__, irq, rc);
-		goto register_err_cleanup;
-	}
-
-	irq_domain_register_irq(domain, hwirq);
-
-	irq_set_chip_and_handler(irq,
-			&qpnpint_chip,
-			handle_level_irq);
-	irq_set_chip_data(irq, irq_d);
-#ifdef CONFIG_ARM
-	set_irq_flags(irq, IRQF_VALID);
-#else
-	irq_set_noprobe(irq);
-#endif
-	return 0;
-
-register_err_cleanup:
-	irq_free_desc(irq);
-	if (!IS_ERR(irq_d)) {
-		if (irq_d->per_d->use_count == 1)
-			kfree(irq_d->per_d);
-		else
-			irq_d->per_d->use_count--;
-		kfree(irq_d);
-	}
-	return rc;
-}
-
 static int qpnpint_irq_domain_dt_translate(struct irq_domain *d,
 				       struct device_node *controller,
 				       const u32 *intspec, unsigned int intsize,
@@ -392,11 +324,10 @@
 				       unsigned int *out_type)
 {
 	struct qpnp_irq_spec addr;
-	struct q_chip_data *chip_d = d->priv;
 	int ret;
 
-	pr_debug("%s: intspec[0] 0x%x intspec[1] 0x%x intspec[2] 0x%x\n",
-				__func__, intspec[0], intspec[1], intspec[2]);
+	pr_debug("intspec[0] 0x%x intspec[1] 0x%x intspec[2] 0x%x\n",
+				intspec[0], intspec[1], intspec[2]);
 
 	if (d->of_node != controller)
 		return -EINVAL;
@@ -409,41 +340,102 @@
 
 	ret = qpnpint_encode_hwirq(&addr);
 	if (ret < 0) {
-		pr_err("%s: invalid intspec\n", __func__);
+		pr_err("invalid intspec\n");
 		return ret;
 	}
 	*out_hwirq = ret;
 	*out_type = IRQ_TYPE_NONE;
 
-	/**
-	 * Register the interrupt if it's not already registered.
-	 * This implies that mapping a qpnp interrupt allocates
-	 * resources.
-	 */
-	ret = qpnpint_register_int(chip_d->bus_nr, *out_hwirq);
-	if (ret && ret != -EEXIST) {
-		pr_err("%s: Cannot register hwirq %lu\n", __func__, *out_hwirq);
-		return ret;
-	}
-
 	return 0;
 }
 
+static void qpnpint_free_irq_data(struct q_irq_data *irq_d)
+{
+	if (irq_d->per_d->use_count == 1)
+		kfree(irq_d->per_d);
+	else
+		irq_d->per_d->use_count--;
+	kfree(irq_d);
+}
+
+static int qpnpint_irq_domain_map(struct irq_domain *d,
+				  unsigned int virq, irq_hw_number_t hwirq)
+{
+	struct q_chip_data *chip_d = d->host_data;
+	struct q_irq_data *irq_d;
+	int rc;
+
+	pr_debug("hwirq = %lu\n", hwirq);
+
+	if (hwirq >= 32768) {
+		pr_err("hwirq %lu out of bounds\n", hwirq);
+		return -EINVAL;
+	}
+
+	irq_radix_revmap_insert(d, virq, hwirq);
+
+	irq_d = qpnpint_alloc_irq_data(chip_d, hwirq);
+	if (IS_ERR(irq_d)) {
+		pr_err("failed to alloc irq data for hwirq %lu\n", hwirq);
+		return PTR_ERR(irq_d);
+	}
+
+	rc = qpnpint_init_irq_data(chip_d, irq_d, hwirq);
+	if (rc) {
+		pr_err("failed to init irq data for hwirq %lu\n", hwirq);
+		goto map_err;
+	}
+
+	irq_set_chip_and_handler(virq,
+			&qpnpint_chip,
+			handle_level_irq);
+	irq_set_chip_data(virq, irq_d);
+#ifdef CONFIG_ARM
+	set_irq_flags(virq, IRQF_VALID);
+#else
+	irq_set_noprobe(virq);
+#endif
+	return 0;
+
+map_err:
+	qpnpint_free_irq_data(irq_d);
+	return rc;
+}
+
+static void qpnpint_irq_domain_unmap(struct irq_domain *d, unsigned int virq)
+{
+	struct q_irq_data *irq_d = irq_get_chip_data(virq);
+
+	if (WARN_ON(!irq_d))
+		return;
+
+	qpnpint_free_irq_data(irq_d);
+}
+
 const struct irq_domain_ops qpnpint_irq_domain_ops = {
-	.dt_translate = qpnpint_irq_domain_dt_translate,
+	.map = qpnpint_irq_domain_map,
+	.unmap = qpnpint_irq_domain_unmap,
+	.xlate = qpnpint_irq_domain_dt_translate,
 };
 
-int qpnpint_register_controller(unsigned int busno,
+int qpnpint_register_controller(struct device_node *node,
+				struct spmi_controller *ctrl,
 				struct qpnp_local_int *li_cb)
 {
-	if (busno >= QPNPINT_MAX_BUSSES)
-		return -EINVAL;
-	chip_data[busno].cb = *li_cb;
-	chip_data[busno].spmi_ctrl = spmi_busnum_to_ctrl(busno);
-	if (!chip_data[busno].spmi_ctrl)
-		return -ENOENT;
+	struct q_chip_data *chip_d;
 
-	return 0;
+	if (!node || !ctrl || ctrl->nr >= QPNPINT_MAX_BUSSES)
+		return -EINVAL;
+
+	list_for_each_entry(chip_d, &qpnpint_chips, list)
+		if (node == chip_d->domain->of_node) {
+			chip_d->cb = *li_cb;
+			chip_d->spmi_ctrl = ctrl;
+			chip_lookup[ctrl->nr] = chip_d;
+			return 0;
+		}
+
+	return -ENOENT;
 }
 EXPORT_SYMBOL(qpnpint_register_controller);
 
@@ -457,21 +449,18 @@
 	pr_debug("spec slave = %u per = %u irq = %u\n",
 					spec->slave, spec->per, spec->irq);
 
-	if (!spec || !spmi_ctrl)
-		return -EINVAL;
-
 	busno = spmi_ctrl->nr;
-	if (busno >= QPNPINT_MAX_BUSSES)
+	if (!spec || !spmi_ctrl || busno >= QPNPINT_MAX_BUSSES)
 		return -EINVAL;
 
 	hwirq = qpnpint_encode_hwirq(spec);
 	if (hwirq < 0) {
-		pr_err("%s: invalid irq spec passed\n", __func__);
+		pr_err("invalid irq spec passed\n");
 		return -EINVAL;
 	}
 
-	domain = &chip_data[busno].domain;
-	irq = irq_domain_to_irq(domain, hwirq);
+	domain = chip_lookup[busno]->domain;
+	irq = irq_radix_revmap_lookup(domain, hwirq);
 
 	generic_handle_irq(irq);
 
@@ -479,31 +468,24 @@
 }
 EXPORT_SYMBOL(qpnpint_handle_irq);
 
-/**
- * This assumes that there's a relationship between the order of the interrupt
- * controllers specified to of_irq_match() is the SPMI device topology. If
- * this ever turns out to be a bad assumption, then of_irq_init_cb_t should
- * be modified to pass a parameter to this function.
- */
-static int qpnpint_cnt __initdata;
-
 int __init qpnpint_of_init(struct device_node *node, struct device_node *parent)
 {
-	struct q_chip_data *chip_d = &chip_data[qpnpint_cnt];
-	struct irq_domain *domain = &chip_d->domain;
+	struct q_chip_data *chip_d;
+
+	chip_d = kzalloc(sizeof(struct q_chip_data), GFP_KERNEL);
+	if (!chip_d)
+		return -ENOMEM;
+
+	chip_d->domain = irq_domain_add_tree(node,
+					&qpnpint_irq_domain_ops, chip_d);
+	if (!chip_d->domain) {
+		pr_err("Unable to allocate irq_domain\n");
+		kfree(chip_d);
+		return -ENOMEM;
+	}
 
 	INIT_RADIX_TREE(&chip_d->per_tree, GFP_ATOMIC);
-
-	domain->irq_base = irq_domain_find_free_range(0, QPNPINT_NR_IRQS);
-	domain->nr_irq = QPNPINT_NR_IRQS;
-	domain->of_node = of_node_get(node);
-	domain->priv = chip_d;
-	domain->ops = &qpnpint_irq_domain_ops;
-	irq_domain_add(domain);
-
-	pr_info("irq_base = %d\n", domain->irq_base);
-
-	qpnpint_cnt++;
+	list_add(&chip_d->list, &qpnpint_chips);
 
 	return 0;
 }
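The interrupt controller rework keeps the same 16-slave x 256-peripheral x 8-line hwirq space (QPNPINT_NR_IRQS above) but resolves hwirqs through a per-controller irq_domain instead of a static chip_data[] array. The exact bit packing of qpnpint_encode_hwirq() is not visible in this hunk, so the layout below is only an assumed illustration that happens to cover the same 32768-entry space; a standalone sketch:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical packing: 4 bits slave, 8 bits peripheral, 3 bits irq.
 * 16 * 256 * 8 == 32768 == QPNPINT_NR_IRQS; the real encoder may differ. */
static uint32_t sketch_encode_hwirq(uint32_t slave, uint32_t per, uint32_t irq)
{
	if (slave > 15 || per > 255 || irq > 7)
		return UINT32_MAX;		/* out of range */
	return (slave << 11) | (per << 3) | irq;
}

int main(void)
{
	/* e.g. slave 2, peripheral 0x40, interrupt line 5 */
	printf("hwirq = %u\n", (unsigned)sketch_encode_hwirq(2, 0x40, 5));
	return 0;
}

Decoding is the mirror image of the packing, which is why the mask/unmask paths above first run qpnpint_decode_hwirq() before calling into the per-bus callbacks.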
diff --git a/drivers/spmi/qpnp.c b/drivers/spmi/qpnp.c
deleted file mode 100644
index a164efb..0000000
--- a/drivers/spmi/qpnp.c
+++ /dev/null
@@ -1,56 +0,0 @@
-/* Copyright (c) 2002-3 Patrick Mochel
- * Copyright (c) 2002-3 Open Source Development Labs
- * Copyright (c) 2012, Code Aurora Forum. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * Resource handling based on platform.c.
- */
-
-#include <linux/export.h>
-#include <mach/qpnp.h>
-
-/**
- * qpnp_get_resource - get a resource for a device
- * @dev: qpnp device
- * @type: resource type
- * @num: resource index
- */
-struct resource *qpnp_get_resource(struct spmi_device *dev,
-				   unsigned int node_idx, unsigned int type,
-				   unsigned int res_num)
-{
-	int i;
-
-	for (i = 0; i < dev->dev_node[node_idx].num_resources; i++) {
-		struct resource *r = &dev->dev_node[node_idx].resource[i];
-
-		if (type == resource_type(r) && res_num-- == 0)
-			return r;
-	}
-	return NULL;
-}
-EXPORT_SYMBOL_GPL(qpnp_get_resource);
-
-/**
- * qpnp_get_irq - get an IRQ for a device
- * @dev: qpnp device
- * @num: IRQ number index
- */
-int qpnp_get_irq(struct spmi_device *dev, unsigned int node_idx,
-					  unsigned int res_num)
-{
-	struct resource *r = qpnp_get_resource(dev, node_idx,
-						IORESOURCE_IRQ, res_num);
-
-	return r ? r->start : -ENXIO;
-}
-EXPORT_SYMBOL_GPL(qpnp_get_irq);
-
diff --git a/drivers/spmi/spmi-pmic-arb.c b/drivers/spmi/spmi-pmic-arb.c
index f22b900..422e99e 100644
--- a/drivers/spmi/spmi-pmic-arb.c
+++ b/drivers/spmi/spmi-pmic-arb.c
@@ -664,7 +664,14 @@
 		goto err_add_controller;
 
 	/* Register the interrupt enable/disable functions */
-	qpnpint_register_controller(cell_index, &spmi_pmic_arb_intr_cb);
+	ret = qpnpint_register_controller(pmic_arb->controller.dev.of_node,
+					  &pmic_arb->controller,
+					  &spmi_pmic_arb_intr_cb);
+	if (ret) {
+		dev_err(&pdev->dev, "Unable to register controller %d\n",
+					cell_index);
+		goto err_reg_controller;
+	}
 
 	/* Register device(s) from the device tree */
 	of_spmi_register_devices(&pmic_arb->controller);
@@ -674,6 +681,8 @@
 
 	return 0;
 
+err_reg_controller:
+	spmi_del_controller(&pmic_arb->controller);
 err_add_controller:
 	platform_set_drvdata(pdev, NULL);
 	return ret;
diff --git a/drivers/spmi/spmi-resources.c b/drivers/spmi/spmi-resources.c
new file mode 100644
index 0000000..97f15ae
--- /dev/null
+++ b/drivers/spmi/spmi-resources.c
@@ -0,0 +1,151 @@
+/* Copyright (c) 2002-3 Patrick Mochel
+ * Copyright (c) 2002-3 Open Source Development Labs
+ * Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * Resource handling based on platform.c.
+ */
+
+#include <linux/export.h>
+#include <linux/spmi.h>
+#include <linux/string.h>
+
+/**
+ * spmi_get_resource - get a resource for a device
+ * @dev: spmi device
+ * @node: device node resource
+ * @type: resource type
+ * @res_num: resource index
+ *
+ * If 'node' is NULL, the API falls back to the device's primary resource
+ * node. For configurations that do not use spmi-dev-container there is
+ * only one node to begin with, so NULL should be passed in this case.
+ *
+ * Returns
+ *  NULL on failure.
+ */
+struct resource *spmi_get_resource(struct spmi_device *dev,
+				   struct spmi_resource *node,
+				   unsigned int type, unsigned int res_num)
+{
+	int i;
+
+	/* if a node is not specified, default to the first node */
+	if (!node)
+		node = &dev->res;
+
+	for (i = 0; i < node->num_resources; i++) {
+		struct resource *r = &node->resource[i];
+
+		if (type == resource_type(r) && res_num-- == 0)
+			return r;
+	}
+	return NULL;
+}
+EXPORT_SYMBOL_GPL(spmi_get_resource);
+
+#define SPMI_MAX_RES_NAME 256
+
+/**
+ * spmi_get_resource_byname - get a resource for a device given a name
+ * @dev: spmi device handle
+ * @node: device node resource
+ * @type: resource type
+ * @name: resource name to lookup
+ */
+struct resource *spmi_get_resource_byname(struct spmi_device *dev,
+					  struct spmi_resource *node,
+					  unsigned int type,
+					  const char *name)
+{
+	int i;
+
+	/* if a node is not specified, default to the first node */
+	if (!node)
+		node = &dev->res;
+
+	for (i = 0; i < node->num_resources; i++) {
+		struct resource *r = &node->resource[i];
+
+		if (type == resource_type(r) && r->name &&
+				!strncmp(r->name, name, SPMI_MAX_RES_NAME))
+			return r;
+	}
+	return NULL;
+}
+EXPORT_SYMBOL_GPL(spmi_get_resource_byname);
+
+/**
+ * spmi_get_irq - get an IRQ for a device
+ * @dev: spmi device
+ * @node: device node resource
+ * @res_num: IRQ number index
+ *
+ * Returns
+ *  -ENXIO on failure.
+ */
+int spmi_get_irq(struct spmi_device *dev, struct spmi_resource *node,
+					  unsigned int res_num)
+{
+	struct resource *r = spmi_get_resource(dev, node,
+						IORESOURCE_IRQ, res_num);
+
+	return r ? r->start : -ENXIO;
+}
+EXPORT_SYMBOL_GPL(spmi_get_irq);
+
+/**
+ * spmi_get_irq_byname - get an IRQ for a device given a name
+ * @dev: spmi device handle
+ * @node: device node resource
+ * @name: resource name to lookup
+ *
+ * Returns -ENXIO on failure
+ */
+int spmi_get_irq_byname(struct spmi_device *dev,
+			struct spmi_resource *node, const char *name)
+{
+	struct resource *r = spmi_get_resource_byname(dev, node,
+							IORESOURCE_IRQ, name);
+	return r ? r->start : -ENXIO;
+}
+EXPORT_SYMBOL_GPL(spmi_get_irq_byname);
+
+/**
+ * spmi_get_dev_container_byname - get a device node resource
+ * @dev: spmi device handle
+ * @label: device name to lookup
+ *
+ * Only usable in spmi-dev-container configurations. Given a label,
+ * find the associated spmi_resource that matches it.
+ *
+ * Returns NULL if the spmi_device is not a dev-container,
+ * or if the lookup fails.
+ */
+struct spmi_resource *spmi_get_dev_container_byname(struct spmi_device *dev,
+						    const char *label)
+{
+	int i;
+
+	if (!label)
+		return NULL;
+
+	for (i = 0; i < dev->num_dev_node; i++) {
+		struct spmi_resource *r = &dev->dev_node[i];
+
+		if (r && r->label && !strncmp(r->label,
+					label, SPMI_MAX_RES_NAME))
+			return r;
+	}
+	return NULL;
+}
+EXPORT_SYMBOL(spmi_get_dev_container_byname);
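The new spmi-resources helpers are drop-in replacements for the removed qpnp_get_*() calls, keyed on an spmi_resource node where NULL means the device's primary node. A short probe-time usage sketch, assuming <linux/spmi.h> is included; the "ocp" interrupt name is made up for illustration:

static int sketch_probe(struct spmi_device *spmi)
{
	struct resource *res;
	int irq;

	/* primary node: pass NULL, exactly as qpnp-regulator now does */
	res = spmi_get_resource(spmi, NULL, IORESOURCE_MEM, 0);
	if (!res)
		return -ENXIO;

	/* named lookup; "ocp" is a hypothetical interrupt name */
	irq = spmi_get_irq_byname(spmi, NULL, "ocp");
	if (irq < 0)
		dev_dbg(&spmi->dev, "no ocp irq, continuing without it\n");

	dev_info(&spmi->dev, "base 0x%x, irq %d\n",
		 (unsigned int)res->start, irq);
	return 0;
}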
diff --git a/drivers/spmi/spmi.c b/drivers/spmi/spmi.c
index 0342b97..914df95 100644
--- a/drivers/spmi/spmi.c
+++ b/drivers/spmi/spmi.c
@@ -238,6 +238,7 @@
 	spmidev->dev.platform_data = (void *)info->platform_data;
 	spmidev->num_dev_node = info->num_dev_node;
 	spmidev->dev_node = info->dev_node;
+	spmidev->res = info->res;
 
 	rc = spmi_add_device(spmidev);
 	if (rc < 0) {
diff --git a/drivers/staging/android/ashmem.c b/drivers/staging/android/ashmem.c
index 8390f5d..5d79bd2 100644
--- a/drivers/staging/android/ashmem.c
+++ b/drivers/staging/android/ashmem.c
@@ -631,24 +631,32 @@
 {
 	unsigned int physaddr = 0;
 	pgd_t *pgd_ptr = NULL;
+	pud_t *pud_ptr = NULL;
 	pmd_t *pmd_ptr = NULL;
 	pte_t *pte_ptr = NULL, pte;
 
 	spin_lock(&current->mm->page_table_lock);
 	pgd_ptr = pgd_offset(current->mm, virtaddr);
-	if (pgd_none(*pgd) || pgd_bad(*pgd)) {
+	if (pgd_none(*pgd_ptr) || pgd_bad(*pgd_ptr)) {
 		pr_err("Failed to convert virtaddr %x to pgd_ptr\n",
 			virtaddr);
 		goto done;
 	}
 
-	pmd_ptr = pmd_offset(pgd_ptr, virtaddr);
-	if (pmd_none(*pmd_ptr) || pmd_bad(*pmd_ptr)) {
-		pr_err("Failed to convert pgd_ptr %p to pmd_ptr\n",
+	pud_ptr = pud_offset(pgd_ptr, virtaddr);
+	if (pud_none(*pud_ptr) || pud_bad(*pud_ptr)) {
+		pr_err("Failed to convert pgd_ptr %p to pud_ptr\n",
 			(void *)pgd_ptr);
 		goto done;
 	}
 
+	pmd_ptr = pmd_offset(pud_ptr, virtaddr);
+	if (pmd_none(*pmd_ptr) || pmd_bad(*pmd_ptr)) {
+		pr_err("Failed to convert pud_ptr %p to pmd_ptr\n",
+			(void *)pud_ptr);
+		goto done;
+	}
+
 	pte_ptr = pte_offset_map(pmd_ptr, virtaddr);
 	if (!pte_ptr) {
 		pr_err("Failed to convert pmd_ptr %p to pte_ptr\n",
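The ashmem fix above inserts the PUD level that was missing from the virtual-to-physical walk. Condensed, the full four-level lookup the patched function performs looks like this (a sketch under the same assumptions as the driver: 32-bit physical addresses, current->mm, and the caller holding page_table_lock):

static unsigned int sketch_virt_to_phys(unsigned long virtaddr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	unsigned int physaddr = 0;

	pgd = pgd_offset(current->mm, virtaddr);
	if (pgd_none(*pgd) || pgd_bad(*pgd))
		return 0;

	pud = pud_offset(pgd, virtaddr);
	if (pud_none(*pud) || pud_bad(*pud))
		return 0;

	pmd = pmd_offset(pud, virtaddr);
	if (pmd_none(*pmd) || pmd_bad(*pmd))
		return 0;

	pte = pte_offset_map(pmd, virtaddr);
	if (!pte)
		return 0;
	if (pte_present(*pte))
		physaddr = (pte_pfn(*pte) << PAGE_SHIFT) |
			   (virtaddr & ~PAGE_MASK);
	pte_unmap(pte);

	return physaddr;
}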
diff --git a/drivers/thermal/Kconfig b/drivers/thermal/Kconfig
index fbe0dd7..d99a02a 100644
--- a/drivers/thermal/Kconfig
+++ b/drivers/thermal/Kconfig
@@ -66,6 +66,7 @@
 config THERMAL_MONITOR
 	bool "Monitor thermal state and limit CPU Frequency"
 	depends on THERMAL_TSENS8960
+	depends on CPU_FREQ_MSM
 	default n
 	help
 	  This enables thermal monitoring capability in the kernel in the
diff --git a/drivers/thermal/msm_thermal.c b/drivers/thermal/msm_thermal.c
index e0d8d14..a8d3720 100644
--- a/drivers/thermal/msm_thermal.c
+++ b/drivers/thermal/msm_thermal.c
@@ -14,60 +14,51 @@
 #include <linux/kernel.h>
 #include <linux/init.h>
 #include <linux/module.h>
-#include <linux/cpufreq.h>
 #include <linux/mutex.h>
 #include <linux/msm_tsens.h>
 #include <linux/workqueue.h>
 #include <linux/cpu.h>
-
-#define DEF_TEMP_SENSOR      0
-#define DEF_THERMAL_CHECK_MS 1000
-#define DEF_ALLOWED_MAX_HIGH 60
-#define DEF_ALLOWED_MAX_FREQ 918000
+#include <linux/cpufreq.h>
+#include <linux/msm_tsens.h>
+#include <linux/msm_thermal.h>
+#include <mach/cpufreq.h>
 
 static int enabled;
-static int allowed_max_high = DEF_ALLOWED_MAX_HIGH;
-static int allowed_max_low = (DEF_ALLOWED_MAX_HIGH - 10);
-static int allowed_max_freq = DEF_ALLOWED_MAX_FREQ;
-static int check_interval_ms = DEF_THERMAL_CHECK_MS;
-
-module_param(allowed_max_high, int, 0);
-module_param(allowed_max_freq, int, 0);
-module_param(check_interval_ms, int, 0);
-
+static struct msm_thermal_data msm_thermal_info;
+static uint32_t limited_max_freq = MSM_CPUFREQ_NO_LIMIT;
 static struct delayed_work check_temp_work;
 
-static int update_cpu_max_freq(struct cpufreq_policy *cpu_policy,
-			       int cpu, int max_freq)
+static int update_cpu_max_freq(int cpu, uint32_t max_freq)
 {
 	int ret = 0;
 
-	if (!cpu_policy)
-		return -EINVAL;
-
-	cpufreq_verify_within_limits(cpu_policy,
-				cpu_policy->min, max_freq);
-	cpu_policy->user_policy.max = max_freq;
+	ret = msm_cpufreq_set_freq_limits(cpu, MSM_CPUFREQ_NO_LIMIT, max_freq);
+	if (ret)
+		return ret;
 
 	ret = cpufreq_update_policy(cpu);
-	if (!ret)
-		pr_info("msm_thermal: Limiting core%d max frequency to %d\n",
-			cpu, max_freq);
+	if (ret)
+		return ret;
+
+	limited_max_freq = max_freq;
+	if (max_freq != MSM_CPUFREQ_NO_LIMIT)
+		pr_info("msm_thermal: Limiting cpu%d max frequency to %d\n",
+				cpu, max_freq);
+	else
+		pr_info("msm_thermal: Max frequency reset for cpu%d\n", cpu);
 
 	return ret;
 }
 
 static void check_temp(struct work_struct *work)
 {
-	struct cpufreq_policy *cpu_policy = NULL;
 	struct tsens_device tsens_dev;
 	unsigned long temp = 0;
-	unsigned int max_freq = 0;
-	int update_policy = 0;
+	uint32_t max_freq = limited_max_freq;
 	int cpu = 0;
 	int ret = 0;
 
-	tsens_dev.sensor_num = DEF_TEMP_SENSOR;
+	tsens_dev.sensor_num = msm_thermal_info.sensor_id;
 	ret = tsens_get_temp(&tsens_dev, &temp);
 	if (ret) {
 		pr_debug("msm_thermal: Unable to read TSENS sensor %d\n",
@@ -75,61 +66,42 @@
 		goto reschedule;
 	}
 
+	if (temp >= msm_thermal_info.limit_temp)
+		max_freq = msm_thermal_info.limit_freq;
+	else if (temp <
+		msm_thermal_info.limit_temp - msm_thermal_info.temp_hysteresis)
+		max_freq = MSM_CPUFREQ_NO_LIMIT;
+
+	if (max_freq == limited_max_freq)
+		goto reschedule;
+
+	/* Update new limits */
 	for_each_possible_cpu(cpu) {
-		update_policy = 0;
-		cpu_policy = cpufreq_cpu_get(cpu);
-		if (!cpu_policy) {
-			pr_debug("msm_thermal: NULL policy on cpu %d\n", cpu);
-			continue;
-		}
-		if (temp >= allowed_max_high) {
-			if (cpu_policy->max > allowed_max_freq) {
-				update_policy = 1;
-				max_freq = allowed_max_freq;
-			} else {
-				pr_debug("msm_thermal: policy max for cpu %d "
-					 "already < allowed_max_freq\n", cpu);
-			}
-		} else if (temp < allowed_max_low) {
-			if (cpu_policy->max < cpu_policy->cpuinfo.max_freq) {
-				max_freq = cpu_policy->cpuinfo.max_freq;
-				update_policy = 1;
-			} else {
-				pr_debug("msm_thermal: policy max for cpu %d "
-					 "already at max allowed\n", cpu);
-			}
-		}
-
-		if (update_policy)
-			update_cpu_max_freq(cpu_policy, cpu, max_freq);
-
-		cpufreq_cpu_put(cpu_policy);
+		ret = update_cpu_max_freq(cpu, max_freq);
+		if (ret)
+			pr_debug("Unable to limit cpu%d max freq to %d\n",
+					cpu, max_freq);
 	}
 
 reschedule:
 	if (enabled)
 		schedule_delayed_work(&check_temp_work,
-				msecs_to_jiffies(check_interval_ms));
+				msecs_to_jiffies(msm_thermal_info.poll_ms));
 }
 
 static void disable_msm_thermal(void)
 {
 	int cpu = 0;
-	struct cpufreq_policy *cpu_policy = NULL;
 
 	/* make sure check_temp is no longer running */
 	cancel_delayed_work(&check_temp_work);
 	flush_scheduled_work();
 
+	if (limited_max_freq == MSM_CPUFREQ_NO_LIMIT)
+		return;
+
 	for_each_possible_cpu(cpu) {
-		cpu_policy = cpufreq_cpu_get(cpu);
-		if (cpu_policy) {
-			if (cpu_policy->max < cpu_policy->cpuinfo.max_freq)
-				update_cpu_max_freq(cpu_policy, cpu,
-						    cpu_policy->
-						    cpuinfo.max_freq);
-			cpufreq_cpu_put(cpu_policy);
-		}
+		update_cpu_max_freq(cpu, MSM_CPUFREQ_NO_LIMIT);
 	}
 }
 
@@ -156,16 +128,17 @@
 module_param_cb(enabled, &module_ops, &enabled, 0644);
 MODULE_PARM_DESC(enabled, "enforce thermal limit on cpu");
 
-static int __init msm_thermal_init(void)
+int __init msm_thermal_init(struct msm_thermal_data *pdata)
 {
 	int ret = 0;
 
+	BUG_ON(!pdata);
+	BUG_ON(pdata->sensor_id >= TSENS_MAX_SENSORS);
+	memcpy(&msm_thermal_info, pdata, sizeof(struct msm_thermal_data));
+
 	enabled = 1;
 	INIT_DELAYED_WORK(&check_temp_work, check_temp);
-
 	schedule_delayed_work(&check_temp_work, 0);
 
 	return ret;
 }
-fs_initcall(msm_thermal_init);
-
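With the platform-data rework, check_temp() applies one frequency cap to every CPU using a simple hysteresis rule: at or above limit_temp the cap becomes limit_freq, the cap is lifted only once the temperature falls below limit_temp - temp_hysteresis, and in between the previous limit is kept. The decision in isolation, as plain C (NO_LIMIT stands in for MSM_CPUFREQ_NO_LIMIT; the 60/10/918000 values echo the defaults this patch removes):

#include <stdint.h>
#include <stdio.h>

#define NO_LIMIT	UINT32_MAX	/* stands in for MSM_CPUFREQ_NO_LIMIT */

struct thermal_cfg {
	long limit_temp;	/* degC: start limiting here             */
	long temp_hysteresis;	/* degC: release at limit_temp - this    */
	uint32_t limit_freq;	/* kHz cap applied while hot             */
};

static uint32_t next_max_freq(const struct thermal_cfg *c, long temp,
			      uint32_t current_limit)
{
	if (temp >= c->limit_temp)
		return c->limit_freq;
	if (temp < c->limit_temp - c->temp_hysteresis)
		return NO_LIMIT;
	return current_limit;	/* inside the hysteresis band: no change */
}

int main(void)
{
	struct thermal_cfg c = { .limit_temp = 60, .temp_hysteresis = 10,
				 .limit_freq = 918000 };
	uint32_t limit = NO_LIMIT;
	long samples[] = { 55, 61, 57, 49 };

	for (unsigned i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
		limit = next_max_freq(&c, samples[i], limit);
		printf("temp=%ld -> limit=%u\n", samples[i], (unsigned)limit);
	}
	return 0;
}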
diff --git a/drivers/tty/serial/msm_serial_hs_lite.c b/drivers/tty/serial/msm_serial_hs_lite.c
index 5735534..ce197be 100644
--- a/drivers/tty/serial/msm_serial_hs_lite.c
+++ b/drivers/tty/serial/msm_serial_hs_lite.c
@@ -122,9 +122,15 @@
 	},
 	{}
 };
+
+#ifdef CONFIG_SERIAL_MSM_HSL_CONSOLE
+static int get_console_state(struct uart_port *port);
+#else
+static inline int get_console_state(struct uart_port *port) { return -ENODEV; }
+#endif
+
 static struct dentry *debug_base;
 static inline void wait_for_xmitr(struct uart_port *port);
-static int get_console_state(struct uart_port *port);
 static inline void msm_hsl_write(struct uart_port *port,
 				 unsigned int val, unsigned int off)
 {
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 5dceb41..d97d548 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -2808,6 +2808,7 @@
 int usb_remote_wakeup(struct usb_device *udev)
 {
 	int	status = 0;
+	struct usb_hcd *hcd = bus_to_hcd(udev->bus);
 
 	if (udev->state == USB_STATE_SUSPENDED) {
 		dev_dbg(&udev->dev, "usb %sresume\n", "wakeup-");
@@ -2816,7 +2817,11 @@
 			/* Let the drivers do their thing, then... */
 			usb_autosuspend_device(udev);
 		}
+	} else {
+		dev_dbg(&udev->dev, "usb not suspended\n");
+		clear_bit(HCD_FLAG_WAKEUP_PENDING, &hcd->flags);
 	}
+
 	return status;
 }
 
@@ -3152,7 +3157,9 @@
 	 * value.
 	 */
 	for (i = 0; i < GET_DESCRIPTOR_TRIES; (++i, msleep(100))) {
-		if (USE_NEW_SCHEME(retry_counter) && !(hcd->driver->flags & HCD_USB3)) {
+		if (USE_NEW_SCHEME(retry_counter) &&
+			!(hcd->driver->flags & HCD_USB3) &&
+			!(hcd->driver->flags & HCD_OLD_ENUM)) {
 			struct usb_device_descriptor *buf;
 			int r = 0;
 
@@ -3252,7 +3259,9 @@
 			 *  - read ep0 maxpacket even for high and low speed,
 			 */
 			msleep(10);
-			if (USE_NEW_SCHEME(retry_counter) && !(hcd->driver->flags & HCD_USB3))
+			if (USE_NEW_SCHEME(retry_counter) &&
+				!(hcd->driver->flags & HCD_USB3) &&
+				!(hcd->driver->flags & HCD_OLD_ENUM))
 				break;
   		}
 
diff --git a/drivers/usb/dwc3/dwc3-msm.c b/drivers/usb/dwc3/dwc3-msm.c
index d216f17..05f1a60 100644
--- a/drivers/usb/dwc3/dwc3-msm.c
+++ b/drivers/usb/dwc3/dwc3-msm.c
@@ -147,13 +147,6 @@
 #define USB_PHY_VDD_DIG_VOL_MIN		1045000 /* uV */
 #define USB_PHY_VDD_DIG_VOL_MAX		1320000 /* uV */
 
-enum usb_vdd_value {
-	VDD_NONE = 0,
-	VDD_MIN,
-	VDD_MAX,
-	VDD_VAL_MAX,
-};
-
 static const int vdd_val[VDD_TYPE_MAX][VDD_VAL_MAX] = {
 		{  /* VDD_CX CORNER Voting */
 			[VDD_NONE]	= RPM_VREG_CORNER_NONE,
diff --git a/drivers/usb/gadget/android.c b/drivers/usb/gadget/android.c
index be8e6aa..b5a7291 100644
--- a/drivers/usb/gadget/android.c
+++ b/drivers/usb/gadget/android.c
@@ -1438,7 +1438,6 @@
 
 	sscanf(buff, "%d", &enabled);
 	if (enabled && !dev->enabled) {
-		cdev->next_string_id = 0;
 		/*
 		 * Update values in composite driver's copy of
 		 * device descriptor.
diff --git a/drivers/usb/gadget/ci13xxx_udc.c b/drivers/usb/gadget/ci13xxx_udc.c
index 8cdc2e9..487647e 100644
--- a/drivers/usb/gadget/ci13xxx_udc.c
+++ b/drivers/usb/gadget/ci13xxx_udc.c
@@ -48,7 +48,6 @@
  * - Handle requests which spawns into several TDs
  * - GET_STATUS(device) - always reports 0
  * - Gadget API (majority of optional features)
- * - Suspend & Remote Wakeup
  */
 #include <linux/delay.h>
 #include <linux/device.h>
@@ -170,6 +169,8 @@
 #define CAP_ENDPTCTRL       (hw_bank.lpm ? 0x0ECUL : 0x080UL)
 #define CAP_LAST            (hw_bank.lpm ? 0x12CUL : 0x0C0UL)
 
+#define REMOTE_WAKEUP_DELAY	msecs_to_jiffies(200)
+
 /* maximum number of enpoints: valid only after hw_device_reset() */
 static unsigned hw_ep_max;
 
@@ -1523,6 +1524,13 @@
 	return ret;
 }
 
+static void usb_do_remote_wakeup(struct work_struct *w)
+{
+	struct ci13xxx *udc = _udc;
+
+	ci13xxx_wakeup(&udc->gadget);
+}
+
 static ssize_t usb_remote_wakeup(struct device *dev,
 		struct device_attribute *attr, const char *buf, size_t count)
 {
@@ -1652,6 +1660,7 @@
 	unsigned i;
 	int ret = 0;
 	unsigned length = mReq->req.length;
+	struct ci13xxx *udc = _udc;
 
 	trace("%p, %p", mEp, mReq);
 
@@ -1728,6 +1737,18 @@
 		mReq->ptr->page[i] =
 			(mReq->req.dma + i * CI13XXX_PAGE_SIZE) & ~TD_RESERVED_MASK;
 
+	/* Remote Wakeup */
+	if (udc->suspended) {
+		if (!udc->remote_wakeup) {
+			mReq->req.status = -EAGAIN;
+			dev_dbg(mEp->device, "%s: queue failed (suspend) ept #%d\n",
+				__func__, mEp->num);
+			return -EAGAIN;
+		}
+		usb_phy_set_suspend(udc->transceiver, 0);
+		schedule_delayed_work(&udc->rw_work, REMOTE_WAKEUP_DELAY);
+	}
+
 	if (!list_empty(&mEp->qh.queue)) {
 		struct ci13xxx_req *mReqPrev;
 		int n = hw_ep_bit(mEp->num, mEp->dir);
@@ -1960,6 +1981,8 @@
 	gadget->host_request = 0;
 	gadget->otg_srp_reqd = 0;
 
+	cancel_delayed_work_sync(&udc->rw_work);
+
 	/* flush all endpoints */
 	gadget_for_each_ep(ep, gadget) {
 		usb_ep_fifo_flush(ep);
@@ -3341,6 +3364,8 @@
 		}
 	}
 
+	INIT_DELAYED_WORK(&udc->rw_work, usb_do_remote_wakeup);
+
 	retval = hw_device_init(regs);
 	if (retval < 0)
 		goto put_transceiver;
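The ci13xxx change lets a function driver queue a request while the bus is suspended: if the host enabled remote wakeup, the PHY is woken immediately and rw_work signals resume roughly 200 ms later; otherwise the request is failed with -EAGAIN. The gate added to the enqueue path boils down to the following sketch (it assumes it lives in ci13xxx_udc.c next to the fields this patch adds):

/* Sketch of the suspend gate added to the enqueue path. */
static int sketch_queue_while_suspended(struct ci13xxx *udc,
					struct ci13xxx_req *mReq)
{
	if (!udc->suspended)
		return 0;			/* bus active: queue normally */

	if (!udc->remote_wakeup) {
		mReq->req.status = -EAGAIN;	/* host must resume us first */
		return -EAGAIN;
	}

	/* wake the PHY now, signal resume to the host a little later */
	usb_phy_set_suspend(udc->transceiver, 0);
	schedule_delayed_work(&udc->rw_work, REMOTE_WAKEUP_DELAY);
	return 0;
}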
diff --git a/drivers/usb/gadget/ci13xxx_udc.h b/drivers/usb/gadget/ci13xxx_udc.h
index 4376804..a189b45 100644
--- a/drivers/usb/gadget/ci13xxx_udc.h
+++ b/drivers/usb/gadget/ci13xxx_udc.h
@@ -149,6 +149,7 @@
 	u8                         configured;  /* is device configured */
 	u8                         test_mode;  /* the selected test mode */
 
+	struct delayed_work        rw_work;    /* remote wakeup delayed work */
 	struct usb_gadget_driver  *driver;     /* 3rd party gadget driver */
 	struct ci13xxx_udc_driver *udc_driver; /* device controller driver */
 	int                        vbus_active; /* is VBUS active */
diff --git a/drivers/usb/gadget/msm72k_udc.c b/drivers/usb/gadget/msm72k_udc.c
index a025d95..3e40552 100644
--- a/drivers/usb/gadget/msm72k_udc.c
+++ b/drivers/usb/gadget/msm72k_udc.c
@@ -119,6 +119,7 @@
 	unsigned long dTD_update_fail_count;
 	unsigned long false_prime_fail_count;
 	unsigned actual_prime_fail_count;
+	unsigned long dTD_workaround_fail_count;
 
 	unsigned wedged:1;
 	/* pointers to DMA transfer list area */
@@ -199,6 +200,7 @@
 	unsigned phy_fail_count;
 	unsigned prime_fail_count;
 	unsigned long dTD_update_fail_count;
+	unsigned long dTD_workaround_fail_count;
 
 	struct usb_gadget		gadget;
 	struct usb_gadget_driver	*driver;
@@ -1110,6 +1112,8 @@
 	struct msm_request *req;
 	unsigned long flags;
 	int req_dequeue = 1;
+	int dtd_update_fail_count_chk = 10;
+	int check_bit = 0;
 	unsigned info;
 
 	/*
@@ -1136,12 +1140,22 @@
 		/* if the transaction is still in-flight, stop here */
 		if (info & INFO_ACTIVE) {
 			if (req_dequeue) {
-				req_dequeue = 0;
 				ui->dTD_update_fail_count++;
 				ept->dTD_update_fail_count++;
-				udelay(10);
+				udelay(1);
+				if (!dtd_update_fail_count_chk--) {
+					req_dequeue = 0;
+					check_bit = 1;
+				}
 				goto dequeue;
 			} else {
+				if (check_bit) {
+					pr_debug("%s: Delay Workaround Failed\n",
+						 __func__);
+					check_bit = 0;
+					ui->dTD_workaround_fail_count++;
+					ept->dTD_workaround_fail_count++;
+				}
 				break;
 			}
 		}
@@ -1965,11 +1979,14 @@
 			continue;
 
 		i += scnprintf(buf + i, PAGE_SIZE - i,
-			"ept%d %s false_prime_count=%lu prime_fail_count=%d dtd_fail_count=%lu\n",
+			"ept%d %s false_prime_count=%lu prime_fail_count=%d "
+					 "dtd_fail_count=%lu "
+					 "dTD_workaround_fail_count=%lu\n",
 			ept->num, (ept->flags & EPT_FLAG_IN) ? "in " : "out",
 			ept->false_prime_fail_count,
 			ept->actual_prime_fail_count,
-			ept->dTD_update_fail_count);
+			ept->dTD_update_fail_count,
+			ept->dTD_workaround_fail_count);
 	}
 
 	i += scnprintf(buf + i, PAGE_SIZE - i,
@@ -1979,6 +1996,10 @@
 	i += scnprintf(buf + i, PAGE_SIZE - i,
 			   "prime_fail count: %d\n", ui->prime_fail_count);
 
+	i += scnprintf(buf + i, PAGE_SIZE - i,
+			   "dtd_workaround_fail count: %lu\n",
+			   ui->dTD_workaround_fail_count);
+
 	spin_unlock_irqrestore(&ui->lock, flags);
 
 	return simple_read_from_buffer(ubuf, count, ppos, buf, i);
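The dTD completion change above swaps a single 10 us back-off for up to ten 1 us polls of the ACTIVE bit before declaring the workaround failed and bumping the new dTD_workaround_fail_count counters. Stripped of the driver context, the retry shape is just this (plain C; transaction_active() is a stand-in for the INFO_ACTIVE read):

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for "dTD info word & INFO_ACTIVE" in the driver. */
static bool transaction_active(int poll)
{
	return poll < 3;	/* pretend the bit clears on the 4th read */
}

int main(void)
{
	int retries = 10;	/* dtd_update_fail_count_chk in the patch */
	int poll = 0;
	bool workaround_failed = false;

	while (transaction_active(poll++)) {
		if (!retries--) {
			workaround_failed = true;	/* reported via sysfs */
			break;
		}
		/* udelay(1) in the driver */
	}

	printf("polls=%d workaround_failed=%d\n", poll, workaround_failed);
	return 0;
}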
diff --git a/drivers/usb/gadget/u_sdio.c b/drivers/usb/gadget/u_sdio.c
index 8c4b4c7..5e9b0ec 100644
--- a/drivers/usb/gadget/u_sdio.c
+++ b/drivers/usb/gadget/u_sdio.c
@@ -1140,18 +1140,6 @@
 			goto free_sdio_ports;
 		}
 
-#ifdef DEBUG
-		/* REVISIT: create one file per port
-		 * or do not create any file
-		 */
-		if (i == 0) {
-			ret = device_create_file(&g->dev, &dev_attr_input);
-			if (ret)
-				pr_err("%s: unable to create device file\n",
-						__func__);
-		}
-#endif
-
 	}
 
 	gsdio_debugfs_init();
diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c
index caf86ca..3098fbe 100644
--- a/drivers/usb/host/ehci-hub.c
+++ b/drivers/usb/host/ehci-hub.c
@@ -823,7 +823,7 @@
 	u32 __iomem	*status_reg = &ehci->regs->port_status[
 				(wIndex & 0xff) - 1];
 	u32 __iomem	*hostpc_reg = NULL;
-	u32		temp, temp1, status;
+	u32		temp, temp1, status, cmd = 0;
 	unsigned long	flags;
 	int		retval = 0;
 	unsigned	selector;
@@ -1202,7 +1202,32 @@
 				ehci->reset_done [wIndex] = jiffies
 						+ msecs_to_jiffies (50);
 			}
+
+			if (ehci->reset_sof_bug && (temp & PORT_RESET)) {
+				cmd = ehci_readl(ehci, &ehci->regs->command);
+				cmd &= ~CMD_RUN;
+				ehci_writel(ehci, cmd, &ehci->regs->command);
+				if (handshake(ehci, &ehci->regs->status,
+						STS_HALT, STS_HALT, 16 * 125))
+					ehci_info(ehci,
+						"controller halt failed\n");
+			}
 			ehci_writel(ehci, temp, status_reg);
+			if (ehci->reset_sof_bug && (temp & PORT_RESET)
+				&& hcd->driver->enable_ulpi_control) {
+				hcd->driver->enable_ulpi_control(hcd,
+						PORT_RESET);
+				spin_unlock_irqrestore(&ehci->lock, flags);
+				usleep_range(50000, 55000);
+				if (handshake(ehci, status_reg,
+						PORT_RESET, 0, 10 * 1000))
+					ehci_info(ehci,
+						"failed to clear reset\n");
+				spin_lock_irqsave(&ehci->lock, flags);
+				hcd->driver->disable_ulpi_control(hcd);
+				cmd |= CMD_RUN;
+				ehci_writel(ehci, cmd, &ehci->regs->command);
+			}
 			break;
 
 		/* For downstream facing ports (these):  one hub port is put
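For controllers flagged with the new reset_sof_bug quirk, the port-reset path now halts the schedule before asserting PORT_RESET, hands the ULPI STROBE/DATA lines to manual control through the enable_ulpi_control() hook, waits roughly 50 ms for the reset to clear, then restores ULPI control and restarts the controller. A sequencing sketch using the helpers this patch touches (locking and error handling are elided; in ehci_hub_control the spinlock is dropped around the sleep, as the hunk shows):

/* Sequencing sketch only; not the exact ehci_hub_control() code. */
static void sketch_buggy_port_reset(struct ehci_hcd *ehci,
				    struct usb_hcd *hcd,
				    u32 __iomem *status_reg, u32 portsc)
{
	u32 cmd = ehci_readl(ehci, &ehci->regs->command);

	/* 1. stop the schedule so no SOFs go out during reset */
	ehci_writel(ehci, cmd & ~CMD_RUN, &ehci->regs->command);
	handshake(ehci, &ehci->regs->status, STS_HALT, STS_HALT, 16 * 125);

	/* 2. start the port reset */
	ehci_writel(ehci, portsc | PORT_RESET, status_reg);

	/* 3. drive the PHY lines manually while reset is asserted */
	hcd->driver->enable_ulpi_control(hcd, PORT_RESET);
	usleep_range(50000, 55000);
	handshake(ehci, status_reg, PORT_RESET, 0, 10 * 1000);
	hcd->driver->disable_ulpi_control(hcd);

	/* 4. resume the schedule */
	ehci_writel(ehci, cmd | CMD_RUN, &ehci->regs->command);
}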
diff --git a/drivers/usb/host/ehci-msm-hsic.c b/drivers/usb/host/ehci-msm-hsic.c
index a6b7dee..a95198c 100644
--- a/drivers/usb/host/ehci-msm-hsic.c
+++ b/drivers/usb/host/ehci-msm-hsic.c
@@ -328,6 +328,29 @@
 
 }
 
+static int ulpi_read(struct msm_hsic_hcd *mehci, u32 reg)
+{
+	struct usb_hcd *hcd = hsic_to_hcd(mehci);
+	unsigned long timeout;
+
+	/* initiate read operation */
+	writel_relaxed(ULPI_RUN | ULPI_READ | ULPI_ADDR(reg),
+	       USB_ULPI_VIEWPORT);
+
+	/* wait for completion */
+	timeout = jiffies + usecs_to_jiffies(ULPI_IO_TIMEOUT_USEC);
+	while (readl_relaxed(USB_ULPI_VIEWPORT) & ULPI_RUN) {
+		if (time_after(jiffies, timeout)) {
+			dev_err(mehci->dev, "ulpi_read: timeout %08x\n",
+				readl_relaxed(USB_ULPI_VIEWPORT));
+			return -ETIMEDOUT;
+		}
+		udelay(1);
+	}
+
+	return ULPI_DATA_READ(readl_relaxed(USB_ULPI_VIEWPORT));
+}
+
 static int ulpi_write(struct msm_hsic_hcd *mehci, u32 val, u32 reg)
 {
 	struct usb_hcd *hcd = hsic_to_hcd(mehci);
@@ -354,6 +377,37 @@
 	return 0;
 }
 
+#define HSIC_DBG1		0X38
+#define ULPI_MANUAL_ENABLE	BIT(4)
+#define ULPI_LINESTATE_DATA	BIT(5)
+#define ULPI_LINESTATE_STROBE	BIT(6)
+static void ehci_msm_enable_ulpi_control(struct usb_hcd *hcd, u32 linestate)
+{
+	struct msm_hsic_hcd *mehci = hcd_to_hsic(hcd);
+	int val;
+
+	switch (linestate) {
+	case PORT_RESET:
+		val = ulpi_read(mehci, HSIC_DBG1);
+		val |= ULPI_MANUAL_ENABLE;
+		val &= ~(ULPI_LINESTATE_DATA | ULPI_LINESTATE_STROBE);
+		ulpi_write(mehci, val, HSIC_DBG1);
+		break;
+	default:
+		pr_info("%s: Unknown linestate:%x\n", __func__, linestate);
+	}
+}
+
+static void ehci_msm_disable_ulpi_control(struct usb_hcd *hcd)
+{
+	struct msm_hsic_hcd *mehci = hcd_to_hsic(hcd);
+	int val;
+
+	val = ulpi_read(mehci, HSIC_DBG1);
+	val &= ~ULPI_MANUAL_ENABLE;
+	ulpi_write(mehci, val, HSIC_DBG1);
+}
+
 static int msm_hsic_config_gpios(struct msm_hsic_hcd *mehci, int gpio_en)
 {
 	int rc = 0;
@@ -406,50 +460,28 @@
 	return rc;
 }
 
-static int msm_hsic_phy_clk_reset(struct msm_hsic_hcd *mehci)
+static void msm_hsic_clk_reset(struct msm_hsic_hcd *mehci)
 {
 	int ret;
 
-	clk_prepare_enable(mehci->alt_core_clk);
-
 	ret = clk_reset(mehci->core_clk, CLK_RESET_ASSERT);
 	if (ret) {
-		clk_disable_unprepare(mehci->alt_core_clk);
-		dev_err(mehci->dev, "usb phy clk assert failed\n");
-		return ret;
+		dev_err(mehci->dev, "hsic clk assert failed:%d\n", ret);
+		return;
 	}
-	usleep_range(10000, 12000);
-	clk_disable_unprepare(mehci->alt_core_clk);
+	clk_disable(mehci->core_clk);
 
 	ret = clk_reset(mehci->core_clk, CLK_RESET_DEASSERT);
 	if (ret)
-		dev_err(mehci->dev, "usb phy clk deassert failed\n");
+		dev_err(mehci->dev, "hsic clk deassert failed:%d\n", ret);
 
-	return ret;
+	usleep_range(10000, 12000);
+
+	clk_enable(mehci->core_clk);
 }
 
-static int msm_hsic_phy_reset(struct msm_hsic_hcd *mehci)
-{
-	struct usb_hcd *hcd = hsic_to_hcd(mehci);
-	u32 val;
-	int ret;
-
-	ret = msm_hsic_phy_clk_reset(mehci);
-	if (ret)
-		return ret;
-
-	val = readl_relaxed(USB_PORTSC) & ~PORTSC_PTS_MASK;
-	writel_relaxed(val | PORTSC_PTS_ULPI, USB_PORTSC);
-
-	/* Ensure that RESET operation is completed before turning off clock */
-	mb();
-	dev_dbg(mehci->dev, "phy_reset: success\n");
-
-	return 0;
-}
-
-#define HSIC_GPIO150_PAD_CTL   (MSM_TLMM_BASE+0x20C0)
-#define HSIC_GPIO151_PAD_CTL   (MSM_TLMM_BASE+0x20C4)
+#define HSIC_STROBE_GPIO_PAD_CTL	(MSM_TLMM_BASE+0x20C0)
+#define HSIC_DATA_GPIO_PAD_CTL		(MSM_TLMM_BASE+0x20C4)
 #define HSIC_CAL_PAD_CTL       (MSM_TLMM_BASE+0x20C8)
 #define HSIC_LV_MODE		0x04
 #define HSIC_PAD_CALIBRATION	0xA8
@@ -458,33 +490,15 @@
 static int msm_hsic_reset(struct msm_hsic_hcd *mehci)
 {
 	struct usb_hcd *hcd = hsic_to_hcd(mehci);
-	int cnt = 0;
 	int ret;
 	struct msm_hsic_host_platform_data *pdata = mehci->dev->platform_data;
 
-	ret = msm_hsic_phy_reset(mehci);
-	if (ret) {
-		dev_err(mehci->dev, "phy_reset failed\n");
-		return ret;
-	}
+	msm_hsic_clk_reset(mehci);
 
-	writel_relaxed(USBCMD_RESET, USB_USBCMD);
-	while (cnt < LINK_RESET_TIMEOUT_USEC) {
-		if (!(readl_relaxed(USB_USBCMD) & USBCMD_RESET))
-			break;
-		udelay(1);
-		cnt++;
-	}
-	if (cnt >= LINK_RESET_TIMEOUT_USEC)
-		return -ETIMEDOUT;
-
-	/* Reset PORTSC and select ULPI phy */
+	/* select ulpi phy */
 	writel_relaxed(0x80000000, USB_PORTSC);
 
-	/* TODO: Need to confirm if HSIC PHY also requires delay after RESET */
-	msleep(100);
-
-	/* HSIC PHY Initialization */
+	mb();
 
 	/* HSIC init sequence when HSIC signals (Strobe/Data) are
 	routed via GPIOs */
@@ -493,6 +507,8 @@
 		/* Enable LV_MODE in HSIC_CAL_PAD_CTL register */
 		writel_relaxed(HSIC_LV_MODE, HSIC_CAL_PAD_CTL);
 
+		mb();
+
 		/*set periodic calibration interval to ~2.048sec in
 		  HSIC_IO_CAL_REG */
 		ulpi_write(mehci, 0xFF, 0x33);
@@ -500,16 +516,18 @@
 		/* Enable periodic IO calibration in HSIC_CFG register */
 		ulpi_write(mehci, HSIC_PAD_CALIBRATION, 0x30);
 
-		/* Configure GPIO 150/151 pins for HSIC functionality mode */
+		/* Configure GPIO pins for HSIC functionality mode */
 		ret = msm_hsic_config_gpios(mehci, 1);
 		if (ret) {
 			dev_err(mehci->dev, " gpio configuration failed\n");
 			return ret;
 		}
-		/* Set LV_MODE=0x1 and DCC=0x2 in HSIC_GPIO150/151_PAD_CTL
-		   register */
-		writel_relaxed(HSIC_GPIO_PAD_VAL, HSIC_GPIO150_PAD_CTL);
-		writel_relaxed(HSIC_GPIO_PAD_VAL, HSIC_GPIO151_PAD_CTL);
+		/* Set LV_MODE=0x1 and DCC=0x2 in HSIC_GPIO PAD_CTL register */
+		writel_relaxed(HSIC_GPIO_PAD_VAL, HSIC_STROBE_GPIO_PAD_CTL);
+		writel_relaxed(HSIC_GPIO_PAD_VAL, HSIC_DATA_GPIO_PAD_CTL);
+
+		mb();
+
 		/* Enable HSIC mode in HSIC_CFG register */
 		ulpi_write(mehci, 0x01, 0x31);
 	} else {
@@ -824,7 +842,7 @@
 	 * generic hardware linkage
 	 */
 	.irq			= msm_hsic_irq,
-	.flags			= HCD_USB2 | HCD_MEMORY,
+	.flags			= HCD_USB2 | HCD_MEMORY | HCD_OLD_ENUM,
 
 	.reset			= ehci_hsic_reset,
 	.start			= ehci_run,
@@ -861,6 +879,9 @@
 	.bus_resume		= ehci_hsic_bus_resume,
 
 	.log_urb_complete	= dbg_log_event,
+
+	.enable_ulpi_control	= ehci_msm_enable_ulpi_control,
+	.disable_ulpi_control	= ehci_msm_disable_ulpi_control,
 };
 
 static int msm_hsic_init_clocks(struct msm_hsic_hcd *mehci, u32 init)
@@ -1223,6 +1244,7 @@
 	mehci->dev = &pdev->dev;
 
 	mehci->ehci.susp_sof_bug = 1;
+	mehci->ehci.reset_sof_bug = 1;
 
 	mehci->ehci.max_log2_irq_thresh = 6;
 
diff --git a/drivers/usb/host/ehci.h b/drivers/usb/host/ehci.h
index 6afb70b..5754170 100644
--- a/drivers/usb/host/ehci.h
+++ b/drivers/usb/host/ehci.h
@@ -152,6 +152,7 @@
 	unsigned		has_synopsys_hc_bug:1; /* Synopsys HC */
 	unsigned		frame_index_bug:1; /* MosChip (AKA NetMos) */
 	unsigned		susp_sof_bug:1; /*Chip Idea HC*/
+	unsigned		reset_sof_bug:1; /*Chip Idea HC*/
 
 	/* required for usb32 quirk */
 	#define OHCI_CTRL_HCFS          (3 << 6)
diff --git a/drivers/usb/otg/msm_otg.c b/drivers/usb/otg/msm_otg.c
index dedad53..1d9c84f 100644
--- a/drivers/usb/otg/msm_otg.c
+++ b/drivers/usb/otg/msm_otg.c
@@ -670,6 +670,9 @@
 	if (aca_enabled())
 		return 0;
 
+	if (atomic_read(&motg->in_lpm) == suspend)
+		return 0;
+
 	if (suspend) {
 		switch (phy->state) {
 		case OTG_STATE_A_WAIT_BCON:
diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c
index 1f6d915..a749a6d 100644
--- a/drivers/usb/serial/qcserial.c
+++ b/drivers/usb/serial/qcserial.c
@@ -284,6 +284,8 @@
 	.write		     = usb_wwan_write,
 	.write_room	     = usb_wwan_write_room,
 	.chars_in_buffer     = usb_wwan_chars_in_buffer,
+	.throttle            = usb_wwan_throttle,
+	.unthrottle          = usb_wwan_unthrottle,
 	.attach		     = usb_wwan_startup,
 	.disconnect	     = usb_wwan_disconnect,
 	.release	     = qc_release,
diff --git a/drivers/usb/serial/usb-wwan.h b/drivers/usb/serial/usb-wwan.h
index 9811a82..98b399f 100644
--- a/drivers/usb/serial/usb-wwan.h
+++ b/drivers/usb/serial/usb-wwan.h
@@ -24,6 +24,8 @@
 extern int usb_wwan_write(struct tty_struct *tty, struct usb_serial_port *port,
 			  const unsigned char *buf, int count);
 extern int usb_wwan_chars_in_buffer(struct tty_struct *tty);
+extern void usb_wwan_throttle(struct tty_struct *tty);
+extern void usb_wwan_unthrottle(struct tty_struct *tty);
 #ifdef CONFIG_PM
 extern int usb_wwan_suspend(struct usb_serial *serial, pm_message_t message);
 extern int usb_wwan_resume(struct usb_serial *serial);
@@ -33,7 +35,7 @@
 
 #define N_IN_URB 5
 #define N_OUT_URB 5
-#define IN_BUFLEN 65536
+#define IN_BUFLEN 16384
 #define OUT_BUFLEN 65536
 
 struct usb_wwan_intf_private {
@@ -55,6 +57,10 @@
 	int opened;
 	struct usb_anchor submitted;
 	struct usb_anchor delayed;
+	struct list_head in_urb_list;
+	spinlock_t in_lock;
+	ssize_t n_read;
+	struct work_struct in_work;
 
 	/* Settings for the port */
 	int rts_state;		/* Handshaking pins (outputs) */
diff --git a/drivers/usb/serial/usb_wwan.c b/drivers/usb/serial/usb_wwan.c
index 0c58554..bf30c0b 100644
--- a/drivers/usb/serial/usb_wwan.c
+++ b/drivers/usb/serial/usb_wwan.c
@@ -279,15 +279,78 @@
 }
 EXPORT_SYMBOL(usb_wwan_write);
 
+static void usb_wwan_in_work(struct work_struct *w)
+{
+	struct usb_wwan_port_private *portdata =
+		container_of(w, struct usb_wwan_port_private, in_work);
+	struct list_head *q = &portdata->in_urb_list;
+	struct urb *urb;
+	unsigned char *data;
+	struct tty_struct *tty;
+	struct usb_serial_port *port;
+	int err;
+	ssize_t len;
+	ssize_t count;
+	unsigned long flags;
+
+	spin_lock_irqsave(&portdata->in_lock, flags);
+	while (!list_empty(q)) {
+		urb = list_first_entry(q, struct urb, urb_list);
+		port = urb->context;
+		if (port->throttle_req || port->throttled)
+			break;
+
+		tty = tty_port_tty_get(&port->port);
+		if (!tty)
+			continue;
+
+		list_del_init(&urb->urb_list);
+
+		spin_unlock_irqrestore(&portdata->in_lock, flags);
+
+		len = urb->actual_length - portdata->n_read;
+		data = urb->transfer_buffer + portdata->n_read;
+		count = tty_insert_flip_string(tty, data, len);
+		tty_flip_buffer_push(tty);
+		tty_kref_put(tty);
+
+		if (count < len) {
+			dbg("%s: len:%zd count:%zd n_read:%zd\n", __func__,
+					len, count, portdata->n_read);
+			portdata->n_read += count;
+			port->throttled = true;
+
+			/* add request back to list */
+			spin_lock_irqsave(&portdata->in_lock, flags);
+			list_add(&urb->urb_list, q);
+			spin_unlock_irqrestore(&portdata->in_lock, flags);
+			return;
+		}
+		portdata->n_read = 0;
+
+		usb_anchor_urb(urb, &portdata->submitted);
+		err = usb_submit_urb(urb, GFP_ATOMIC);
+		if (err) {
+			usb_unanchor_urb(urb);
+			if (err != -EPERM)
+				pr_err("%s: submit read urb failed:%d",
+						__func__, err);
+		}
+
+		usb_mark_last_busy(port->serial->dev);
+		spin_lock_irqsave(&portdata->in_lock, flags);
+	}
+	spin_unlock_irqrestore(&portdata->in_lock, flags);
+}
+
 static void usb_wwan_indat_callback(struct urb *urb)
 {
 	int err;
 	int endpoint;
 	struct usb_wwan_port_private *portdata;
 	struct usb_serial_port *port;
-	struct tty_struct *tty;
-	unsigned char *data = urb->transfer_buffer;
 	int status = urb->status;
+	unsigned long flags;
 
 	dbg("%s: %p", __func__, urb);
 
@@ -295,38 +358,30 @@
 	port = urb->context;
 	portdata = usb_get_serial_port_data(port);
 
-	if (status) {
-		dbg("%s: nonzero status: %d on endpoint %02x.",
-		    __func__, status, endpoint);
-	} else {
-		tty = tty_port_tty_get(&port->port);
-		if (tty) {
-			if (urb->actual_length) {
-				tty_insert_flip_string(tty, data,
-						urb->actual_length);
-				tty_flip_buffer_push(tty);
-			} else
-				dbg("%s: empty read urb received", __func__);
-			tty_kref_put(tty);
-		}
+	usb_mark_last_busy(port->serial->dev);
 
-		/* Resubmit urb so we continue receiving */
-		if (status != -ESHUTDOWN) {
-			usb_anchor_urb(urb, &portdata->submitted);
-			err = usb_submit_urb(urb, GFP_ATOMIC);
-			if (err) {
-				usb_unanchor_urb(urb);
-				if (err != -EPERM) {
-					printk(KERN_ERR "%s: resubmit read urb failed. "
-						"(%d)", __func__, err);
-					/* busy also in error unless we are killed */
-					usb_mark_last_busy(port->serial->dev);
-				}
-			} else {
-				usb_mark_last_busy(port->serial->dev);
-			}
-		}
+	if (!status && urb->actual_length) {
+		spin_lock_irqsave(&portdata->in_lock, flags);
+		list_add_tail(&urb->urb_list, &portdata->in_urb_list);
+		spin_unlock_irqrestore(&portdata->in_lock, flags);
 
+		schedule_work(&portdata->in_work);
+
+		return;
+	}
+
+	dbg("%s: nonzero status: %d on endpoint %02x.",
+		__func__, status, endpoint);
+
+	if (status != -ESHUTDOWN) {
+		usb_anchor_urb(urb, &portdata->submitted);
+		err = usb_submit_urb(urb, GFP_ATOMIC);
+		if (err) {
+			usb_unanchor_urb(urb);
+			if (err != -EPERM)
+				pr_err("%s: submit read urb failed:%d",
+						__func__, err);
+		}
 	}
 }
 
@@ -401,6 +456,31 @@
 }
 EXPORT_SYMBOL(usb_wwan_chars_in_buffer);
 
+void usb_wwan_throttle(struct tty_struct *tty)
+{
+	struct usb_serial_port *port = tty->driver_data;
+
+	port->throttle_req = true;
+
+	dbg("%s:\n", __func__);
+}
+EXPORT_SYMBOL(usb_wwan_throttle);
+
+void usb_wwan_unthrottle(struct tty_struct *tty)
+{
+	struct usb_serial_port *port = tty->driver_data;
+	struct usb_wwan_port_private *portdata;
+
+	portdata = usb_get_serial_port_data(port);
+
+	dbg("%s:\n", __func__);
+	port->throttle_req = false;
+	port->throttled = false;
+
+	schedule_work(&portdata->in_work);
+}
+EXPORT_SYMBOL(usb_wwan_unthrottle);
+
 int usb_wwan_open(struct tty_struct *tty, struct usb_serial_port *port)
 {
 	struct usb_wwan_port_private *portdata;
@@ -560,6 +640,9 @@
 		}
 		init_usb_anchor(&portdata->delayed);
 		init_usb_anchor(&portdata->submitted);
+		INIT_WORK(&portdata->in_work, usb_wwan_in_work);
+		INIT_LIST_HEAD(&portdata->in_urb_list);
+		spin_lock_init(&portdata->in_lock);
 
 		for (j = 0; j < N_IN_URB; j++) {
 			buffer = kmalloc(IN_BUFLEN, GFP_KERNEL);
@@ -624,14 +707,25 @@
 	int i, j;
 	struct usb_serial_port *port;
 	struct usb_wwan_port_private *portdata;
-
-	dbg("%s", __func__);
+	struct urb *urb;
+	struct list_head *q;
+	unsigned long flags;
 
 	/* Now free them */
 	for (i = 0; i < serial->num_ports; ++i) {
 		port = serial->port[i];
 		portdata = usb_get_serial_port_data(port);
 
+		cancel_work_sync(&portdata->in_work);
+		/* TBD: is draining this list still needed after cancel_work_sync? */
+		spin_lock_irqsave(&portdata->in_lock, flags);
+		q = &portdata->in_urb_list;
+		while (!list_empty(q)) {
+			urb = list_first_entry(q, struct urb, urb_list);
+			list_del_init(&urb->urb_list);
+		}
+		spin_unlock_irqrestore(&portdata->in_lock, flags);
+
 		for (j = 0; j < N_IN_URB; j++) {
 			usb_free_urb(portdata->in_urbs[j]);
 			kfree(portdata->in_buffer[j]);
diff --git a/drivers/video/msm/hdmi_msm.c b/drivers/video/msm/hdmi_msm.c
index 03243ac..2526d76 100644
--- a/drivers/video/msm/hdmi_msm.c
+++ b/drivers/video/msm/hdmi_msm.c
@@ -4187,39 +4187,6 @@
 }
 #endif
 
-static void hdmi_msm_hpd_read_work(struct work_struct *work)
-{
-	uint32 hpd_ctrl;
-
-	clk_prepare_enable(hdmi_msm_state->hdmi_app_clk);
-	hdmi_msm_state->pd->core_power(1, 1);
-	hdmi_msm_state->pd->enable_5v(1);
-	hdmi_msm_set_mode(FALSE);
-	hdmi_msm_init_phy(external_common_state->video_resolution);
-	/* HDMI_USEC_REFTIMER[0x0208] */
-	HDMI_OUTP(0x0208, 0x0001001B);
-	hpd_ctrl = (HDMI_INP(0x0258) & ~0xFFF) | 0xFFF;
-
-	/* Toggle HPD circuit to trigger HPD sense */
-	HDMI_OUTP(0x0258, ~(1 << 28) & hpd_ctrl);
-	HDMI_OUTP(0x0258, (1 << 28) | hpd_ctrl);
-
-	hdmi_msm_set_mode(TRUE);
-	msleep(1000);
-	external_common_state->hpd_state = (HDMI_INP(0x0250) & 0x2) >> 1;
-	if (external_common_state->hpd_state) {
-		hdmi_msm_read_edid();
-		DEV_DBG("%s: sense CONNECTED: send ONLINE\n", __func__);
-		kobject_uevent(external_common_state->uevent_kobj,
-			KOBJ_ONLINE);
-	}
-	hdmi_msm_hpd_off();
-	hdmi_msm_set_mode(FALSE);
-	hdmi_msm_state->pd->core_power(0, 1);
-	hdmi_msm_state->pd->enable_5v(0);
-	clk_disable_unprepare(hdmi_msm_state->hdmi_app_clk);
-}
-
 static void hdmi_msm_hpd_off(void)
 {
 	int rc = 0;
@@ -4583,8 +4550,6 @@
 #endif
 	}
 
-	queue_work(hdmi_work_queue, &hdmi_msm_state->hpd_read_work);
-
 	/* Initialize hdmi node and register with switch driver */
 	if (hdmi_prim_display)
 		external_common_state->sdev.name = "hdmi_as_primary";
@@ -4754,7 +4719,6 @@
 	hdmi_common_init_panel_info(&hdmi_msm_panel_data.panel_info);
 	init_completion(&hdmi_msm_state->ddc_sw_done);
 	INIT_WORK(&hdmi_msm_state->hpd_state_work, hdmi_msm_hpd_state_work);
-	INIT_WORK(&hdmi_msm_state->hpd_read_work, hdmi_msm_hpd_read_work);
 #ifdef CONFIG_FB_MSM_HDMI_MSM_PANEL_HDCP_SUPPORT
 	init_completion(&hdmi_msm_state->hdcp_success_done);
 	INIT_WORK(&hdmi_msm_state->hdcp_reauth_work, hdmi_msm_hdcp_reauth_work);
diff --git a/drivers/video/msm/hdmi_msm.h b/drivers/video/msm/hdmi_msm.h
index 5195f2c..06ebb06 100644
--- a/drivers/video/msm/hdmi_msm.h
+++ b/drivers/video/msm/hdmi_msm.h
@@ -61,7 +61,7 @@
 	boolean hpd_cable_chg_detected;
 	boolean full_auth_done;
 	boolean hpd_during_auth;
-	struct work_struct hpd_state_work, hpd_read_work;
+	struct work_struct hpd_state_work;
 	struct timer_list hpd_state_timer;
 	struct completion ddc_sw_done;
 
diff --git a/drivers/video/msm/mdp.c b/drivers/video/msm/mdp.c
index cad6e02..79bbce4 100644
--- a/drivers/video/msm/mdp.c
+++ b/drivers/video/msm/mdp.c
@@ -1099,31 +1099,21 @@
 		goto error;
 	}
 
-	if (mgmt->hist == NULL) {
-		if ((mgmt->mdp_is_hist_init == TRUE) &&
-			((!completion_done(&mgmt->mdp_hist_comp)) &&
-			waitqueue_active(&mgmt->mdp_hist_comp.wait)))
-			pr_err("mgmt->hist invalid NULL\n");
+	switch (mgmt->block) {
+	case MDP_BLOCK_DMA_P:
+	case MDP_BLOCK_DMA_S:
+		ret = _mdp_histogram_read_dma_data(mgmt);
+		break;
+	case MDP_BLOCK_VG_1:
+	case MDP_BLOCK_VG_2:
+		ret = _mdp_histogram_read_vg_data(mgmt);
+		break;
+	default:
+		pr_err("%s, invalid MDP block = %d\n", __func__, mgmt->block);
 		ret = -EINVAL;
+		goto error;
 	}
 
-	if (!ret) {
-		switch (mgmt->block) {
-		case MDP_BLOCK_DMA_P:
-		case MDP_BLOCK_DMA_S:
-			ret = _mdp_histogram_read_dma_data(mgmt);
-			break;
-		case MDP_BLOCK_VG_1:
-		case MDP_BLOCK_VG_2:
-			ret = _mdp_histogram_read_vg_data(mgmt);
-			break;
-		default:
-			pr_err("%s, invalid MDP block = %d\n", __func__,
-								mgmt->block);
-			ret = -EINVAL;
-			goto error;
-		}
-	}
 	/*
 	 * if read was triggered by an underrun or failed copying,
 	 * don't wake up readers
@@ -1624,7 +1614,16 @@
 		__mdp_histogram_kickoff(mgmt);
 
 	if (isr & INTR_HIST_DONE) {
-		queue_work(mdp_hist_wq, &mgmt->mdp_histogram_worker);
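+		/* Hand off to the worker only when a reader is waiting and a
+		 * histogram buffer exists; otherwise just reset the block. */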
+		if ((waitqueue_active(&mgmt->mdp_hist_comp.wait))
+			 && (mgmt->hist != NULL)) {
+			if (!queue_work(mdp_hist_wq,
+						&mgmt->mdp_histogram_worker)) {
+				pr_err("%s %d- can't queue hist_read\n",
+							 __func__, mgmt->block);
+			}
+		} else {
+			__mdp_histogram_reset(mgmt);
+		}
 	}
 }
 
diff --git a/drivers/video/msm/mdss/Makefile b/drivers/video/msm/mdss/Makefile
index 2a61f07..492437e 100644
--- a/drivers/video/msm/mdss/Makefile
+++ b/drivers/video/msm/mdss/Makefile
@@ -1,6 +1,10 @@
 mdss-mdp-objs := mdss_mdp.o mdss_mdp_ctl.o mdss_mdp_pipe.o mdss_mdp_util.o
+mdss-mdp-objs += mdss_mdp_pp.o
+mdss-mdp-objs += mdss_mdp_intf_video.o
 mdss-mdp-objs += mdss_mdp_intf_writeback.o
+mdss-mdp-objs += mdss_mdp_rotator.o
 mdss-mdp-objs += mdss_mdp_overlay.o
+mdss-mdp-objs += mdss_mdp_wb.o
 obj-$(CONFIG_FB_MSM_MDSS) += mdss-mdp.o
 obj-$(CONFIG_FB_MSM_MDSS) += mdss_fb.o
 obj-$(CONFIG_FB_MSM_MDSS_WRITEBACK) += mdss_wb.o
diff --git a/drivers/video/msm/mdss/mdss.h b/drivers/video/msm/mdss/mdss.h
index aaf6690..a58c3e6 100644
--- a/drivers/video/msm/mdss/mdss.h
+++ b/drivers/video/msm/mdss/mdss.h
@@ -23,13 +23,20 @@
 
 extern unsigned char *mdss_reg_base;
 
+enum mdss_mdp_clk_type {
+	MDSS_CLK_AHB,
+	MDSS_CLK_AXI,
+	MDSS_CLK_MDP_SRC,
+	MDSS_CLK_MDP_CORE,
+	MDSS_CLK_MDP_LUT,
+	MDSS_CLK_MDP_VSYNC,
+	MDSS_MAX_CLK
+};
+
 struct mdss_res_type {
 	u32 rev;
 	u32 mdp_rev;
-	struct clk *mdp_clk;
-	struct clk *mdp_pclk;
-	struct clk *mdp_lut_clk;
-	struct clk *vsync_clk;
+	struct clk *mdp_clk[MDSS_MAX_CLK];
 	struct regulator *fs;
 
 	struct workqueue_struct *clk_ctrl_wq;
@@ -40,6 +47,8 @@
 	u32 irq_ena;
 	u32 irq_buzy;
 
+	u32 mdp_irq_mask;
+
 	u32 clk_ena;
 	u32 suspend;
 	u32 timeout;
@@ -60,4 +69,22 @@
 	u32 *mixer_type_map;
 };
 extern struct mdss_res_type *mdss_res;
+
+enum mdss_hw_index {
+	MDSS_HW_MDP,
+	MDSS_HW_DSI0,
+	MDSS_HW_DSI1,
+	MDSS_HW_HDMI,
+	MDSS_HW_EDP,
+	MDSS_MAX_HW_BLK
+};
+
+struct mdss_hw {
+	u32 hw_ndx;
+	irqreturn_t (*irq_handler)(int irq, void *ptr);
+};
+
+void mdss_enable_irq(struct mdss_hw *hw);
+void mdss_disable_irq(struct mdss_hw *hw);
+void mdss_disable_irq_nosync(struct mdss_hw *hw);
 #endif /* MDSS_H */
diff --git a/drivers/video/msm/mdss/mdss_fb.c b/drivers/video/msm/mdss/mdss_fb.c
index 0fedb6c..a96bf3a 100644
--- a/drivers/video/msm/mdss/mdss_fb.c
+++ b/drivers/video/msm/mdss/mdss_fb.c
@@ -1158,6 +1158,20 @@
 	return ret;
 }
 
+struct fb_info *msm_fb_get_writeback_fb(void)
+{
+	int c = 0;
+	for (c = 0; c < fbi_list_index; ++c) {
+		struct msm_fb_data_type *mfd;
+		mfd = (struct msm_fb_data_type *)fbi_list[c]->par;
+		if (mfd->panel.type == WRITEBACK_PANEL)
+			return fbi_list[c];
+	}
+
+	return NULL;
+}
+EXPORT_SYMBOL(msm_fb_get_writeback_fb);
+
 int mdss_register_panel(struct mdss_panel_data *pdata)
 {
 	struct platform_device *mdss_fb_dev = NULL;
diff --git a/drivers/video/msm/mdss/mdss_fb.h b/drivers/video/msm/mdss/mdss_fb.h
index a3f0dbe..ac6c213 100644
--- a/drivers/video/msm/mdss/mdss_fb.h
+++ b/drivers/video/msm/mdss/mdss_fb.h
@@ -55,6 +55,7 @@
 
 	int op_enable;
 	u32 fb_imgType;
+	u32 dst_format;
 
 	int hw_refresh;
 
@@ -90,6 +91,7 @@
 	struct ion_client *iclient;
 
 	struct mdss_mdp_ctl *ctl;
+	struct mdss_mdp_wb *wb;
 };
 
 int mdss_fb_get_phys_info(unsigned long *start, unsigned long *len, int fb_num);
diff --git a/drivers/video/msm/mdss/mdss_mdp.c b/drivers/video/msm/mdss/mdss_mdp.c
index d1847c3..41e0c18 100644
--- a/drivers/video/msm/mdss/mdss_mdp.c
+++ b/drivers/video/msm/mdss/mdss_mdp.c
@@ -39,14 +39,13 @@
 #include <mach/board.h>
 #include <mach/clk.h>
 #include <mach/hardware.h>
+#include <mach/msm_bus.h>
+#include <mach/msm_bus_board.h>
 
 #include "mdss.h"
 #include "mdss_fb.h"
 #include "mdss_mdp.h"
 
-/* 1.15 mdp clk factor */
-#define MDP_CLK_FACTOR(rate) (((rate) * 23) / 20)
-
 unsigned char *mdss_reg_base;
 
 struct mdss_res_type *mdss_res;
@@ -75,45 +74,261 @@
 	MDSS_MDP_MIXER_TYPE_WRITEBACK,
 };
 
-irqreturn_t mdss_irq_handler(int mdss_irq, void *ptr)
+#define MDP_BUS_VECTOR_ENTRY(ab_val, ib_val) \
+	{ \
+		.src = MSM_BUS_MASTER_MDP_PORT0,	\
+		.dst = MSM_BUS_SLAVE_EBI_CH0,		\
+		.ab = (ab_val),				\
+		.ib = (ib_val),				\
+	}
+
+#define MDP_BUS_VECTOR_ENTRY_NDX(n) \
+		MDP_BUS_VECTOR_ENTRY((n) * 100000000, (n) * 200000000)
+
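+/* Pre-built bus bandwidth vote levels, selected at runtime by
+ * mdss_mdp_bus_scale_set_min_quota(). */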
+static struct msm_bus_vectors mdp_bus_vectors[] = {
+	MDP_BUS_VECTOR_ENTRY_NDX(0),
+	MDP_BUS_VECTOR_ENTRY_NDX(1),
+	MDP_BUS_VECTOR_ENTRY_NDX(2),
+	MDP_BUS_VECTOR_ENTRY_NDX(3),
+	MDP_BUS_VECTOR_ENTRY_NDX(4),
+	MDP_BUS_VECTOR_ENTRY_NDX(5),
+	MDP_BUS_VECTOR_ENTRY_NDX(6),
+	MDP_BUS_VECTOR_ENTRY_NDX(7),
+	MDP_BUS_VECTOR_ENTRY_NDX(8),
+	MDP_BUS_VECTOR_ENTRY_NDX(9),
+	MDP_BUS_VECTOR_ENTRY_NDX(10),
+	MDP_BUS_VECTOR_ENTRY(200000000, 200000000)
+};
+static struct msm_bus_paths mdp_bus_usecases[ARRAY_SIZE(mdp_bus_vectors)];
+static struct msm_bus_scale_pdata mdp_bus_scale_table = {
+	.usecase = mdp_bus_usecases,
+	.num_usecases = ARRAY_SIZE(mdp_bus_usecases),
+	.name = "mdss_mdp",
+};
+
+struct mdss_hw mdss_mdp_hw = {
+	.hw_ndx = MDSS_HW_MDP,
+	.irq_handler = mdss_mdp_isr,
+};
+
+static DEFINE_SPINLOCK(mdss_lock);
+struct mdss_hw *mdss_irq_handlers[MDSS_MAX_HW_BLK];
+
+static inline int mdss_irq_dispatch(u32 hw_ndx, int irq, void *ptr)
+{
+	struct mdss_hw *hw;
+
+	spin_lock(&mdss_lock);
+	hw = mdss_irq_handlers[hw_ndx];
+	spin_unlock(&mdss_lock);
+	if (hw)
+		return hw->irq_handler(irq, ptr);
+
+	return -ENODEV;
+}
+
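+/* Shared MDSS ISR: read the HW interrupt status and dispatch to whichever
+ * per-block handlers (MDP/DSI/EDP/HDMI) are registered. */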
+static irqreturn_t mdss_irq_handler(int irq, void *ptr)
 {
 	u32 intr = MDSS_MDP_REG_READ(MDSS_REG_HW_INTR_STATUS);
 
 	mdss_res->irq_buzy = true;
 
 	if (intr & MDSS_INTR_MDP)
-		mdss_mdp_isr(mdss_irq, ptr);
+		mdss_irq_dispatch(MDSS_HW_MDP, irq, ptr);
+
+	if (intr & MDSS_INTR_DSI0)
+		mdss_irq_dispatch(MDSS_HW_DSI0, irq, ptr);
+
+	if (intr & MDSS_INTR_DSI1)
+		mdss_irq_dispatch(MDSS_HW_DSI1, irq, ptr);
+
+	if (intr & MDSS_INTR_EDP)
+		mdss_irq_dispatch(MDSS_HW_EDP, irq, ptr);
+
+	if (intr & MDSS_INTR_HDMI)
+		mdss_irq_dispatch(MDSS_HW_HDMI, irq, ptr);
 
 	mdss_res->irq_buzy = false;
 
 	return IRQ_HANDLED;
 }
 
+
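+/* Register a block's handler and enable the shared MDSS interrupt when the
+ * first block requests it. */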
+void mdss_enable_irq(struct mdss_hw *hw)
+{
+	unsigned long irq_flags;
+	u32 ndx_bit;
+
+	if (hw->hw_ndx >= MDSS_MAX_HW_BLK)
+		return;
+
+	ndx_bit = BIT(hw->hw_ndx);
+
+	pr_debug("Enable HW=%d irq ena=%d mask=%x\n", hw->hw_ndx,
+			mdss_res->irq_ena, mdss_res->irq_mask);
+
+	spin_lock_irqsave(&mdss_lock, irq_flags);
+	if (mdss_res->irq_mask & ndx_bit) {
+		pr_debug("MDSS HW ndx=%d is already set, mask=%x\n",
+				hw->hw_ndx, mdss_res->irq_mask);
+	} else {
+		mdss_irq_handlers[hw->hw_ndx] = hw;
+		mdss_res->irq_mask |= ndx_bit;
+		if (!mdss_res->irq_ena) {
+			mdss_res->irq_ena = true;
+			enable_irq(mdss_res->irq);
+		}
+	}
+	spin_unlock_irqrestore(&mdss_lock, irq_flags);
+}
+EXPORT_SYMBOL(mdss_enable_irq);
+
+void mdss_disable_irq(struct mdss_hw *hw)
+{
+	unsigned long irq_flags;
+	u32 ndx_bit;
+
+	if (hw->hw_ndx >= MDSS_MAX_HW_BLK)
+		return;
+
+	ndx_bit = BIT(hw->hw_ndx);
+
+	pr_debug("Disable HW=%d irq ena=%d mask=%x\n", hw->hw_ndx,
+			mdss_res->irq_ena, mdss_res->irq_mask);
+
+	spin_lock_irqsave(&mdss_lock, irq_flags);
+	if (!(mdss_res->irq_mask & ndx_bit)) {
+		pr_warn("MDSS HW ndx=%d is NOT set, mask=%x\n",
+			hw->hw_ndx, mdss_res->irq_mask);
+	} else {
+		mdss_irq_handlers[hw->hw_ndx] = NULL;
+		mdss_res->irq_mask &= ~ndx_bit;
+		if (mdss_res->irq_mask == 0) {
+			mdss_res->irq_ena = false;
+			disable_irq(mdss_res->irq);
+		}
+	}
+	spin_unlock_irqrestore(&mdss_lock, irq_flags);
+}
+EXPORT_SYMBOL(mdss_disable_irq);
+
+void mdss_disable_irq_nosync(struct mdss_hw *hw)
+{
+	u32 ndx_bit;
+
+	if (hw->hw_ndx >= MDSS_MAX_HW_BLK)
+		return;
+
+	ndx_bit = BIT(hw->hw_ndx);
+
+	pr_debug("Disable HW=%d irq ena=%d mask=%x\n", hw->hw_ndx,
+			mdss_res->irq_ena, mdss_res->irq_mask);
+
+	spin_lock(&mdss_lock);
+	if (!(mdss_res->irq_mask & ndx_bit)) {
+		pr_warn("MDSS HW ndx=%d is NOT set, mask=%x\n",
+			hw->hw_ndx, mdss_res->irq_mask);
+	} else {
+		mdss_irq_handlers[hw->hw_ndx] = NULL;
+		mdss_res->irq_mask &= ~ndx_bit;
+		if (mdss_res->irq_mask == 0) {
+			mdss_res->irq_ena = false;
+			disable_irq_nosync(mdss_res->irq);
+		}
+	}
+	spin_unlock(&mdss_lock);
+}
+EXPORT_SYMBOL(mdss_disable_irq_nosync);
+
+static int mdss_mdp_bus_scale_register(void)
+{
+	if (!mdss_res->bus_hdl) {
+		struct msm_bus_scale_pdata *bus_pdata = &mdp_bus_scale_table;
+		int i;
+
+		for (i = 0; i < bus_pdata->num_usecases; i++) {
+			mdp_bus_usecases[i].num_paths = 1;
+			mdp_bus_usecases[i].vectors = &mdp_bus_vectors[i];
+		}
+
+		mdss_res->bus_hdl = msm_bus_scale_register_client(bus_pdata);
+		if (!mdss_res->bus_hdl) {
+			pr_err("not able to get bus scale\n");
+			return -ENOMEM;
+		}
+
+		pr_debug("register bus_hdl=%x\n", mdss_res->bus_hdl);
+	}
+	return 0;
+}
+
+static void mdss_mdp_bus_scale_unregister(void)
+{
+	pr_debug("unregister bus_hdl=%x\n", mdss_res->bus_hdl);
+
+	if (mdss_res->bus_hdl)
+		msm_bus_scale_unregister_client(mdss_res->bus_hdl);
+}
+
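+/* Request the lowest pre-defined bus vote level whose ab bandwidth covers the
+ * requested quota. */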
+int mdss_mdp_bus_scale_set_min_quota(u32 quota)
+{
+	struct msm_bus_scale_pdata *bus_pdata = &mdp_bus_scale_table;
+	struct msm_bus_vectors *vect = NULL;
+	int lvl;
+
+	if (mdss_res->bus_hdl < 1) {
+		pr_err("invalid bus handle %d\n", mdss_res->bus_hdl);
+		return -EINVAL;
+	}
+
+	for (lvl = 0; lvl < bus_pdata->num_usecases; lvl++) {
+		if (bus_pdata->usecase[lvl].num_paths) {
+			vect = &bus_pdata->usecase[lvl].vectors[0];
+			if (vect->ab >= quota) {
+				pr_debug("lvl=%d quota=%u ab=%llu\n",
+						lvl, quota, vect->ab);
+				break;
+			}
+		}
+	}
+
+	if (lvl == bus_pdata->num_usecases) {
+		pr_warn("cannot match quota=%u try with max level\n", quota);
+		lvl--;
+	}
+
+	return msm_bus_scale_client_update_request(mdss_res->bus_hdl, lvl);
+}
+
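+/* Underrun and vsync interrupt status bits come in pairs per interface,
+ * hence the doubled interface offset. */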
+static inline u32 mdss_mdp_irq_mask(u32 intr_type, u32 intf_num)
+{
+	if (intr_type == MDSS_MDP_IRQ_INTF_UNDER_RUN ||
+	    intr_type == MDSS_MDP_IRQ_INTF_VSYNC)
+		intf_num = (intf_num - MDSS_MDP_INTF0) * 2;
+	return 1 << (intr_type + intf_num);
+}
+
 int mdss_mdp_irq_enable(u32 intr_type, u32 intf_num)
 {
 	u32 irq;
 	unsigned long irq_flags;
 	int ret = 0;
 
-	if (intr_type == MDSS_MDP_IRQ_INTF_UNDER_RUN ||
-	    intr_type == MDSS_MDP_IRQ_INTF_VSYNC)
-		intf_num = intf_num << 1;
-
-	irq =  BIT(intr_type + intf_num);
+	irq = mdss_mdp_irq_mask(intr_type, intf_num);
 
 	spin_lock_irqsave(&mdp_lock, irq_flags);
-	if (mdss_res->irq_mask & irq) {
-		pr_warn("MDSS IRQ-0x%x is already set, mask=%x irq=%d\n",
-			irq, mdss_res->irq_mask, mdss_res->irq_ena);
+	if (mdss_res->mdp_irq_mask & irq) {
+		pr_warn("MDSS MDP IRQ-0x%x is already set, mask=%x\n",
+				irq, mdss_res->mdp_irq_mask);
 		ret = -EBUSY;
 	} else {
-		mdss_res->irq_mask |= irq;
+		pr_debug("MDP IRQ mask old=%x new=%x\n",
+				mdss_res->mdp_irq_mask, irq);
+		mdss_res->mdp_irq_mask |= irq;
 		MDSS_MDP_REG_WRITE(MDSS_MDP_REG_INTR_CLEAR, irq);
-		MDSS_MDP_REG_WRITE(MDSS_MDP_REG_INTR_EN, mdss_res->irq_mask);
-		if (!mdss_res->irq_ena) {
-			mdss_res->irq_ena = true;
-			enable_irq(mdss_res->irq);
-		}
+		MDSS_MDP_REG_WRITE(MDSS_MDP_REG_INTR_EN,
+				mdss_res->mdp_irq_mask);
+		mdss_enable_irq(&mdss_mdp_hw);
 	}
 	spin_unlock_irqrestore(&mdp_lock, irq_flags);
 
@@ -125,24 +340,17 @@
 	u32 irq;
 	unsigned long irq_flags;
 
-
-	if (intr_type == MDSS_MDP_IRQ_INTF_UNDER_RUN ||
-	    intr_type == MDSS_MDP_IRQ_INTF_VSYNC)
-		intf_num = intf_num << 1;
-
-	irq = BIT(intr_type + intf_num);
+	irq = mdss_mdp_irq_mask(intr_type, intf_num);
 
 	spin_lock_irqsave(&mdp_lock, irq_flags);
-	if (!(mdss_res->irq_mask & irq)) {
-		pr_warn("MDSS IRQ-%x is NOT set, mask=%x irq=%d\n",
-			irq, mdss_res->irq_mask, mdss_res->irq_ena);
+	if (!(mdss_res->mdp_irq_mask & irq)) {
+		pr_warn("MDSS MDP IRQ-%x is NOT set, mask=%x\n",
+				irq, mdss_res->mdp_irq_mask);
 	} else {
-		mdss_res->irq_mask &= ~irq;
-		MDSS_MDP_REG_WRITE(MDSS_MDP_REG_INTR_EN, mdss_res->irq_mask);
-		if (!mdss_res->irq_mask) {
-			mdss_res->irq_ena = false;
-			disable_irq(mdss_res->irq);
-		}
+		mdss_res->mdp_irq_mask &= ~irq;
+		MDSS_MDP_REG_WRITE(MDSS_MDP_REG_INTR_EN,
+				mdss_res->mdp_irq_mask);
+		mdss_disable_irq(&mdss_mdp_hw);
 	}
 	spin_unlock_irqrestore(&mdp_lock, irq_flags);
 }
@@ -151,34 +359,114 @@
 {
 	u32 irq;
 
-	if (intr_type == MDSS_MDP_IRQ_INTF_UNDER_RUN ||
-	    intr_type == MDSS_MDP_IRQ_INTF_VSYNC)
-		intf_num = intf_num << 1;
-
-	irq = BIT(intr_type + intf_num);
+	irq = mdss_mdp_irq_mask(intr_type, intf_num);
 
 	spin_lock(&mdp_lock);
-	if (!(mdss_res->irq_mask & irq)) {
-		pr_warn("MDSS IRQ-%x is NOT set, mask=%x irq=%d\n",
-			irq, mdss_res->irq_mask, mdss_res->irq_ena);
+	if (!(mdss_res->mdp_irq_mask & irq)) {
+		pr_warn("MDSS MDP IRQ-%x is NOT set, mask=%x\n",
+				irq, mdss_res->mdp_irq_mask);
 	} else {
-		mdss_res->irq_mask &= ~irq;
-		MDSS_MDP_REG_WRITE(MDSS_MDP_REG_INTR_EN, mdss_res->irq_mask);
-		if (!mdss_res->irq_mask) {
-			mdss_res->irq_ena = false;
-			disable_irq_nosync(mdss_res->irq);
-		}
+		mdss_res->mdp_irq_mask &= ~irq;
+		MDSS_MDP_REG_WRITE(MDSS_MDP_REG_INTR_EN,
+				mdss_res->mdp_irq_mask);
+		mdss_disable_irq_nosync(&mdss_mdp_hw);
 	}
 	spin_unlock(&mdp_lock);
 }
 
+static inline struct clk *mdss_mdp_get_clk(u32 clk_idx)
+{
+	if (clk_idx < MDSS_MAX_CLK)
+		return mdss_res->mdp_clk[clk_idx];
+	return NULL;
+}
+
+static int mdss_mdp_clk_update(u32 clk_idx, u32 enable)
+{
+	int ret = -ENODEV;
+	struct clk *clk = mdss_mdp_get_clk(clk_idx);
+
+	if (clk) {
+		pr_debug("clk=%d en=%d\n", clk_idx, enable);
+		if (enable) {
+			ret = clk_prepare_enable(clk);
+		} else {
+			clk_disable_unprepare(clk);
+			ret = 0;
+		}
+	}
+	return ret;
+}
+
+int mdss_mdp_vsync_clk_enable(int enable)
+{
+	int ret = 0;
+	pr_debug("clk enable=%d\n", enable);
+	mutex_lock(&mdp_clk_lock);
+	if (mdss_res->vsync_ena != enable) {
+		mdss_res->vsync_ena = enable;
+		ret = mdss_mdp_clk_update(MDSS_CLK_MDP_VSYNC, enable);
+	}
+	mutex_unlock(&mdp_clk_lock);
+	return ret;
+}
+
+void mdss_mdp_set_clk_rate(unsigned long min_clk_rate)
+{
+	unsigned long clk_rate;
+	struct clk *clk = mdss_mdp_get_clk(MDSS_CLK_MDP_SRC);
+	if (clk) {
+		mutex_lock(&mdp_clk_lock);
+		clk_rate = clk_round_rate(clk, min_clk_rate);
+		if (IS_ERR_VALUE(clk_rate)) {
+			pr_err("unable to round rate err=%ld\n", clk_rate);
+		} else if (clk_rate != clk_get_rate(clk)) {
+			if (IS_ERR_VALUE(clk_set_rate(clk, clk_rate)))
+				pr_err("clk_set_rate failed\n");
+			else
+				pr_debug("mdp clk rate=%lu\n", clk_rate);
+		}
+		mutex_unlock(&mdp_clk_lock);
+	}
+}
+
+unsigned long mdss_mdp_get_clk_rate(u32 clk_idx)
+{
+	unsigned long clk_rate = 0;
+	struct clk *clk = mdss_mdp_get_clk(clk_idx);
+	mutex_lock(&mdp_clk_lock);
+	if (clk)
+		clk_rate = clk_get_rate(clk);
+	mutex_unlock(&mdp_clk_lock);
+
+	return clk_rate;
+}
+
 static void mdss_mdp_clk_ctrl_update(int enable)
 {
 	if (mdss_res->clk_ena == enable)
 		return;
 
 	pr_debug("MDP CLKS %s\n", (enable ? "Enable" : "Disable"));
+
+	mutex_lock(&mdp_clk_lock);
 	mdss_res->clk_ena = enable;
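+	/* Make the clk_ena update visible before the clocks are toggled */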
+	mb();
+
+	mdss_mdp_clk_update(MDSS_CLK_AHB, enable);
+	mdss_mdp_clk_update(MDSS_CLK_AXI, enable);
+
+	mdss_mdp_clk_update(MDSS_CLK_MDP_CORE, enable);
+	mdss_mdp_clk_update(MDSS_CLK_MDP_LUT, enable);
+	if (mdss_res->vsync_ena)
+		mdss_mdp_clk_update(MDSS_CLK_MDP_VSYNC, enable);
+
+	mutex_unlock(&mdp_clk_lock);
+}
+
+static void mdss_mdp_clk_ctrl_workqueue_handler(struct work_struct *work)
+{
+	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false);
 }
 
 void mdss_mdp_clk_ctrl(int enable, int isr)
@@ -233,14 +521,30 @@
 	}
 }
 
-static void mdss_mdp_clk_ctrl_workqueue_handler(struct work_struct *work)
+static inline int mdss_mdp_irq_clk_register(struct platform_device *pdev,
+					    char *clk_name, int clk_idx)
 {
-	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false);
+	struct clk *tmp;
+	if (clk_idx >= MDSS_MAX_CLK) {
+		pr_err("invalid clk index %d\n", clk_idx);
+		return -EINVAL;
+	}
+
+	tmp = clk_get(&pdev->dev, clk_name);
+	if (IS_ERR(tmp)) {
+		pr_err("unable to get clk: %s\n", clk_name);
+		return PTR_ERR(tmp);
+	}
+
+	mdss_res->mdp_clk[clk_idx] = tmp;
+	return 0;
 }
 
-static int mdss_mdp_irq_clk_setup(void)
+static int mdss_mdp_irq_clk_setup(struct platform_device *pdev)
 {
 	int ret;
+	int i;
 
 	ret = request_irq(mdss_res->irq, mdss_irq_handler, IRQF_DISABLED,
 			  "MDSS", 0);
@@ -250,15 +554,38 @@
 	}
 	disable_irq(mdss_res->irq);
 
-	mdss_res->fs = regulator_get(NULL, "fs_mdp");
-	if (IS_ERR(mdss_res->fs))
+	mdss_res->fs = regulator_get(NULL, "gdsc_mdss");
+	if (IS_ERR_OR_NULL(mdss_res->fs)) {
 		mdss_res->fs = NULL;
-	else {
-		regulator_enable(mdss_res->fs);
-		mdss_res->fs_ena = true;
+		pr_err("unable to get gdsc_mdss regulator\n");
+		goto error;
 	}
+	regulator_enable(mdss_res->fs);
+
+	if (mdss_mdp_irq_clk_register(pdev, "bus_clk", MDSS_CLK_AXI) ||
+	    mdss_mdp_irq_clk_register(pdev, "iface_clk", MDSS_CLK_AHB) ||
+	    mdss_mdp_irq_clk_register(pdev, "core_clk_src", MDSS_CLK_MDP_SRC) ||
+	    mdss_mdp_irq_clk_register(pdev, "core_clk", MDSS_CLK_MDP_CORE) ||
+	    mdss_mdp_irq_clk_register(pdev, "lut_clk", MDSS_CLK_MDP_LUT) ||
+	    mdss_mdp_irq_clk_register(pdev, "vsync_clk", MDSS_CLK_MDP_VSYNC))
+		goto error;
+
+	mdss_mdp_set_clk_rate(MDP_CLK_DEFAULT_RATE);
+	pr_debug("mdp clk rate=%ld\n", mdss_mdp_get_clk_rate(MDSS_CLK_MDP_SRC));
 
 	return 0;
+error:
+	for (i = 0; i < MDSS_MAX_CLK; i++) {
+		if (mdss_res->mdp_clk[i])
+			clk_put(mdss_res->mdp_clk[i]);
+	}
+	if (mdss_res->fs)
+		regulator_put(mdss_res->fs);
+	if (mdss_res->irq)
+		free_irq(mdss_res->irq, 0);
+
+	return -EINVAL;
 }
 
 static struct msm_panel_common_pdata *mdss_mdp_populate_pdata(
@@ -272,11 +599,11 @@
 	return pdata;
 }
 
-static u32 mdss_mdp_res_init(void)
+static u32 mdss_mdp_res_init(struct platform_device *pdev)
 {
 	u32 rc;
 
-	rc = mdss_mdp_irq_clk_setup();
+	rc = mdss_mdp_irq_clk_setup(pdev);
 	if (rc)
 		return rc;
 
@@ -365,12 +692,12 @@
 		goto probe_done;
 	}
 
-	rc = mdss_mdp_res_init();
+	rc = mdss_mdp_res_init(pdev);
 	if (rc) {
 		pr_err("unable to initialize mdss mdp resources\n");
 		goto probe_done;
 	}
-
+	rc = mdss_mdp_bus_scale_register();
 probe_done:
 	if (IS_ERR_VALUE(rc)) {
 		if (mdss_res) {
@@ -448,6 +775,7 @@
 		regulator_put(mdss_res->fs);
 	iounmap(mdss_reg_base);
 	pm_runtime_disable(&pdev->dev);
+	mdss_mdp_bus_scale_unregister();
 	return 0;
 }
 
diff --git a/drivers/video/msm/mdss/mdss_mdp.h b/drivers/video/msm/mdss/mdss_mdp.h
index c65d5a7..4489fbb 100644
--- a/drivers/video/msm/mdss/mdss_mdp.h
+++ b/drivers/video/msm/mdss/mdss_mdp.h
@@ -93,9 +93,18 @@
 	MDSS_MDP_BLOCK_MAX
 };
 
+enum mdss_mdp_csc_type {
+	MDSS_MDP_CSC_RGB2RGB,
+	MDSS_MDP_CSC_YUV2RGB,
+	MDSS_MDP_CSC_RGB2YUV,
+	MDSS_MDP_CSC_YUV2YUV,
+	MDSS_MDP_MAX_CSC
+};
+
 struct mdss_mdp_ctl {
 	u32 num;
 	u32 ref_cnt;
+	int power_on;
 
 	u32 intf_num;
 	u32 intf_type;
@@ -109,6 +118,8 @@
 	u16 height;
 	u32 dst_format;
 
+	u32 bus_quota;
+
 	struct msm_fb_data_type *mfd;
 	struct mdss_mdp_mixer *mixer_left;
 	struct mdss_mdp_mixer *mixer_right;
@@ -133,6 +144,8 @@
 	u8 cursor_enabled;
 	u8 rotator_mode;
 
+	u32 bus_quota;
+
 	struct mdss_mdp_ctl *ctl;
 	struct mdss_mdp_pipe *stage_pipe[MDSS_MDP_MAX_STAGE];
 };
@@ -218,6 +231,7 @@
 	struct mdss_mdp_format_params *src_fmt;
 	struct mdss_mdp_plane_sizes src_planes;
 
+	u32 bus_quota;
 	u8 mixer_stage;
 	u8 is_fg;
 	u8 alpha;
@@ -233,6 +247,12 @@
 	unsigned long smp[MAX_PLANES];
 };
 
+struct mdss_mdp_writeback_arg {
+	struct mdss_mdp_data *data;
+	void (*callback_fnc) (void *arg);
+	void *priv_data;
+};
+
 static inline void mdss_mdp_ctl_write(struct mdss_mdp_ctl *ctl,
 				      u32 reg, u32 val)
 {
@@ -253,17 +273,22 @@
 int mdss_mdp_set_intr_callback(u32 intr_type, u32 intf_num,
 			       void (*fnc_ptr)(void *), void *arg);
 
+int mdss_mdp_bus_scale_set_min_quota(u32 quota);
+void mdss_mdp_set_clk_rate(unsigned long min_clk_rate);
 unsigned long mdss_mdp_get_clk_rate(u32 clk_idx);
 int mdss_mdp_vsync_clk_enable(int enable);
 void mdss_mdp_clk_ctrl(int enable, int isr);
 void mdss_mdp_footswitch_ctrl(int on);
 
 int mdss_mdp_overlay_init(struct msm_fb_data_type *mfd);
+int mdss_mdp_video_start(struct mdss_mdp_ctl *ctl);
 int mdss_mdp_writeback_start(struct mdss_mdp_ctl *ctl);
 
 int mdss_mdp_ctl_on(struct msm_fb_data_type *mfd);
 int mdss_mdp_ctl_off(struct msm_fb_data_type *mfd);
 
+struct mdss_mdp_mixer *mdss_mdp_wb_mixer_alloc(int rotator);
+int mdss_mdp_wb_mixer_destroy(struct mdss_mdp_mixer *mixer);
 struct mdss_mdp_mixer *mdss_mdp_mixer_get(struct mdss_mdp_ctl *ctl, int mux);
 struct mdss_mdp_pipe *mdss_mdp_mixer_stage_pipe(struct mdss_mdp_ctl *ctl,
 						int mux, int stage);
@@ -271,6 +296,9 @@
 int mdss_mdp_mixer_pipe_unstage(struct mdss_mdp_pipe *pipe);
 int mdss_mdp_display_commit(struct mdss_mdp_ctl *ctl, void *arg);
 
+int mdss_mdp_csc_setup(u32 block, u32 blk_idx, u32 tbl_idx, u32 csc_type);
+int mdss_mdp_dspp_setup(struct mdss_mdp_ctl *ctl, struct mdss_mdp_mixer *mixer);
+
 struct mdss_mdp_pipe *mdss_mdp_pipe_alloc_pnum(u32 pnum);
 struct mdss_mdp_pipe *mdss_mdp_pipe_alloc_locked(u32 type);
 struct mdss_mdp_pipe *mdss_mdp_pipe_get_locked(u32 ndx);
@@ -291,4 +319,7 @@
 int mdss_mdp_get_img(struct ion_client *iclient, struct msmfb_data *img,
 		     struct mdss_mdp_img_data *data);
 
+int mdss_mdp_wb_kickoff(struct mdss_mdp_ctl *ctl);
+int mdss_mdp_wb_ioctl_handler(struct msm_fb_data_type *mfd, u32 cmd, void *arg);
+
 #endif /* MDSS_MDP_H */
diff --git a/drivers/video/msm/mdss/mdss_mdp_ctl.c b/drivers/video/msm/mdss/mdss_mdp_ctl.c
index d89347e..c80527d 100644
--- a/drivers/video/msm/mdss/mdss_mdp_ctl.c
+++ b/drivers/video/msm/mdss/mdss_mdp_ctl.c
@@ -20,10 +20,127 @@
 #include "mdss_fb.h"
 #include "mdss_mdp.h"
 
+enum {
+	MDSS_MDP_BUS_UPDATE_SKIP,
+	MDSS_MDP_BUS_UPDATE_EARLY,
+	MDSS_MDP_BUS_UPDATE_LATE,
+};
+
 static DEFINE_MUTEX(mdss_mdp_ctl_lock);
 static struct mdss_mdp_ctl mdss_mdp_ctl_list[MDSS_MDP_MAX_CTL];
 static struct mdss_mdp_mixer mdss_mdp_mixer_list[MDSS_MDP_MAX_LAYERMIXER];
 
+static int mdss_mdp_ctl_update_clk_rate(void)
+{
+	struct mdss_mdp_ctl *ctl;
+	int cnum;
+	unsigned long clk_rate = MDP_CLK_DEFAULT_RATE;
+
+	mutex_lock(&mdss_mdp_ctl_lock);
+	for (cnum = 0; cnum < MDSS_MDP_MAX_CTL; cnum++) {
+		ctl = &mdss_mdp_ctl_list[cnum];
+		if (ctl->power_on && ctl->mfd) {
+			unsigned long tmp;
+			pr_debug("ctl=%d pclk_rate=%u\n", ctl->num,
+					ctl->mfd->panel_info.clk_rate);
+			tmp = (ctl->mfd->panel_info.clk_rate * 23) / 20;
+			if (tmp > clk_rate)
+				clk_rate = tmp;
+		}
+	}
+	mdss_mdp_set_clk_rate(clk_rate);
+	mutex_unlock(&mdss_mdp_ctl_lock);
+
+	return 0;
+}
+
+static int mdss_mdp_ctl_update_bus_scale(void)
+{
+	struct mdss_mdp_ctl *ctl;
+	int cnum;
+	u32 bus_quota = 0;
+
+	mutex_lock(&mdss_mdp_ctl_lock);
+	for (cnum = 0; cnum < MDSS_MDP_MAX_CTL; cnum++) {
+		ctl = &mdss_mdp_ctl_list[cnum];
+		if (ctl->power_on)
+			bus_quota += ctl->bus_quota;
+	}
+	mdss_mdp_bus_scale_set_min_quota(bus_quota);
+	mutex_unlock(&mdss_mdp_ctl_lock);
+
+	return 0;
+}
+
+static void mdss_mdp_bus_update_pipe_quota(struct mdss_mdp_pipe *pipe)
+{
+	u32 quota;
+
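+	/* Worst case: the full source surface is refreshed at 60 fps */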
+	quota = pipe->img_width * pipe->img_height * 60 * pipe->src_fmt->bpp;
+	quota = (quota * 5) / 4; /* 1.25 factor */
+
+	pr_debug("pipe=%d quota old=%u new=%u\n", pipe->num,
+		   pipe->bus_quota, quota);
+	pipe->bus_quota = quota;
+}
+
+static int mdss_mdp_bus_update_mixer_quota(struct mdss_mdp_mixer *mixer)
+{
+	struct mdss_mdp_pipe *pipe;
+	u32 quota, stage;
+
+	if (!mixer)
+		return 0;
+
+	quota = 0;
+	for (stage = 0; stage < MDSS_MDP_MAX_STAGE; stage++) {
+		pipe = mixer->stage_pipe[stage];
+		if (pipe == NULL)
+			continue;
+
+		quota += pipe->bus_quota;
+	}
+
+	pr_debug("mixer=%d quota old=%u new=%u\n", mixer->num,
+		   mixer->bus_quota, quota);
+
+	if (quota != mixer->bus_quota) {
+		mixer->bus_quota = quota;
+		return 1;
+	}
+
+	return 0;
+}
+
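+/* Report whether the combined quota grew (raise the bus vote before the
+ * frame) or shrank (lower it after the frame), or needs no change. */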
+static int mdss_mdp_bus_update_ctl_quota(struct mdss_mdp_ctl *ctl)
+{
+	int ret = MDSS_MDP_BUS_UPDATE_SKIP;
+
+	if (mdss_mdp_bus_update_mixer_quota(ctl->mixer_left) ||
+			mdss_mdp_bus_update_mixer_quota(ctl->mixer_right)) {
+		u32 quota = 0;
+
+		if (ctl->mixer_left)
+			quota += ctl->mixer_left->bus_quota;
+		if (ctl->mixer_right)
+			quota += ctl->mixer_right->bus_quota;
+
+		pr_debug("ctl=%d quota old=%u new=%u\n",
+			   ctl->num, ctl->bus_quota, quota);
+
+		if (quota != ctl->bus_quota) {
+			if (quota > ctl->bus_quota)
+				ret = MDSS_MDP_BUS_UPDATE_EARLY;
+			else
+				ret = MDSS_MDP_BUS_UPDATE_LATE;
+
+			ctl->bus_quota = quota;
+		}
+	}
+
+	return ret;
+}
+
 static struct mdss_mdp_ctl *mdss_mdp_ctl_alloc(void)
 {
 	struct mdss_mdp_ctl *ctl = NULL;
@@ -110,6 +227,71 @@
 	return 0;
 }
 
+struct mdss_mdp_mixer *mdss_mdp_wb_mixer_alloc(int rotator)
+{
+	struct mdss_mdp_ctl *ctl = NULL;
+	struct mdss_mdp_mixer *mixer = NULL;
+
+	ctl = mdss_mdp_ctl_alloc();
+
+	if (!ctl)
+		return NULL;
+
+	mixer = mdss_mdp_mixer_alloc(MDSS_MDP_MIXER_TYPE_WRITEBACK);
+	if (!mixer)
+		goto error;
+
+	mixer->rotator_mode = rotator;
+
+	switch (mixer->num) {
+	case MDSS_MDP_LAYERMIXER3:
+		ctl->opmode = (rotator ? MDSS_MDP_CTL_OP_ROT0_MODE :
+			       MDSS_MDP_CTL_OP_WB0_MODE);
+		break;
+	case MDSS_MDP_LAYERMIXER4:
+		ctl->opmode = (rotator ? MDSS_MDP_CTL_OP_ROT1_MODE :
+			       MDSS_MDP_CTL_OP_WB1_MODE);
+		break;
+	default:
+		pr_err("invalid layer mixer=%d\n", mixer->num);
+		goto error;
+	}
+
+	ctl->mixer_left = mixer;
+	mixer->ctl = ctl;
+
+	ctl->start_fnc = mdss_mdp_writeback_start;
+
+	if (ctl->start_fnc)
+		ctl->start_fnc(ctl);
+
+	return mixer;
+error:
+	if (mixer)
+		mdss_mdp_mixer_free(mixer);
+	if (ctl)
+		mdss_mdp_ctl_free(ctl);
+
+	return NULL;
+}
+
+int mdss_mdp_wb_mixer_destroy(struct mdss_mdp_mixer *mixer)
+{
+	struct mdss_mdp_ctl *ctl;
+
+	ctl = mixer->ctl;
+
+	pr_debug("destroy ctl=%d mixer=%d\n", ctl->num, mixer->num);
+
+	if (ctl->stop_fnc)
+		ctl->stop_fnc(ctl);
+
+	mdss_mdp_mixer_free(mixer);
+	mdss_mdp_ctl_free(ctl);
+
+	return 0;
+}
+
 static int mdss_mdp_ctl_init(struct msm_fb_data_type *mfd)
 {
 	struct mdss_mdp_ctl *ctl;
@@ -166,6 +348,27 @@
 	}
 
 	switch (mfd->panel_info.type) {
+	case EDP_PANEL:
+		ctl->intf_num = MDSS_MDP_INTF0;
+		ctl->intf_type = MDSS_INTF_EDP;
+		ctl->opmode = MDSS_MDP_CTL_OP_VIDEO_MODE;
+		ctl->start_fnc = mdss_mdp_video_start;
+		break;
+	case MIPI_VIDEO_PANEL:
+		if (mfd->panel_info.pdest == DISPLAY_1)
+			ctl->intf_num = MDSS_MDP_INTF1;
+		else
+			ctl->intf_num = MDSS_MDP_INTF2;
+		ctl->intf_type = MDSS_INTF_DSI;
+		ctl->opmode = MDSS_MDP_CTL_OP_VIDEO_MODE;
+		ctl->start_fnc = mdss_mdp_video_start;
+		break;
+	case DTV_PANEL:
+		ctl->intf_num = MDSS_MDP_INTF3;
+		ctl->intf_type = MDSS_INTF_HDMI;
+		ctl->opmode = MDSS_MDP_CTL_OP_VIDEO_MODE;
+		ctl->start_fnc = mdss_mdp_video_start;
+		break;
 	case WRITEBACK_PANEL:
 		ctl->intf_num = MDSS_MDP_NO_INTF;
 		ctl->opmode = MDSS_MDP_CTL_OP_WFD_MODE;
@@ -237,6 +440,10 @@
 	ctl = mfd->ctl;
 
 	mutex_lock(&ctl->lock);
+
+	ctl->power_on = true;
+	mdss_mdp_ctl_update_clk_rate();
+
 	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON, false);
 	if (ctl->start_fnc)
 		ret = ctl->start_fnc(ctl);
@@ -255,7 +462,7 @@
 	mixer->params_changed++;
 
 	temp = MDSS_MDP_REG_READ(MDSS_MDP_REG_DISP_INTF_SEL);
-	temp |= (ctl->intf_type << (ctl->intf_num * 8));
+	temp |= (ctl->intf_type << ((ctl->intf_num - MDSS_MDP_INTF0) * 8));
 	MDSS_MDP_REG_WRITE(MDSS_MDP_REG_DISP_INTF_SEL, temp);
 
 	outsize = (mixer->height << 16) | mixer->width;
@@ -308,6 +515,8 @@
 	pr_debug("ctl_num=%d\n", mfd->ctl->num);
 
 	mutex_lock(&ctl->lock);
+	ctl->power_on = false;
+
 	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON, false);
 	if (ctl->stop_fnc)
 		ret = ctl->stop_fnc(ctl);
@@ -321,6 +530,10 @@
 	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false);
 
 	ctl->play_cnt = 0;
+
+	mdss_mdp_ctl_update_bus_scale();
+	mdss_mdp_ctl_update_clk_rate();
+
 	mutex_unlock(&ctl->lock);
 
 	mdss_mdp_pipe_release_all(mfd);
@@ -328,7 +541,6 @@
 	if (!mfd->ref_cnt)
 		mdss_mdp_ctl_destroy(mfd);
 
-
 	return ret;
 }
 
@@ -493,6 +705,7 @@
 	if (params_changed) {
 		mixer->params_changed++;
 		mixer->stage_pipe[pipe->mixer_stage] = pipe;
+		mdss_mdp_bus_update_pipe_quota(pipe);
 	}
 
 	if (pipe->type == MDSS_MDP_PIPE_TYPE_DMA)
@@ -537,6 +750,9 @@
 {
 	mixer->params_changed = 0;
 
+	if (mixer->type == MDSS_MDP_MIXER_TYPE_INTF)
+		mdss_mdp_dspp_setup(mixer->ctl, mixer);
+
 	/* skip mixer setup for rotator */
 	if (!mixer->rotator_mode)
 		mdss_mdp_mixer_setup(mixer->ctl, mixer);
@@ -548,6 +764,7 @@
 {
 	int mixer1_changed, mixer2_changed;
 	int ret = 0;
+	int bus_update = MDSS_MDP_BUS_UPDATE_SKIP;
 
 	if (!ctl) {
 		pr_err("display function not set\n");
@@ -564,6 +781,8 @@
 
 	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON, false);
 	if (mixer1_changed || mixer2_changed) {
+		bus_update = mdss_mdp_bus_update_ctl_quota(ctl);
+
 		if (ctl->prepare_fnc)
 			ret = ctl->prepare_fnc(ctl, arg);
 		if (ret) {
@@ -571,6 +790,9 @@
 			goto done;
 		}
 
+		if (bus_update == MDSS_MDP_BUS_UPDATE_EARLY)
+			mdss_mdp_ctl_update_bus_scale();
+
 		if (mixer1_changed)
 			mdss_mdp_mixer_update(ctl->mixer_left);
 		if (mixer2_changed)
@@ -591,6 +813,9 @@
 
 	ctl->play_cnt++;
 
+	if (bus_update == MDSS_MDP_BUS_UPDATE_LATE)
+		mdss_mdp_ctl_update_bus_scale();
+
 done:
 	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false);
 
diff --git a/drivers/video/msm/mdss/mdss_mdp_intf_video.c b/drivers/video/msm/mdss/mdss_mdp_intf_video.c
new file mode 100644
index 0000000..21ef290
--- /dev/null
+++ b/drivers/video/msm/mdss/mdss_mdp_intf_video.c
@@ -0,0 +1,319 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt)	"%s: " fmt, __func__
+
+#include "mdss_fb.h"
+#include "mdss_mdp.h"
+
+/* intf timing settings */
+struct intf_timing_params {
+	u32 width;
+	u32 height;
+	u32 xres;
+	u32 yres;
+
+	u32 h_back_porch;
+	u32 h_front_porch;
+	u32 v_back_porch;
+	u32 v_front_porch;
+	u32 hsync_pulse_width;
+	u32 vsync_pulse_width;
+
+	u32 border_clr;
+	u32 underflow_clr;
+	u32 hsync_skew;
+};
+
+#define MAX_SESSIONS 3
+struct mdss_mdp_video_ctx {
+	u32 ctl_num;
+	u32 pp_num;
+	u8 ref_cnt;
+
+	u8 timegen_en;
+	struct completion pp_comp;
+	struct completion vsync_comp;
+};
+
+struct mdss_mdp_video_ctx mdss_mdp_video_ctx_list[MAX_SESSIONS];
+
+static int mdss_mdp_video_timegen_setup(struct mdss_mdp_ctl *ctl,
+					struct intf_timing_params *p)
+{
+	u32 hsync_period, vsync_period;
+	u32 hsync_start_x, hsync_end_x, display_v_start, display_v_end;
+	u32 active_h_start, active_h_end, active_v_start, active_v_end;
+	u32 display_hctl, active_hctl, hsync_ctl, polarity_ctl;
+	int off;
+
+	off = MDSS_MDP_REG_INTF_OFFSET(ctl->intf_num);
+
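+	/* hsync period is in pixel clocks, vsync period in lines; both
+	 * include porches and sync pulse widths */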
+	hsync_period = p->hsync_pulse_width + p->h_back_porch +
+			p->width + p->h_front_porch;
+	vsync_period = p->vsync_pulse_width + p->v_back_porch +
+			p->height + p->v_front_porch;
+
+	display_v_start = ((p->vsync_pulse_width + p->v_back_porch) *
+			hsync_period) + p->hsync_skew;
+	display_v_end = ((vsync_period - p->v_front_porch) * hsync_period) +
+			p->hsync_skew - 1;
+
+	if (ctl->intf_type == MDSS_INTF_EDP) {
+		display_v_start += p->hsync_pulse_width + p->h_back_porch;
+		display_v_end -= p->h_front_porch;
+	}
+
+	hsync_start_x = p->h_back_porch + p->hsync_pulse_width;
+	hsync_end_x = hsync_period - p->h_front_porch - 1;
+
+	if (p->width != p->xres) {
+		active_h_start = hsync_start_x;
+		active_h_end = active_h_start + p->xres - 1;
+	} else {
+		active_h_start = 0;
+		active_h_end = 0;
+	}
+
+	if (p->height != p->yres) {
+		active_v_start = display_v_start;
+		active_v_end = active_v_start + (p->yres * hsync_period) - 1;
+	} else {
+		active_v_start = 0;
+		active_v_end = 0;
+	}
+
+
+	if (active_h_end) {
+		active_hctl = (active_h_end << 16) | active_h_start;
+		active_hctl |= BIT(31);	/* ACTIVE_H_ENABLE */
+	} else {
+		active_hctl = 0;
+	}
+
+	if (active_v_end)
+		active_v_start |= BIT(31); /* ACTIVE_V_ENABLE */
+
+	hsync_ctl = (hsync_period << 16) | p->hsync_pulse_width;
+	display_hctl = (hsync_end_x << 16) | hsync_start_x;
+	polarity_ctl = (0 << 2) |	/* DEN Polarity */
+		       (0 << 1) |      /* VSYNC Polarity */
+		       (0);	       /* HSYNC Polarity */
+
+	MDSS_MDP_REG_WRITE(off + MDSS_MDP_REG_INTF_HSYNC_CTL, hsync_ctl);
+	MDSS_MDP_REG_WRITE(off + MDSS_MDP_REG_INTF_VSYNC_PERIOD_F0,
+			   vsync_period * hsync_period);
+	MDSS_MDP_REG_WRITE(off + MDSS_MDP_REG_INTF_VSYNC_PULSE_WIDTH_F0,
+			   p->vsync_pulse_width * hsync_period);
+	MDSS_MDP_REG_WRITE(off + MDSS_MDP_REG_INTF_DISPLAY_HCTL,
+			   display_hctl);
+	MDSS_MDP_REG_WRITE(off + MDSS_MDP_REG_INTF_DISPLAY_V_START_F0,
+			   display_v_start);
+	MDSS_MDP_REG_WRITE(off + MDSS_MDP_REG_INTF_DISPLAY_V_END_F0,
+			   display_v_end);
+	MDSS_MDP_REG_WRITE(off + MDSS_MDP_REG_INTF_ACTIVE_HCTL, active_hctl);
+	MDSS_MDP_REG_WRITE(off + MDSS_MDP_REG_INTF_ACTIVE_V_START_F0,
+			   active_v_start);
+	MDSS_MDP_REG_WRITE(off + MDSS_MDP_REG_INTF_ACTIVE_V_END_F0,
+			   active_v_end);
+
+	MDSS_MDP_REG_WRITE(off + MDSS_MDP_REG_INTF_BORDER_COLOR,
+			   p->border_clr);
+	MDSS_MDP_REG_WRITE(off + MDSS_MDP_REG_INTF_UNDERFLOW_COLOR,
+			   p->underflow_clr);
+	MDSS_MDP_REG_WRITE(off + MDSS_MDP_REG_INTF_HSYNC_SKEW,
+			   p->hsync_skew);
+	MDSS_MDP_REG_WRITE(off + MDSS_MDP_REG_INTF_POLARITY_CTL,
+			   polarity_ctl);
+
+	return 0;
+}
+
+static int mdss_mdp_video_stop(struct mdss_mdp_ctl *ctl)
+{
+	struct mdss_mdp_video_ctx *ctx;
+	int off;
+
+	pr_debug("stop ctl=%d\n", ctl->num);
+
+	ctx = (struct mdss_mdp_video_ctx *) ctl->priv_data;
+	if (!ctx) {
+		pr_err("invalid ctx for ctl=%d\n", ctl->num);
+		return -ENODEV;
+	}
+
+	if (ctx->timegen_en) {
+		off = MDSS_MDP_REG_INTF_OFFSET(ctl->intf_num);
+		MDSS_MDP_REG_WRITE(off + MDSS_MDP_REG_INTF_TIMING_ENGINE_EN, 0);
+		mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false);
+		ctx->timegen_en = false;
+	}
+
+	memset(ctx, 0, sizeof(*ctx));
+
+	return 0;
+}
+
+static void mdss_mdp_video_pp_intr_done(void *arg)
+{
+	struct mdss_mdp_video_ctx *ctx;
+
+	ctx = (struct mdss_mdp_video_ctx *) arg;
+	if (!ctx) {
+		pr_err("invalid ctx\n");
+		return;
+	}
+
+	pr_debug("intr mixer=%d\n", ctx->pp_num);
+
+	complete(&ctx->pp_comp);
+}
+
+static void mdss_mdp_video_vsync_intr_done(void *arg)
+{
+	struct mdss_mdp_video_ctx *ctx;
+
+	ctx = (struct mdss_mdp_video_ctx *) arg;
+	if (!ctx) {
+		pr_err("invalid ctx\n");
+		return;
+	}
+
+	pr_debug("intr ctl=%d\n", ctx->ctl_num);
+
+	complete(&ctx->vsync_comp);
+}
+
+static int mdss_mdp_video_prepare(struct mdss_mdp_ctl *ctl, void *arg)
+{
+	struct mdss_mdp_video_ctx *ctx;
+
+	ctx = (struct mdss_mdp_video_ctx *) ctl->priv_data;
+	if (!ctx) {
+		pr_err("invalid ctx\n");
+		return -ENODEV;
+	}
+
+	if (ctx->timegen_en) {
+		u32 intr_type = MDSS_MDP_IRQ_PING_PONG_COMP;
+
+		pr_debug("waiting for ping pong %d done\n", ctx->pp_num);
+		mdss_mdp_set_intr_callback(intr_type, ctx->pp_num,
+					   mdss_mdp_video_pp_intr_done, ctx);
+		mdss_mdp_irq_enable(intr_type, ctx->pp_num);
+
+		wait_for_completion_interruptible(&ctx->pp_comp);
+		mdss_mdp_irq_disable(intr_type, ctx->pp_num);
+	}
+
+	return 0;
+}
+
+static int mdss_mdp_video_display(struct mdss_mdp_ctl *ctl, void *arg)
+{
+	struct mdss_mdp_video_ctx *ctx;
+	u32 intr_type = MDSS_MDP_IRQ_INTF_VSYNC;
+
+	pr_debug("kickoff ctl=%d\n", ctl->num);
+
+	ctx = (struct mdss_mdp_video_ctx *) ctl->priv_data;
+	if (!ctx) {
+		pr_err("invalid ctx\n");
+		return -ENODEV;
+	}
+	mdss_mdp_set_intr_callback(intr_type, ctl->intf_num,
+				   mdss_mdp_video_vsync_intr_done, ctx);
+	mdss_mdp_irq_enable(intr_type, ctl->intf_num);
+
+	if (!ctx->timegen_en) {
+		int off = MDSS_MDP_REG_INTF_OFFSET(ctl->intf_num);
+
+		pr_debug("enabling timing gen for intf=%d\n", ctl->intf_num);
+
+		mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON, false);
+		MDSS_MDP_REG_WRITE(off + MDSS_MDP_REG_INTF_TIMING_ENGINE_EN, 1);
+		ctx->timegen_en = true;
+		wmb();
+	}
+
+	wait_for_completion_interruptible(&ctx->vsync_comp);
+	mdss_mdp_irq_disable(intr_type, ctl->intf_num);
+
+	return 0;
+}
+
+int mdss_mdp_video_start(struct mdss_mdp_ctl *ctl)
+{
+	struct msm_fb_data_type *mfd;
+	struct mdss_panel_info *pinfo;
+	struct mdss_mdp_video_ctx *ctx;
+	struct mdss_mdp_mixer *mixer;
+	struct intf_timing_params itp = {0};
+	struct fb_info *fbi;
+	int i;
+
+	mfd = ctl->mfd;
+	fbi = mfd->fbi;
+	pinfo = &mfd->panel_info;
+	mixer = mdss_mdp_mixer_get(ctl, MDSS_MDP_MIXER_MUX_LEFT);
+
+	if (!mixer) {
+		pr_err("mixer not setup correctly\n");
+		return -ENODEV;
+	}
+
+	pr_debug("start ctl=%u\n", ctl->num);
+
+	for (i = 0; i < MAX_SESSIONS; i++) {
+		ctx = &mdss_mdp_video_ctx_list[i];
+		if (ctx->ref_cnt == 0) {
+			ctx->ref_cnt++;
+			break;
+		}
+	}
+	if (i == MAX_SESSIONS) {
+		pr_err("too many sessions\n");
+		return -ENOMEM;
+	}
+	ctl->priv_data = ctx;
+	ctx->ctl_num = ctl->num;
+	ctx->pp_num = mixer->num;
+	init_completion(&ctx->pp_comp);
+	init_completion(&ctx->vsync_comp);
+
+	itp.width = pinfo->xres + pinfo->lcdc.xres_pad;
+	itp.height = pinfo->yres + pinfo->lcdc.yres_pad;
+	itp.border_clr = pinfo->lcdc.border_clr;
+	itp.underflow_clr = pinfo->lcdc.underflow_clr;
+	itp.hsync_skew = pinfo->lcdc.hsync_skew;
+
+	itp.xres = fbi->var.xres;
+	itp.yres = fbi->var.yres;
+	itp.h_back_porch = fbi->var.left_margin;
+	itp.h_front_porch = fbi->var.right_margin;
+	itp.v_back_porch = fbi->var.upper_margin;
+	itp.v_front_porch = fbi->var.lower_margin;
+	itp.hsync_pulse_width = fbi->var.hsync_len;
+	itp.vsync_pulse_width = fbi->var.vsync_len;
+
+	if (mdss_mdp_video_timegen_setup(ctl, &itp)) {
+		pr_err("unable to get timing parameters\n");
+		ctx->ref_cnt = 0;	/* release the session slot */
+		return -EINVAL;
+	}
+
+	ctl->stop_fnc = mdss_mdp_video_stop;
+	ctl->prepare_fnc = mdss_mdp_video_prepare;
+	ctl->display_fnc = mdss_mdp_video_display;
+
+	return 0;
+}
diff --git a/drivers/video/msm/mdss/mdss_mdp_intf_writeback.c b/drivers/video/msm/mdss/mdss_mdp_intf_writeback.c
index 99d4b4c..af422b7 100644
--- a/drivers/video/msm/mdss/mdss_mdp_intf_writeback.c
+++ b/drivers/video/msm/mdss/mdss_mdp_intf_writeback.c
@@ -15,6 +15,7 @@
 
 #include "mdss_fb.h"
 #include "mdss_mdp.h"
+#include "mdss_mdp_rotator.h"
 
 #define ROT_BLK_SIZE	128
 
@@ -38,9 +39,12 @@
 	u16 height;
 	u8 rot90;
 
-	struct completion comp;
+	int initialized;
+
 	struct mdss_mdp_plane_sizes dst_planes;
-	struct mdss_mdp_data wb_data;
+
+	void (*callback_fnc) (void *arg);
+	void *callback_arg;
 };
 
 static struct mdss_mdp_writeback_ctx wb_ctx_list[MDSS_MDP_MAX_WRITEBACK] = {
@@ -71,8 +75,6 @@
 	},
 };
 
-static void *videomemory;
-
 static int mdss_mdp_writeback_addr_setup(struct mdss_mdp_writeback_ctx *ctx,
 					 struct mdss_mdp_data *data)
 {
@@ -100,7 +102,8 @@
 {
 	struct mdss_mdp_format_params *fmt;
 	u32 dst_format, pattern, ystride0, ystride1, outsize, chroma_samp;
-	int off, ret;
+	int off;
+	u32 opmode = ctx->opmode;
 
 	pr_debug("wb_num=%d format=%d\n", ctx->wb_num, ctx->format);
 
@@ -110,11 +113,30 @@
 	fmt = mdss_mdp_get_format_params(ctx->format);
 	if (!fmt) {
 		pr_err("wb format=%d not supported\n", ctx->format);
-		return ret;
+		return -EINVAL;
 	}
 
 	chroma_samp = fmt->chroma_sample;
-	if (ctx->rot90) {
+
+	if (ctx->type != MDSS_MDP_WRITEBACK_TYPE_ROTATOR && fmt->is_yuv) {
+		mdss_mdp_csc_setup(MDSS_MDP_BLOCK_WB, ctx->wb_num, 0,
+				   MDSS_MDP_CSC_RGB2YUV);
+		opmode |= (1 << 8) |	/* CSC_EN */
+			  (0 << 9) |	/* SRC_DATA=RGB */
+			  (1 << 10);	/* DST_DATA=YCBCR */
+
+		switch (chroma_samp) {
+		case MDSS_MDP_CHROMA_RGB:
+		case MDSS_MDP_CHROMA_420:
+		case MDSS_MDP_CHROMA_H2V1:
+			opmode |= (chroma_samp << 11);
+			break;
+		case MDSS_MDP_CHROMA_H1V2:
+		default:
+			pr_err("unsupported wb chroma samp=%d\n", chroma_samp);
+			return -EINVAL;
+		}
+	} else if (ctx->rot90) {
 		if (chroma_samp == MDSS_MDP_CHROMA_H2V1)
 			chroma_samp = MDSS_MDP_CHROMA_H1V2;
 		else if (chroma_samp == MDSS_MDP_CHROMA_H1V2)
@@ -146,7 +168,7 @@
 
 	off = MDSS_MDP_REG_WB_OFFSET(ctx->wb_num);
 	MDSS_MDP_REG_WRITE(off + MDSS_MDP_REG_WB_DST_FORMAT, dst_format);
-	MDSS_MDP_REG_WRITE(off + MDSS_MDP_REG_WB_DST_OP_MODE, ctx->opmode);
+	MDSS_MDP_REG_WRITE(off + MDSS_MDP_REG_WB_DST_OP_MODE, opmode);
 	MDSS_MDP_REG_WRITE(off + MDSS_MDP_REG_WB_DST_PACK_PATTERN, pattern);
 	MDSS_MDP_REG_WRITE(off + MDSS_MDP_REG_WB_DST_YSTRIDE0, ystride0);
 	MDSS_MDP_REG_WRITE(off + MDSS_MDP_REG_WB_DST_YSTRIDE1, ystride1);
@@ -155,32 +177,24 @@
 	return 0;
 }
 
-static int mdss_mdp_writeback_wfd_setup(struct mdss_mdp_ctl *ctl,
-					struct mdss_mdp_writeback_ctx *ctx)
+static int mdss_mdp_writeback_prepare_wfd(struct mdss_mdp_ctl *ctl, void *arg)
 {
-	struct msm_fb_data_type *mfd;
-	struct fb_info *fbi;
+	struct mdss_mdp_writeback_ctx *ctx;
 	int ret;
-	u32 plane_size;
 
-	mfd = ctl->mfd;
-	fbi = mfd->fbi;
+	ctx = (struct mdss_mdp_writeback_ctx *) ctl->priv_data;
+	if (!ctx)
+		return -ENODEV;
 
-	pr_debug("setup ctl=%d\n", ctl->num);
+	if (ctx->initialized) /* already set */
+		return 0;
+
+	pr_debug("wfd setup ctl=%d\n", ctl->num);
 
 	ctx->opmode = 0;
 	ctx->format = ctl->dst_format;
-	ctx->width = fbi->var.xres;
-	ctx->height = fbi->var.yres;
-
-	plane_size = ctx->width * ctx->height * fbi->var.bits_per_pixel / 8;
-
-	videomemory = (void *) fbi->fix.smem_start + fbi->fix.smem_len -
-		      plane_size;
-
-	ctx->wb_data.num_planes = 1;
-	ctx->wb_data.p[0].addr = (u32) videomemory;
-	ctx->wb_data.p[0].len = plane_size;
+	ctx->width = ctl->width;
+	ctx->height = ctl->height;
 
 	ret = mdss_mdp_writeback_format_setup(ctx);
 	if (ret) {
@@ -188,7 +202,50 @@
 		return ret;
 	}
 
-	ctl->flush_bits |=  BIT(16); /* WB */
+	ctx->initialized = true;
+
+	return 0;
+}
+
+static int mdss_mdp_writeback_prepare_rot(struct mdss_mdp_ctl *ctl, void *arg)
+{
+	struct mdss_mdp_writeback_ctx *ctx;
+	struct mdss_mdp_writeback_arg *wb_args;
+	struct mdss_mdp_rotator_session *rot;
+
+	ctx = (struct mdss_mdp_writeback_ctx *) ctl->priv_data;
+	if (!ctx)
+		return -ENODEV;
+	wb_args = (struct mdss_mdp_writeback_arg *) arg;
+	if (!wb_args)
+		return -ENOENT;
+
+	rot = (struct mdss_mdp_rotator_session *) wb_args->priv_data;
+	if (!rot) {
+		pr_err("unable to retrieve rot session ctl=%d\n", ctl->num);
+		return -ENODEV;
+	}
+	pr_debug("rot setup wb_num=%d\n", ctx->wb_num);
+
+	ctx->opmode = BIT(6); /* ROT EN */
+	if (ROT_BLK_SIZE == 128)
+		ctx->opmode |= BIT(4); /* block size 128 */
+
+	ctx->opmode |= rot->bwc_mode;
+
+	ctx->width = rot->src_rect.w;
+	ctx->height = rot->src_rect.h;
+
+	ctx->format = rot->format;
+
+	ctx->rot90 = !!(rot->rotations & MDP_ROT_90);
+	if (ctx->rot90) {
+		ctx->opmode |= BIT(5); /* ROT 90 */
+		swap(ctx->width, ctx->height);
+	}
+
+	if (mdss_mdp_writeback_format_setup(ctx))
+		return -EINVAL;
 
 	return 0;
 }
@@ -223,13 +280,14 @@
 	mdss_mdp_irq_disable_nosync(ctx->intr_type, ctx->intf_num);
 	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, true);
 
-	complete_all(&ctx->comp);
+	if (ctx->callback_fnc)
+		ctx->callback_fnc(ctx->callback_arg);
 }
 
 static int mdss_mdp_writeback_display(struct mdss_mdp_ctl *ctl, void *arg)
 {
 	struct mdss_mdp_writeback_ctx *ctx;
-	struct mdss_mdp_data *wb_data;
+	struct mdss_mdp_writeback_arg *wb_args;
 	u32 flush_bits;
 	int ret;
 
@@ -237,18 +295,22 @@
 	if (!ctx)
 		return -ENODEV;
 
-	wb_data = &ctx->wb_data;
+	wb_args = (struct mdss_mdp_writeback_arg *) arg;
+	if (!wb_args)
+		return -ENOENT;
 
-	ret = mdss_mdp_writeback_addr_setup(ctx, wb_data);
+	ret = mdss_mdp_writeback_addr_setup(ctx, wb_args->data);
 	if (ret) {
 		pr_err("writeback data setup error ctl=%d\n", ctl->num);
 		return ret;
 	}
 
+	ctx->callback_fnc = wb_args->callback_fnc;
+	ctx->callback_arg = wb_args->priv_data;
+
 	flush_bits = BIT(16); /* WB */
 	mdss_mdp_ctl_write(ctl, MDSS_MDP_REG_CTL_FLUSH, flush_bits);
 
-	INIT_COMPLETION(ctx->comp);
 	mdss_mdp_set_intr_callback(ctx->intr_type, ctx->intf_num,
 				   mdss_mdp_writeback_intr_done, ctx);
 	mdss_mdp_irq_enable(ctx->intr_type, ctx->intf_num);
@@ -257,9 +319,6 @@
 	mdss_mdp_ctl_write(ctl, MDSS_MDP_REG_CTL_START, 1);
 	wmb();
 
-	pr_debug("writeback kickoff wb_num=%d\n", ctx->wb_num);
-	wait_for_completion_interruptible(&ctx->comp);
-
 	return 0;
 }
 
@@ -285,14 +344,12 @@
 	}
 	ctl->priv_data = ctx;
 	ctx->wb_num = ctl->num;	/* wb num should match ctl num */
+	ctx->initialized = false;
 
-	init_completion(&ctx->comp);
-
-	if (ctx->type == MDSS_MDP_WRITEBACK_TYPE_WFD)
-		ret = mdss_mdp_writeback_wfd_setup(ctl, ctx);
-	else /* line mode not supported */
-		return -ENOSYS;
-
+	if (ctx->type == MDSS_MDP_WRITEBACK_TYPE_ROTATOR)
+		ctl->prepare_fnc = mdss_mdp_writeback_prepare_rot;
+	else /* wfd or line mode */
+		ctl->prepare_fnc = mdss_mdp_writeback_prepare_wfd;
 	ctl->stop_fnc = mdss_mdp_writeback_stop;
 	ctl->display_fnc = mdss_mdp_writeback_display;
 
diff --git a/drivers/video/msm/mdss/mdss_mdp_overlay.c b/drivers/video/msm/mdss/mdss_mdp_overlay.c
index bd4a974..43ddb5e 100644
--- a/drivers/video/msm/mdss/mdss_mdp_overlay.c
+++ b/drivers/video/msm/mdss/mdss_mdp_overlay.c
@@ -21,6 +21,7 @@
 
 #include "mdss_fb.h"
 #include "mdss_mdp.h"
+#include "mdss_mdp_rotator.h"
 
 #define CHECK_BOUNDS(offset, size, max_size) \
 	(((size) > (max_size)) || ((offset) > ((max_size) - (size))))
@@ -85,28 +86,30 @@
 		dst_h = req->dst_rect.h;
 	}
 
-	if ((req->src_rect.w * MAX_UPSCALE_RATIO) < dst_w) {
-		pr_err("too much upscaling Width %d->%d\n",
-		       req->src_rect.w, req->dst_rect.w);
-		return -EINVAL;
-	}
+	if (!(req->flags & MDSS_MDP_ROT_ONLY)) {
+		if ((req->src_rect.w * MAX_UPSCALE_RATIO) < dst_w) {
+			pr_err("too much upscaling Width %d->%d\n",
+			       req->src_rect.w, req->dst_rect.w);
+			return -EINVAL;
+		}
 
-	if ((req->src_rect.h * MAX_UPSCALE_RATIO) < dst_h) {
-		pr_err("too much upscaling. Height %d->%d\n",
-		       req->src_rect.h, req->dst_rect.h);
-		return -EINVAL;
-	}
+		if ((req->src_rect.h * MAX_UPSCALE_RATIO) < dst_h) {
+			pr_err("too much upscaling. Height %d->%d\n",
+			       req->src_rect.h, req->dst_rect.h);
+			return -EINVAL;
+		}
 
-	if (req->src_rect.w > (dst_w * MAX_DOWNSCALE_RATIO)) {
-		pr_err("too much downscaling. Width %d->%d\n",
-		       req->src_rect.w, req->dst_rect.w);
-		return -EINVAL;
-	}
+		if (req->src_rect.w > (dst_w * MAX_DOWNSCALE_RATIO)) {
+			pr_err("too much downscaling. Width %d->%d\n",
+			       req->src_rect.w, req->dst_rect.w);
+			return -EINVAL;
+		}
 
-	if (req->src_rect.h > (dst_h * MAX_DOWNSCALE_RATIO)) {
-		pr_err("too much downscaling. Height %d->%d\n",
-		       req->src_rect.h, req->dst_rect.h);
-		return -EINVAL;
+		if (req->src_rect.h > (dst_h * MAX_DOWNSCALE_RATIO)) {
+			pr_err("too much downscaling. Height %d->%d\n",
+			       req->src_rect.h, req->dst_rect.h);
+			return -EINVAL;
+		}
 	}
 
 	if (fmt->is_yuv) {
@@ -141,6 +144,61 @@
 	return 0;
 }
 
+static int mdss_mdp_overlay_rotator_setup(struct msm_fb_data_type *mfd,
+					  struct mdp_overlay *req)
+{
+	struct mdss_mdp_rotator_session *rot;
+	struct mdss_mdp_format_params *fmt;
+	int ret = 0;
+
+	pr_debug("rot ctl=%u req id=%x\n", mfd->ctl->num, req->id);
+
+	fmt = mdss_mdp_get_format_params(req->src.format);
+	if (!fmt) {
+		pr_err("invalid rot format %d\n", req->src.format);
+		return -EINVAL;
+	}
+
+	ret = mdss_mdp_overlay_req_check(mfd, req, fmt);
+	if (ret)
+		return ret;
+
+	if (req->id == MSMFB_NEW_REQUEST) {
+		rot = mdss_mdp_rotator_session_alloc();
+
+		if (!rot) {
+			pr_err("unable to allocate rotator session\n");
+			return -ENOMEM;
+		}
+	} else if (req->id & MDSS_MDP_ROT_SESSION_MASK) {
+		rot = mdss_mdp_rotator_session_get(req->id);
+
+		if (!rot) {
+			pr_err("rotator session=%x not found\n", req->id);
+			return -ENODEV;
+		}
+	} else {
+		pr_err("invalid rotator session id=%x\n", req->id);
+		return -EINVAL;
+	}
+
+	rot->rotations = req->flags & (MDP_ROT_90 | MDP_FLIP_LR | MDP_FLIP_UD);
+
+	rot->format = fmt->format;
+	rot->img_width = req->src.width;
+	rot->img_height = req->src.height;
+	rot->src_rect.x = req->src_rect.x;
+	rot->src_rect.y = req->src_rect.y;
+	rot->src_rect.w = req->src_rect.w;
+	rot->src_rect.h = req->src_rect.h;
+
+	rot->params_changed++;
+
+	req->id = rot->session_id;
+
+	return ret;
+}
+
 static int mdss_mdp_overlay_pipe_setup(struct msm_fb_data_type *mfd,
 				       struct mdp_overlay *req,
 				       struct mdss_mdp_pipe **ppipe)
@@ -256,14 +314,19 @@
 				struct mdp_overlay *req)
 {
 	int ret;
-	struct mdss_mdp_pipe *pipe;
 
-	/* userspace zorder start with stage 0 */
-	req->z_order += MDSS_MDP_STAGE_0;
+	if (req->flags & MDSS_MDP_ROT_ONLY) {
+		ret = mdss_mdp_overlay_rotator_setup(mfd, req);
+	} else {
+		struct mdss_mdp_pipe *pipe;
 
-	ret = mdss_mdp_overlay_pipe_setup(mfd, req, &pipe);
+		/* userspace zorder start with stage 0 */
+		req->z_order += MDSS_MDP_STAGE_0;
 
-	req->z_order -= MDSS_MDP_STAGE_0;
+		ret = mdss_mdp_overlay_pipe_setup(mfd, req, &pipe);
+
+		req->z_order -= MDSS_MDP_STAGE_0;
+	}
 
 	return ret;
 }
@@ -280,6 +343,19 @@
 
 	pr_debug("unset ndx=%x\n", ndx);
 
+	if (ndx & MDSS_MDP_ROT_SESSION_MASK) {
+		struct mdss_mdp_rotator_session *rot;
+		rot = mdss_mdp_rotator_session_get(ndx);
+		if (rot) {
+			mdss_mdp_rotator_finish(rot);
+		} else {
+			pr_warn("unknown session id=%x\n", ndx);
+			ret = -ENODEV;
+		}
+
+		return ret;
+	}
+
 	for (i = 0; unset_ndx != ndx && i < MDSS_MDP_MAX_SSPP; i++) {
 		pipe_ndx = BIT(i);
 		if (pipe_ndx & ndx) {
@@ -319,6 +395,28 @@
 	return ret;
 }
 
+static int mdss_mdp_overlay_rotate(struct msmfb_overlay_data *req,
+				   struct mdss_mdp_data *src_data,
+				   struct mdss_mdp_data *dst_data)
+{
+	struct mdss_mdp_rotator_session *rot;
+	int ret;
+
+	rot = mdss_mdp_rotator_session_get(req->id);
+	if (!rot) {
+		pr_err("invalid session id=%x\n", req->id);
+		return -ENODEV;
+	}
+
+	ret = mdss_mdp_rotator_queue(rot, src_data, dst_data);
+	if (ret) {
+		pr_err("rotator queue error session id=%x\n", req->id);
+		return ret;
+	}
+
+	return 0;
+}
+
 static int mdss_mdp_overlay_queue(struct msmfb_overlay_data *req,
 				  struct mdss_mdp_data *src_data)
 {
@@ -364,7 +462,23 @@
 	}
 	src_data.num_planes = 1;
 
-	ret = mdss_mdp_overlay_queue(req, &src_data);
+	if (req->id & MDSS_MDP_ROT_SESSION_MASK) {
+		struct mdss_mdp_data dst_data;
+		memset(&dst_data, 0, sizeof(dst_data));
+
+		mdss_mdp_get_img(mfd->iclient, &req->dst_data, &dst_data.p[0]);
+		if (dst_data.p[0].len == 0) {
+			pr_err("dst data pmem error\n");
+			return -ENOMEM;
+		}
+		dst_data.num_planes = 1;
+
+		ret = mdss_mdp_overlay_rotate(req, &src_data, &dst_data);
+
+		mdss_mdp_put_img(&dst_data.p[0]);
+	} else {
+		ret = mdss_mdp_overlay_queue(req, &src_data);
+	}
 
 	mdss_mdp_put_img(&src_data.p[0]);
 
@@ -679,6 +793,11 @@
 			ret = 0;
 		}
 		break;
+
+	default:
+		if (mfd->panel_info.type == WRITEBACK_PANEL)
+			ret = mdss_mdp_wb_ioctl_handler(mfd, cmd, argp);
+		break;
 	}
 
 	return ret;
@@ -695,7 +814,11 @@
 	mfd->cursor_update = mdss_mdp_hw_cursor_update;
 	mfd->dma_fnc = mdss_mdp_overlay_pan_display;
 	mfd->ioctl_handler = mdss_mdp_overlay_ioctl_handler;
-	mfd->kickoff_fnc = mdss_mdp_overlay_kickoff;
+
+	if (mfd->panel_info.type == WRITEBACK_PANEL)
+		mfd->kickoff_fnc = mdss_mdp_wb_kickoff;
+	else
+		mfd->kickoff_fnc = mdss_mdp_overlay_kickoff;
 
 	return 0;
 }
diff --git a/drivers/video/msm/mdss/mdss_mdp_pipe.c b/drivers/video/msm/mdss/mdss_mdp_pipe.c
index b52cff5..52f4324 100644
--- a/drivers/video/msm/mdss/mdss_mdp_pipe.c
+++ b/drivers/video/msm/mdss/mdss_mdp_pipe.c
@@ -581,6 +581,11 @@
 			  (1 << 18) |	/* SRC_DATA=YCBCR */
 			  (1 << 17);	/* CSC_1_EN */
 
+	/* only need to program once */
+	if (pipe->play_cnt == 0) {
+		mdss_mdp_csc_setup(MDSS_MDP_BLOCK_SSPP, pipe->num, 1,
+				   MDSS_MDP_CSC_YUV2RGB);
+	}
 	mdss_mdp_pipe_write(pipe, MDSS_MDP_REG_VIG_OP_MODE, opmode);
 
 	return 0;
diff --git a/drivers/video/msm/mdss/mdss_mdp_pp.c b/drivers/video/msm/mdss/mdss_mdp_pp.c
new file mode 100644
index 0000000..db840a8
--- /dev/null
+++ b/drivers/video/msm/mdss/mdss_mdp_pp.c
@@ -0,0 +1,175 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt)	"%s: " fmt, __func__
+
+#include "mdss_mdp.h"
+
+struct mdp_csc_cfg mdp_csc_convert[MDSS_MDP_MAX_CSC] = {
+	[MDSS_MDP_CSC_RGB2RGB] = {
+		0,
+		{
+			0x0200, 0x0000, 0x0000,
+			0x0000, 0x0200, 0x0000,
+			0x0000, 0x0000, 0x0200,
+		},
+		{ 0x0, 0x0, 0x0,},
+		{ 0x0, 0x0, 0x0,},
+		{ 0x0, 0xff, 0x0, 0xff, 0x0, 0xff,},
+		{ 0x0, 0xff, 0x0, 0xff, 0x0, 0xff,},
+	},
+	[MDSS_MDP_CSC_YUV2RGB] = {
+		0,
+		{
+			0x0254, 0x0000, 0x0331,
+			0x0254, 0xff37, 0xfe60,
+			0x0254, 0x0409, 0x0000,
+		},
+		{ 0xfff0, 0xff80, 0xff80,},
+		{ 0x0, 0x0, 0x0,},
+		{ 0x0, 0xff, 0x0, 0xff, 0x0, 0xff,},
+		{ 0x0, 0xff, 0x0, 0xff, 0x0, 0xff,},
+	},
+	[MDSS_MDP_CSC_RGB2YUV] = {
+		0,
+		{
+			0x0083, 0x0102, 0x0032,
+			0x1fb5, 0x1f6c, 0x00e1,
+			0x00e1, 0x1f45, 0x1fdc
+		},
+		{ 0x0, 0x0, 0x0,},
+		{ 0x0010, 0x0080, 0x0080,},
+		{ 0x0, 0xff, 0x0, 0xff, 0x0, 0xff,},
+		{ 0x0010, 0x00eb, 0x0010, 0x00f0, 0x0010, 0x00f0,},
+	},
+	[MDSS_MDP_CSC_YUV2YUV] = {
+		0,
+		{
+			0x0200, 0x0000, 0x0000,
+			0x0000, 0x0200, 0x0000,
+			0x0000, 0x0000, 0x0200,
+		},
+		{ 0x0, 0x0, 0x0,},
+		{ 0x0, 0x0, 0x0,},
+		{ 0x0, 0xff, 0x0, 0xff, 0x0, 0xff,},
+		{ 0x0, 0xff, 0x0, 0xff, 0x0, 0xff,},
+	},
+};
+
+#define CSC_MV_OFF	0x0
+#define CSC_BV_OFF	0x2C
+#define CSC_LV_OFF	0x14
+#define CSC_POST_OFF	0xC
+
+static int mdss_mdp_csc_setup_data(u32 block, u32 blk_idx, u32 tbl_idx,
+				   struct mdp_csc_cfg *data)
+{
+	int i, ret = 0;
+	u32 *off, base, val = 0;
+
+	if (data == NULL) {
+		pr_err("no csc matrix specified\n");
+		return -EINVAL;
+	}
+
+	switch (block) {
+	case MDSS_MDP_BLOCK_SSPP:
+		if (blk_idx < MDSS_MDP_SSPP_RGB0) {
+			base = MDSS_MDP_REG_SSPP_OFFSET(blk_idx);
+			if (tbl_idx == 1)
+				base += MDSS_MDP_REG_VIG_CSC_1_BASE;
+			else
+				base += MDSS_MDP_REG_VIG_CSC_0_BASE;
+		} else {
+			ret = -EINVAL;
+		}
+		break;
+	case MDSS_MDP_BLOCK_WB:
+		if (blk_idx < MDSS_MDP_MAX_WRITEBACK) {
+			base = MDSS_MDP_REG_WB_OFFSET(blk_idx) +
+			       MDSS_MDP_REG_WB_CSC_BASE;
+		} else {
+			ret = -EINVAL;
+		}
+		break;
+	default:
+		ret = -EINVAL;
+		break;
+	}
+	if (ret != 0) {
+		pr_err("unsupported block id for csc\n");
+		return ret;
+	}
+
+	off = (u32 *) (base + CSC_MV_OFF);
+	for (i = 0; i < 9; i++) {
+		if (i & 0x1) {
+			val |= data->csc_mv[i] << 16;
+			MDSS_MDP_REG_WRITE(off, val);
+			off++;
+		} else {
+			val = data->csc_mv[i];
+		}
+	}
+	MDSS_MDP_REG_WRITE(off, val); /* COEFF_33 */
+
+	off = (u32 *) (base + CSC_BV_OFF);
+	for (i = 0; i < 3; i++) {
+		MDSS_MDP_REG_WRITE(off, data->csc_pre_bv[i]);
+		MDSS_MDP_REG_WRITE((u32 *)(((u32)off) + CSC_POST_OFF),
+				   data->csc_post_bv[i]);
+		off++;
+	}
+
+	off = (u32 *) (base + CSC_LV_OFF);
+	for (i = 0; i < 6; i += 2) {
+		val = (data->csc_pre_lv[i] << 8) | data->csc_pre_lv[i+1];
+		MDSS_MDP_REG_WRITE(off, val);
+
+		val = (data->csc_post_lv[i] << 8) | data->csc_post_lv[i+1];
+		MDSS_MDP_REG_WRITE((u32 *)(((u32)off) + CSC_POST_OFF), val);
+		off++;
+	}
+
+	return ret;
+}
+
+int mdss_mdp_csc_setup(u32 block, u32 blk_idx, u32 tbl_idx, u32 csc_type)
+{
+	struct mdp_csc_cfg *data;
+
+	if (csc_type >= MDSS_MDP_MAX_CSC) {
+		pr_err("invalid csc matrix index %d\n", csc_type);
+		return -ERANGE;
+	}
+
+	pr_debug("csc type=%d blk=%d idx=%d tbl=%d\n", csc_type,
+		 block, blk_idx, tbl_idx);
+
+	data = &mdp_csc_convert[csc_type];
+	return mdss_mdp_csc_setup_data(block, blk_idx, tbl_idx, data);
+}
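+
+/*
+ * Illustrative usage (sketch, not part of this change): the VIG pipe setup
+ * in mdss_mdp_pipe.c above programs the YUV-to-RGB matrix into CSC table 1
+ * of an SSPP block with a call of the form:
+ *
+ *	mdss_mdp_csc_setup(MDSS_MDP_BLOCK_SSPP, pipe->num, 1,
+ *			   MDSS_MDP_CSC_YUV2RGB);
+ *
+ * A writeback block would instead pass MDSS_MDP_BLOCK_WB and a writeback
+ * index below MDSS_MDP_MAX_WRITEBACK.
+ */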
+
+int mdss_mdp_dspp_setup(struct mdss_mdp_ctl *ctl, struct mdss_mdp_mixer *mixer)
+{
+	int dspp_num;
+
+	if (!ctl || !mixer)
+		return -EINVAL;
+
+	dspp_num = mixer->num;
+
+	ctl->flush_bits |= BIT(13 + dspp_num);	/* DSPP */
+
+	return 0;
+}
diff --git a/drivers/video/msm/mdss/mdss_mdp_rotator.c b/drivers/video/msm/mdss/mdss_mdp_rotator.c
new file mode 100644
index 0000000..fc3a843
--- /dev/null
+++ b/drivers/video/msm/mdss/mdss_mdp_rotator.c
@@ -0,0 +1,260 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt)	"%s: " fmt, __func__
+
+#include <linux/errno.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/types.h>
+
+#include "mdss_mdp.h"
+#include "mdss_mdp_rotator.h"
+
+#define MAX_ROTATOR_SESSIONS 8
+
+static DEFINE_MUTEX(rotator_lock);
+static struct mdss_mdp_rotator_session rotator_session[MAX_ROTATOR_SESSIONS];
+static LIST_HEAD(rotator_queue);
+
+struct mdss_mdp_rotator_session *mdss_mdp_rotator_session_alloc(void)
+{
+	struct mdss_mdp_rotator_session *rot;
+	int i;
+
+	mutex_lock(&rotator_lock);
+	for (i = 0; i < MAX_ROTATOR_SESSIONS; i++) {
+		rot = &rotator_session[i];
+		if (rot->ref_cnt == 0) {
+			rot->ref_cnt++;
+			rot->session_id = i | MDSS_MDP_ROT_SESSION_MASK;
+			mutex_init(&rot->lock);
+			init_completion(&rot->comp);
+			break;
+		}
+	}
+	mutex_unlock(&rotator_lock);
+	if (i == MAX_ROTATOR_SESSIONS) {
+		pr_err("max rotator sessions reached\n");
+		return NULL;
+	}
+
+	return rot;
+}
+
+struct mdss_mdp_rotator_session *mdss_mdp_rotator_session_get(u32 session_id)
+{
+	struct mdss_mdp_rotator_session *rot;
+	u32 ndx;
+
+	ndx = session_id & ~MDSS_MDP_ROT_SESSION_MASK;
+	if (ndx < MAX_ROTATOR_SESSIONS) {
+		rot = &rotator_session[ndx];
+		if (rot->ref_cnt && rot->session_id == session_id)
+			return rot;
+	}
+	return NULL;
+}
+
+static struct mdss_mdp_pipe *mdss_mdp_rotator_pipe_alloc(void)
+{
+	struct mdss_mdp_mixer *mixer;
+	struct mdss_mdp_pipe *pipe = NULL;
+	int pnum;
+
+	mixer = mdss_mdp_wb_mixer_alloc(1);
+	if (!mixer)
+		return NULL;
+
+	switch (mixer->num) {
+	case MDSS_MDP_LAYERMIXER3:
+		pnum = MDSS_MDP_SSPP_DMA0;
+		break;
+	case MDSS_MDP_LAYERMIXER4:
+		pnum = MDSS_MDP_SSPP_DMA1;
+		break;
+	default:
+		goto done;
+	}
+
+	pipe = mdss_mdp_pipe_alloc_pnum(pnum);
+
+	if (pipe)
+		pipe->mixer = mixer;
+done:
+	if (!pipe)
+		mdss_mdp_wb_mixer_destroy(mixer);
+
+	return pipe;
+}
+
+static int mdss_mdp_rotator_busy_wait(struct mdss_mdp_rotator_session *rot)
+{
+	mutex_lock(&rot->lock);
+	if (rot->busy) {
+		pr_debug("waiting for rot=%d to complete\n", rot->pipe->num);
+		wait_for_completion_interruptible(&rot->comp);
+		rot->busy = false;
+
+	}
+	mutex_unlock(&rot->lock);
+
+	return 0;
+}
+
+static void mdss_mdp_rotator_callback(void *arg)
+{
+	struct mdss_mdp_rotator_session *rot;
+
+	rot = (struct mdss_mdp_rotator_session *) arg;
+	if (rot)
+		complete(&rot->comp);
+}
+
+static int mdss_mdp_rotator_kickoff(struct mdss_mdp_ctl *ctl,
+				    struct mdss_mdp_rotator_session *rot,
+				    struct mdss_mdp_data *dst_data)
+{
+	int ret;
+	struct mdss_mdp_writeback_arg wb_args = {
+		.callback_fnc = mdss_mdp_rotator_callback,
+		.data = dst_data,
+		.priv_data = rot,
+	};
+
+	mutex_lock(&rot->lock);
+	INIT_COMPLETION(rot->comp);
+	rot->busy = true;
+	ret = mdss_mdp_display_commit(ctl, &wb_args);
+	if (ret) {
+		rot->busy = false;
+		pr_err("problem with kickoff rot pipe=%d\n", rot->pipe->num);
+	}
+	mutex_unlock(&rot->lock);
+	return ret;
+}
+
+static int mdss_mdp_rotator_pipe_dequeue(struct mdss_mdp_rotator_session *rot)
+{
+	if (rot->pipe) {
+		pr_debug("reusing existing session=%d\n", rot->pipe->num);
+		mdss_mdp_rotator_busy_wait(rot);
+		list_move_tail(&rot->head, &rotator_queue);
+	} else {
+		struct mdss_mdp_rotator_session *tmp;
+
+		rot->params_changed++;
+		rot->pipe = mdss_mdp_rotator_pipe_alloc();
+		if (rot->pipe) {
+			pr_debug("use new rotator pipe=%d\n", rot->pipe->num);
+
+			rot->pipe->mixer_stage = MDSS_MDP_STAGE_UNUSED;
+			list_add_tail(&rot->head, &rotator_queue);
+		} else if (!list_empty(&rotator_queue)) {
+			tmp = list_first_entry(&rotator_queue,
+					       struct mdss_mdp_rotator_session,
+					       head);
+
+			pr_debug("wait for rotator pipe=%d\n", tmp->pipe->num);
+			mdss_mdp_rotator_busy_wait(tmp);
+			rot->pipe = tmp->pipe;
+			tmp->pipe = NULL;
+
+			list_del(&tmp->head);
+			list_add_tail(&rot->head, &rotator_queue);
+		} else {
+			pr_err("no available rotator pipes\n");
+			return -EBUSY;
+		}
+	}
+
+	return 0;
+}
+
+int mdss_mdp_rotator_queue(struct mdss_mdp_rotator_session *rot,
+			   struct mdss_mdp_data *src_data,
+			   struct mdss_mdp_data *dst_data)
+{
+	struct mdss_mdp_pipe *rot_pipe;
+	struct mdss_mdp_ctl *ctl;
+	int ret;
+
+	if (!rot)
+		return -ENODEV;
+
+	mutex_lock(&rotator_lock);
+	ret = mdss_mdp_rotator_pipe_dequeue(rot);
+	if (ret) {
+		pr_err("unable to acquire rotator\n");
+		goto done;
+	}
+
+	rot_pipe = rot->pipe;
+
+	pr_debug("queue rotator pnum=%d\n", rot_pipe->num);
+
+	ctl = rot_pipe->mixer->ctl;
+
+	if (rot->params_changed) {
+		rot->params_changed = 0;
+		rot_pipe->flags = rot->rotations;
+		rot_pipe->src_fmt = mdss_mdp_get_format_params(rot->format);
+		rot_pipe->img_width = rot->img_width;
+		rot_pipe->img_height = rot->img_height;
+		rot_pipe->src = rot->src_rect;
+		rot_pipe->bwc_mode = rot->bwc_mode;
+		rot_pipe->params_changed++;
+	}
+
+	ret = mdss_mdp_pipe_queue_data(rot->pipe, src_data);
+	if (ret) {
+		pr_err("unable to queue rot data\n");
+		goto done;
+	}
+
+	ret = mdss_mdp_rotator_kickoff(ctl, rot, dst_data);
+
+done:
+	mutex_unlock(&rotator_lock);
+
+	if (!rot->no_wait)
+		mdss_mdp_rotator_busy_wait(rot);
+
+	return ret;
+}
+
+int mdss_mdp_rotator_finish(struct mdss_mdp_rotator_session *rot)
+{
+	struct mdss_mdp_pipe *rot_pipe;
+
+	if (!rot)
+		return -ENODEV;
+
+	pr_debug("finish rot id=%x\n", rot->session_id);
+
+	mutex_lock(&rotator_lock);
+	rot_pipe = rot->pipe;
+	if (rot_pipe) {
+		mdss_mdp_rotator_busy_wait(rot);
+		list_del(&rot->head);
+	}
+	memset(rot, 0, sizeof(*rot));
+	if (rot_pipe) {
+		struct mdss_mdp_mixer *mixer = rot_pipe->mixer;
+		mdss_mdp_pipe_destroy(rot_pipe);
+		mdss_mdp_wb_mixer_destroy(mixer);
+	}
+	mutex_unlock(&rotator_lock);
+
+	return 0;
+}
diff --git a/drivers/video/msm/mdss/mdss_mdp_rotator.h b/drivers/video/msm/mdss/mdss_mdp_rotator.h
new file mode 100644
index 0000000..1e4b81e0
--- /dev/null
+++ b/drivers/video/msm/mdss/mdss_mdp_rotator.h
@@ -0,0 +1,54 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef MDSS_MDP_ROTATOR_H
+#define MDSS_MDP_ROTATOR_H
+
+#include <linux/types.h>
+
+#include "mdss_mdp.h"
+
+#define MDSS_MDP_ROT_SESSION_MASK	0x80000000
+
+struct mdss_mdp_rotator_session {
+	u32 session_id;
+	u32 ref_cnt;
+	u32 params_changed;
+
+	u32 format;
+	u32 rotations;
+
+	u16 img_width;
+	u16 img_height;
+	struct mdss_mdp_img_rect src_rect;
+
+	u32 bwc_mode;
+	struct mdss_mdp_pipe *pipe;
+
+	struct mutex lock;
+	struct completion comp;
+	u8 busy;
+	u8 no_wait;
+
+	struct list_head head;
+};
+
+struct mdss_mdp_rotator_session *mdss_mdp_rotator_session_alloc(void);
+struct mdss_mdp_rotator_session *mdss_mdp_rotator_session_get(u32 session_id);
+
+int mdss_mdp_rotator_queue(struct mdss_mdp_rotator_session *rot,
+			   struct mdss_mdp_data *src_data,
+			   struct mdss_mdp_data *dst_data);
+int mdss_mdp_rotator_finish(struct mdss_mdp_rotator_session *rot);
+int mdss_mdp_rotator_ctl_busy_wait(struct mdss_mdp_ctl *ctl);
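+
+/*
+ * Illustrative flow (sketch only, based on the overlay code in this patch):
+ * a caller allocates a session, fills in the source parameters, queues the
+ * source/destination buffers and finally releases the session.  The req,
+ * src_data and dst_data names below are placeholders for the overlay request
+ * and its mapped buffers.
+ *
+ *	struct mdss_mdp_rotator_session *rot;
+ *
+ *	rot = mdss_mdp_rotator_session_alloc();
+ *	if (!rot)
+ *		return -ENOMEM;
+ *	rot->format = req->src.format;
+ *	rot->img_width = req->src.width;
+ *	rot->img_height = req->src.height;
+ *	rot->params_changed++;
+ *	ret = mdss_mdp_rotator_queue(rot, &src_data, &dst_data);
+ *	...
+ *	mdss_mdp_rotator_finish(rot);
+ *
+ * The session_id handed back to userspace has MDSS_MDP_ROT_SESSION_MASK set
+ * so that overlay ioctls can tell rotator sessions apart from regular pipes.
+ */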
+
+#endif /* MDSS_MDP_ROTATOR_H */
diff --git a/drivers/video/msm/mdss/mdss_mdp_util.c b/drivers/video/msm/mdss/mdss_mdp_util.c
index 25c9ac4..2e86806 100644
--- a/drivers/video/msm/mdss/mdss_mdp_util.c
+++ b/drivers/video/msm/mdss/mdss_mdp_util.c
@@ -10,7 +10,6 @@
  * GNU General Public License for more details.
  *
  */
-
 #define pr_fmt(fmt)	"%s: " fmt, __func__
 
 #include <linux/android_pmem.h>
@@ -53,7 +52,7 @@
 	int index = -1;
 	switch (intr_type) {
 	case MDSS_MDP_IRQ_INTF_VSYNC:
-		index = MDP_INTR_VSYNC_INTF_0 + intf_num;
+		index = MDP_INTR_VSYNC_INTF_0 + (intf_num - MDSS_MDP_INTF0);
 		break;
 	case MDSS_MDP_IRQ_PING_PONG_COMP:
 		index = MDP_INTR_PING_PONG_0 + intf_num;
@@ -116,11 +115,12 @@
 
 
 	isr = MDSS_MDP_REG_READ(MDSS_MDP_REG_INTR_STATUS);
+
+	pr_debug("isr=%x\n", isr);
+
 	if (isr == 0)
 		goto done;
 
-	pr_devel("isr=%x\n", isr);
-
 	mask = MDSS_MDP_REG_READ(MDSS_MDP_REG_INTR_EN);
 	MDSS_MDP_REG_WRITE(MDSS_MDP_REG_INTR_CLEAR, isr);
 
diff --git a/drivers/video/msm/mdss/mdss_mdp_wb.c b/drivers/video/msm/mdss/mdss_mdp_wb.c
new file mode 100644
index 0000000..da55edc
--- /dev/null
+++ b/drivers/video/msm/mdss/mdss_mdp_wb.c
@@ -0,0 +1,539 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt)	"%s: " fmt, __func__
+
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/major.h>
+#include <linux/module.h>
+#include <linux/uaccess.h>
+
+#include "mdss_mdp.h"
+#include "mdss_fb.h"
+
+#define DEBUG_WRITEBACK
+
+enum mdss_mdp_wb_state {
+	WB_OPEN,
+	WB_START,
+	WB_STOPING,
+	WB_STOP
+};
+
+struct mdss_mdp_wb {
+	u32 fb_ndx;
+	struct mutex lock;
+	struct list_head busy_queue;
+	struct list_head free_queue;
+	struct list_head register_queue;
+	wait_queue_head_t wait_q;
+	u32 state;
+};
+
+enum mdss_mdp_wb_node_state {
+	REGISTERED,
+	IN_FREE_QUEUE,
+	IN_BUSY_QUEUE,
+	WITH_CLIENT
+};
+
+struct mdss_mdp_wb_data {
+	struct list_head registered_entry;
+	struct list_head active_entry;
+	struct msmfb_data buf_info;
+	struct mdss_mdp_data buf_data;
+	int state;
+};
+
+static DEFINE_MUTEX(mdss_mdp_wb_buf_lock);
+static struct mdss_mdp_wb mdss_mdp_wb_info;
+
+#ifdef DEBUG_WRITEBACK
+/* for debugging: writeback output buffer to framebuffer memory */
+static inline
+struct mdss_mdp_data *mdss_mdp_wb_debug_buffer(struct msm_fb_data_type *mfd)
+{
+	static void *videomemory;
+	static void *mdss_wb_mem;
+	static struct mdss_mdp_data buffer = {
+		.num_planes = 1,
+	};
+
+	struct fb_info *fbi;
+	int img_size;
+	int offset;
+
+
+	fbi = mfd->fbi;
+	img_size = fbi->var.xres * fbi->var.yres * fbi->var.bits_per_pixel / 8;
+	offset = fbi->fix.smem_len - img_size;
+
+	videomemory = fbi->screen_base + offset;
+	mdss_wb_mem = (void *)(fbi->fix.smem_start + offset);
+
+	buffer.p[0].addr = fbi->fix.smem_start + offset;
+	buffer.p[0].len = img_size;
+
+	return &buffer;
+}
+#else
+static inline
+struct mdss_mdp_data *mdss_mdp_wb_debug_buffer(struct msm_fb_data_type *mfd)
+{
+	return NULL;
+}
+#endif
+
+static int mdss_mdp_wb_init(struct msm_fb_data_type *mfd)
+{
+	struct mdss_mdp_wb *wb;
+
+	mutex_lock(&mdss_mdp_wb_buf_lock);
+	wb = mfd->wb;
+	if (wb == NULL) {
+		wb = &mdss_mdp_wb_info;
+		wb->fb_ndx = mfd->index;
+		mfd->wb = wb;
+	} else if (mfd->index != wb->fb_ndx) {
+		pr_err("only one writeback intf supported at a time\n");
+		mutex_unlock(&mdss_mdp_wb_buf_lock);
+		return -EMLINK;
+	} else {
+		pr_debug("writeback already initialized\n");
+	}
+
+	pr_debug("init writeback on fb%d\n", wb->fb_ndx);
+
+	mutex_init(&wb->lock);
+	INIT_LIST_HEAD(&wb->free_queue);
+	INIT_LIST_HEAD(&wb->busy_queue);
+	INIT_LIST_HEAD(&wb->register_queue);
+	wb->state = WB_OPEN;
+	init_waitqueue_head(&wb->wait_q);
+
+	mfd->wb = wb;
+	mutex_unlock(&mdss_mdp_wb_buf_lock);
+	return 0;
+}
+
+static int mdss_mdp_wb_terminate(struct msm_fb_data_type *mfd)
+{
+	struct mdss_mdp_wb *wb = mfd->wb;
+
+	if (!wb) {
+		pr_err("unable to terminate, writeback is not initialized\n");
+		return -ENODEV;
+	}
+
+	pr_debug("terminate writeback\n");
+
+	mutex_lock(&mdss_mdp_wb_buf_lock);
+	mutex_lock(&wb->lock);
+	if (!list_empty(&wb->register_queue)) {
+		struct mdss_mdp_wb_data *node, *temp;
+		list_for_each_entry_safe(node, temp, &wb->register_queue,
+					 registered_entry) {
+			list_del(&node->registered_entry);
+			kfree(node);
+		}
+	}
+	mutex_unlock(&wb->lock);
+
+	mfd->wb = NULL;
+	mutex_unlock(&mdss_mdp_wb_buf_lock);
+
+	return 0;
+}
+
+static int mdss_mdp_wb_start(struct msm_fb_data_type *mfd)
+{
+	struct mdss_mdp_wb *wb = mfd->wb;
+
+	if (!wb) {
+		pr_err("unable to start, writeback is not initialized\n");
+		return -ENODEV;
+	}
+
+	mutex_lock(&wb->lock);
+	wb->state = WB_START;
+	mutex_unlock(&wb->lock);
+	wake_up(&wb->wait_q);
+
+	return 0;
+}
+
+static int mdss_mdp_wb_stop(struct msm_fb_data_type *mfd)
+{
+	struct mdss_mdp_wb *wb = mfd->wb;
+
+	if (!wb) {
+		pr_err("unable to stop, writeback is not initialized\n");
+		return -ENODEV;
+	}
+
+	mutex_lock(&wb->lock);
+	wb->state = WB_STOPING;
+	mutex_unlock(&wb->lock);
+	wake_up(&wb->wait_q);
+
+	return 0;
+}
+
+static int mdss_mdp_wb_register_node(struct mdss_mdp_wb *wb,
+				     struct mdss_mdp_wb_data *node)
+{
+	if (!node) {
+		pr_err("Invalid wb node\n");
+		return -EINVAL;
+	}
+
+	node->state = REGISTERED;
+	list_add_tail(&node->registered_entry, &wb->register_queue);
+
+	return 0;
+}
+
+static struct mdss_mdp_wb_data *get_local_node(struct mdss_mdp_wb *wb,
+					       struct msmfb_data *data) {
+	struct mdss_mdp_wb_data *node;
+	struct mdss_mdp_img_data *buf;
+	int ret;
+
+	if (!data->iova)
+		return NULL;
+
+	if (!list_empty(&wb->register_queue)) {
+		list_for_each_entry(node, &wb->register_queue,
+				    registered_entry) {
+			if (node->buf_info.iova == data->iova) {
+				pr_debug("found node iova=%x addr=%x\n",
+					 data->iova, node->buf_data.p[0].addr);
+				return node;
+			}
+		}
+	}
+
+	node = kzalloc(sizeof(struct mdss_mdp_wb_data), GFP_KERNEL);
+	if (node == NULL) {
+		pr_err("out of memory\n");
+		return NULL;
+	}
+
+	node->buf_data.num_planes = 1;
+	buf = &node->buf_data.p[0];
+	buf->addr = (u32) (data->iova + data->offset);
+	buf->len = UINT_MAX; /* trusted source */
+	ret = mdss_mdp_wb_register_node(wb, node);
+	if (IS_ERR_VALUE(ret)) {
+		pr_err("error registering wb node\n");
+		kfree(node);
+		return NULL;
+	}
+
+	pr_debug("register node iova=0x%x addr=0x%x\n", data->iova, buf->addr);
+
+	return node;
+}
+
+static struct mdss_mdp_wb_data *get_user_node(struct msm_fb_data_type *mfd,
+					      struct msmfb_data *data) {
+	struct mdss_mdp_wb *wb = mfd->wb;
+	struct mdss_mdp_wb_data *node;
+	struct mdss_mdp_img_data *buf;
+	int ret;
+
+	node = kzalloc(sizeof(struct mdss_mdp_wb_data), GFP_KERNEL);
+	if (node == NULL) {
+		pr_err("out of memory\n");
+		return NULL;
+	}
+
+	node->buf_data.num_planes = 1;
+	buf = &node->buf_data.p[0];
+	ret = mdss_mdp_get_img(mfd->iclient, data, buf);
+	if (IS_ERR_VALUE(ret)) {
+		pr_err("error getting buffer info\n");
+		goto register_fail;
+	}
+	memcpy(&node->buf_info, data, sizeof(*data));
+
+	ret = mdss_mdp_wb_register_node(wb, node);
+	if (IS_ERR_VALUE(ret)) {
+		pr_err("error registering wb node\n");
+		goto register_fail;
+	}
+
+	pr_debug("register node mem_id=%d offset=%u addr=0x%x len=%d\n",
+		 data->memory_id, data->offset, buf->addr, buf->len);
+
+	return node;
+
+register_fail:
+	kfree(node);
+	return NULL;
+}
+
+static int mdss_mdp_wb_queue(struct msm_fb_data_type *mfd,
+			     struct msmfb_data *data, int local)
+{
+	struct mdss_mdp_wb *wb = mfd->wb;
+	struct mdss_mdp_wb_data *node = NULL;
+	int ret = 0;
+
+	if (!wb) {
+		pr_err("unable to queue, writeback is not initialized\n");
+		return -ENODEV;
+	}
+
+	pr_debug("fb%d queue\n", wb->fb_ndx);
+
+	mutex_lock(&wb->lock);
+	if (local)
+		node = get_local_node(wb, data);
+	if (node == NULL)
+		node = get_user_node(mfd, data);
+
+	if (!node || node->state == IN_BUSY_QUEUE ||
+	    node->state == IN_FREE_QUEUE) {
+		pr_err("memory not registered or buffer already with us\n");
+		ret = -EINVAL;
+	} else {
+		list_add_tail(&node->active_entry, &wb->free_queue);
+		node->state = IN_FREE_QUEUE;
+	}
+	mutex_unlock(&wb->lock);
+
+	return ret;
+}
+
+static int is_buffer_ready(struct mdss_mdp_wb *wb)
+{
+	int rc;
+	mutex_lock(&wb->lock);
+	rc = !list_empty(&wb->busy_queue) || (wb->state == WB_STOPING);
+	mutex_unlock(&wb->lock);
+
+	return rc;
+}
+
+static int mdss_mdp_wb_dequeue(struct msm_fb_data_type *mfd,
+			       struct msmfb_data *data)
+{
+	struct mdss_mdp_wb *wb = mfd->wb;
+	struct mdss_mdp_wb_data *node = NULL;
+	int ret;
+
+	if (!wb) {
+		pr_err("unable to dequeue, writeback is not initialized\n");
+		return -ENODEV;
+	}
+
+	ret = wait_event_interruptible(wb->wait_q, is_buffer_ready(wb));
+	if (ret) {
+		pr_err("failed to get dequeued buffer\n");
+		return -ENOBUFS;
+	}
+
+	mutex_lock(&wb->lock);
+	if (wb->state == WB_STOPING) {
+		pr_debug("wfd stopped\n");
+		wb->state = WB_STOP;
+		ret = -ENOBUFS;
+	} else if (!list_empty(&wb->busy_queue)) {
+		struct mdss_mdp_img_data *buf;
+		node = list_first_entry(&wb->busy_queue,
+					struct mdss_mdp_wb_data,
+					active_entry);
+		list_del(&node->active_entry);
+		node->state = WITH_CLIENT;
+		memcpy(data, &node->buf_info, sizeof(*data));
+
+		buf = &node->buf_data.p[0];
+		pr_debug("found node addr=%x len=%d\n", buf->addr, buf->len);
+	} else {
+		pr_debug("node is NULL, wait for next\n");
+		ret = -ENOBUFS;
+	}
+	mutex_unlock(&wb->lock);
+	return ret;
+}
+
+static void mdss_mdp_wb_callback(void *arg)
+{
+	if (arg)
+		complete((struct completion *) arg);
+}
+
+int mdss_mdp_wb_kickoff(struct mdss_mdp_ctl *ctl)
+{
+	struct mdss_mdp_wb *wb;
+	struct mdss_mdp_wb_data *node = NULL;
+	int ret = 0;
+	DECLARE_COMPLETION_ONSTACK(comp);
+	struct mdss_mdp_writeback_arg wb_args = {
+		.callback_fnc = mdss_mdp_wb_callback,
+		.priv_data = &comp,
+	};
+
+	if (!ctl || !ctl->mfd)
+		return -ENODEV;
+
+	mutex_lock(&mdss_mdp_wb_buf_lock);
+	wb = ctl->mfd->wb;
+	if (wb) {
+		mutex_lock(&wb->lock);
+		if (!list_empty(&wb->free_queue) && wb->state != WB_STOPING &&
+		    wb->state != WB_STOP) {
+			node = list_first_entry(&wb->free_queue,
+						struct mdss_mdp_wb_data,
+						active_entry);
+			list_del(&node->active_entry);
+			node->state = IN_BUSY_QUEUE;
+			wb_args.data = &node->buf_data;
+		} else {
+			pr_debug("unable to get buf wb state=%d\n", wb->state);
+		}
+		mutex_unlock(&wb->lock);
+	}
+
+	if (wb_args.data == NULL)
+		wb_args.data = mdss_mdp_wb_debug_buffer(ctl->mfd);
+
+	if (wb_args.data == NULL) {
+		pr_err("unable to get writeback buf ctl=%d\n", ctl->num);
+		ret = -ENOMEM;
+		goto kickoff_fail;
+	}
+
+	ret = mdss_mdp_display_commit(ctl, &wb_args);
+	if (ret) {
+		pr_err("error on commit ctl=%d\n", ctl->num);
+		goto kickoff_fail;
+	}
+
+	wait_for_completion_interruptible(&comp);
+	if (wb && node) {
+		mutex_lock(&wb->lock);
+		list_add_tail(&node->active_entry, &wb->busy_queue);
+		mutex_unlock(&wb->lock);
+		wake_up(&wb->wait_q);
+	}
+
+kickoff_fail:
+	mutex_unlock(&mdss_mdp_wb_buf_lock);
+	return ret;
+}
+
+int mdss_mdp_wb_ioctl_handler(struct msm_fb_data_type *mfd, u32 cmd, void *arg)
+{
+	struct msmfb_data data;
+	int ret = -ENOSYS;
+
+	switch (cmd) {
+	case MSMFB_WRITEBACK_INIT:
+		ret = mdss_mdp_wb_init(mfd);
+		break;
+	case MSMFB_WRITEBACK_START:
+		ret = mdss_mdp_wb_start(mfd);
+		break;
+	case MSMFB_WRITEBACK_STOP:
+		ret = mdss_mdp_wb_stop(mfd);
+		break;
+	case MSMFB_WRITEBACK_QUEUE_BUFFER:
+		if (!copy_from_user(&data, arg, sizeof(data))) {
+			ret = mdss_mdp_wb_queue(mfd, &data, false);
+		} else {
+			pr_err("wb queue buf failed on copy_from_user\n");
+			ret = -EFAULT;
+		}
+		break;
+	case MSMFB_WRITEBACK_DEQUEUE_BUFFER:
+		if (!copy_from_user(&data, arg, sizeof(data))) {
+			ret = mdss_mdp_wb_dequeue(mfd, &data);
+			if (!ret && copy_to_user(arg, &data, sizeof(data)))
+				ret = -EFAULT;
+		} else {
+			pr_err("wb dequeue buf failed on copy_from_user\n");
+			ret = -EFAULT;
+		}
+		break;
+	case MSMFB_WRITEBACK_TERMINATE:
+		ret = mdss_mdp_wb_terminate(mfd);
+		break;
+	}
+
+	return ret;
+}
+
+int msm_fb_writeback_start(struct fb_info *info)
+{
+	struct msm_fb_data_type *mfd = (struct msm_fb_data_type *) info->par;
+
+	if (!mfd)
+		return -ENODEV;
+
+	return mdss_mdp_wb_start(mfd);
+}
+EXPORT_SYMBOL(msm_fb_writeback_start);
+
+int msm_fb_writeback_queue_buffer(struct fb_info *info,
+				  struct msmfb_data *data)
+{
+	struct msm_fb_data_type *mfd = (struct msm_fb_data_type *) info->par;
+
+	if (!mfd)
+		return -ENODEV;
+
+	return mdss_mdp_wb_queue(mfd, data, true);
+}
+EXPORT_SYMBOL(msm_fb_writeback_queue_buffer);
+
+int msm_fb_writeback_dequeue_buffer(struct fb_info *info,
+				    struct msmfb_data *data)
+{
+	struct msm_fb_data_type *mfd = (struct msm_fb_data_type *) info->par;
+
+	if (!mfd)
+		return -ENODEV;
+
+	return mdss_mdp_wb_dequeue(mfd, data);
+}
+EXPORT_SYMBOL(msm_fb_writeback_dequeue_buffer);
+
+int msm_fb_writeback_stop(struct fb_info *info)
+{
+	struct msm_fb_data_type *mfd = (struct msm_fb_data_type *) info->par;
+
+	if (!mfd)
+		return -ENODEV;
+
+	return mdss_mdp_wb_stop(mfd);
+}
+EXPORT_SYMBOL(msm_fb_writeback_stop);
+
+int msm_fb_writeback_init(struct fb_info *info)
+{
+	struct msm_fb_data_type *mfd = (struct msm_fb_data_type *) info->par;
+
+	if (!mfd)
+		return -ENODEV;
+
+	return mdss_mdp_wb_init(mfd);
+}
+EXPORT_SYMBOL(msm_fb_writeback_init);
+
+int msm_fb_writeback_terminate(struct fb_info *info)
+{
+	struct msm_fb_data_type *mfd = (struct msm_fb_data_type *) info->par;
+
+	if (!mfd)
+		return -ENODEV;
+
+	return mdss_mdp_wb_terminate(mfd);
+}
+EXPORT_SYMBOL(msm_fb_writeback_terminate);
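+
+/*
+ * Typical client sequence (illustrative sketch, not prescribed by this
+ * patch):
+ *
+ *	msm_fb_writeback_init(info);
+ *	msm_fb_writeback_start(info);
+ *	msm_fb_writeback_queue_buffer(info, &data);
+ *	... display commits move queued buffers to the busy queue via
+ *	    mdss_mdp_wb_kickoff() ...
+ *	msm_fb_writeback_dequeue_buffer(info, &data);
+ *	msm_fb_writeback_stop(info);
+ *	msm_fb_writeback_terminate(info);
+ */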
diff --git a/include/linux/i2c/isa1200.h b/include/linux/i2c/isa1200.h
index 9dab3eb..ffadf96 100644
--- a/include/linux/i2c/isa1200.h
+++ b/include/linux/i2c/isa1200.h
@@ -49,6 +49,7 @@
 	bool smart_en; /* smart mode enable/disable */
 	bool is_erm;
 	bool ext_clk_en;
+	bool need_pwm_clk;
 	unsigned int chip_en;
 	unsigned int duty;
 	struct isa1200_regulator *regulator_info;
diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h
index ed6bb39..c65740d 100644
--- a/include/linux/irqdomain.h
+++ b/include/linux/irqdomain.h
@@ -9,106 +9,178 @@
  * representation into a hardware irq number that can be mapped back to a
  * Linux irq number without any extra platform support code.
  *
- * irq_domain is expected to be embedded in an interrupt controller's private
- * data structure.
+ * Interrupt controller "domain" data structure. This could be defined as an
+ * irq domain controller. That is, it handles the mapping between hardware
+ * and virtual interrupt numbers for a given interrupt domain. The domain
+ * structure is generally created by the PIC code for a given PIC instance
+ * (though a domain can cover more than one PIC if they have a flat number
+ * model). It's the domain callbacks that are responsible for setting the
+ * irq_chip on a given irq_desc after it's been mapped.
+ *
+ * The host code and data structures are agnostic to whether or not
+ * we use an open firmware device-tree. We do have references to struct
+ * device_node in two places: in irq_find_host() to find the host matching
+ * a given interrupt controller node, and of course as an argument to its
+ * counterpart domain->ops->match() callback. However, those are treated as
+ * generic pointers by the core and the fact that it's actually a device-node
+ * pointer is purely a convention between callers and implementation. This
+ * code could thus be used on other architectures by replacing those two
+ * by some sort of arch-specific void * "token" used to identify interrupt
+ * controllers.
  */
+
 #ifndef _LINUX_IRQDOMAIN_H
 #define _LINUX_IRQDOMAIN_H
 
-#include <linux/irq.h>
-#include <linux/mod_devicetable.h>
+#include <linux/types.h>
+#include <linux/radix-tree.h>
 
-#ifdef CONFIG_IRQ_DOMAIN
 struct device_node;
 struct irq_domain;
+struct of_device_id;
+
+/* Number of irqs reserved for a legacy isa controller */
+#define NUM_ISA_INTERRUPTS	16
 
 /**
  * struct irq_domain_ops - Methods for irq_domain objects
- * @to_irq: (optional) given a local hardware irq number, return the linux
- *          irq number.  If to_irq is not implemented, then the irq_domain
- *          will use this translation: irq = (domain->irq_base + hwirq)
- * @dt_translate: Given a device tree node and interrupt specifier, decode
- *                the hardware irq number and linux irq type value.
+ * @match: Match an interrupt controller device node to a host, returns
+ *         1 on a match
+ * @map: Create or update a mapping between a virtual irq number and a hw
+ *       irq number. This is called only once for a given mapping.
+ * @unmap: Dispose of such a mapping
+ * @xlate: Given a device tree node and interrupt specifier, decode
+ *         the hardware irq number and linux irq type value.
+ *
+ * Functions below are provided by the driver and called whenever a new mapping
+ * is created or an old mapping is disposed. The driver can then proceed to
+ * whatever internal data structures management is required. It also needs
+ * to setup the irq_desc when returning from map().
  */
 struct irq_domain_ops {
-	unsigned int (*to_irq)(struct irq_domain *d, unsigned long hwirq);
-
-#ifdef CONFIG_OF
-	int (*dt_translate)(struct irq_domain *d, struct device_node *node,
-			    const u32 *intspec, unsigned int intsize,
-			    unsigned long *out_hwirq, unsigned int *out_type);
-#endif /* CONFIG_OF */
+	int (*match)(struct irq_domain *d, struct device_node *node);
+	int (*map)(struct irq_domain *d, unsigned int virq, irq_hw_number_t hw);
+	void (*unmap)(struct irq_domain *d, unsigned int virq);
+	int (*xlate)(struct irq_domain *d, struct device_node *node,
+		     const u32 *intspec, unsigned int intsize,
+		     unsigned long *out_hwirq, unsigned int *out_type);
 };
 
 /**
  * struct irq_domain - Hardware interrupt number translation object
- * @list: Element in global irq_domain list.
+ * @link: Element in global irq_domain list.
+ * @revmap_type: Method used for reverse mapping hwirq numbers to linux irq. This
+ *               will be one of the IRQ_DOMAIN_MAP_* values.
+ * @revmap_data: Revmap method specific data.
+ * @ops: pointer to irq_domain methods
+ * @host_data: private data pointer for use by owner.  Not touched by irq_domain
+ *             core code.
  * @irq_base: Start of irq_desc range assigned to the irq_domain.  The creator
  *            of the irq_domain is responsible for allocating the array of
  *            irq_desc structures.
  * @nr_irq: Number of irqs managed by the irq domain
  * @hwirq_base: Starting number for hwirqs managed by the irq domain
- * @ops: pointer to irq_domain methods
- * @priv: private data pointer for use by owner.  Not touched by irq_domain
- *        core code.
  * @of_node: (optional) Pointer to device tree nodes associated with the
  *           irq_domain.  Used when decoding device tree interrupt specifiers.
  */
 struct irq_domain {
-	struct list_head list;
-	unsigned int irq_base;
-	unsigned int nr_irq;
-	unsigned int hwirq_base;
+	struct list_head link;
+
+	/* type of reverse mapping_technique */
+	unsigned int revmap_type;
+	union {
+		struct {
+			unsigned int size;
+			unsigned int first_irq;
+			irq_hw_number_t first_hwirq;
+		} legacy;
+		struct {
+			unsigned int size;
+			unsigned int *revmap;
+		} linear;
+		struct {
+			unsigned int max_irq;
+		} nomap;
+		struct radix_tree_root tree;
+	} revmap_data;
 	const struct irq_domain_ops *ops;
-	void *priv;
+	void *host_data;
+	irq_hw_number_t inval_irq;
+
+	/* Optional device node pointer */
 	struct device_node *of_node;
 };
 
-/**
- * irq_domain_to_irq() - Translate from a hardware irq to a linux irq number
- *
- * Returns the linux irq number associated with a hardware irq.  By default,
- * the mapping is irq == domain->irq_base + hwirq, but this mapping can
- * be overridden if the irq_domain implements a .to_irq() hook.
- */
-static inline unsigned int irq_domain_to_irq(struct irq_domain *d,
-					     unsigned long hwirq)
+#ifdef CONFIG_IRQ_DOMAIN
+struct irq_domain *irq_domain_add_legacy(struct device_node *of_node,
+					 unsigned int size,
+					 unsigned int first_irq,
+					 irq_hw_number_t first_hwirq,
+					 const struct irq_domain_ops *ops,
+					 void *host_data);
+struct irq_domain *irq_domain_add_linear(struct device_node *of_node,
+					 unsigned int size,
+					 const struct irq_domain_ops *ops,
+					 void *host_data);
+struct irq_domain *irq_domain_add_nomap(struct device_node *of_node,
+					 unsigned int max_irq,
+					 const struct irq_domain_ops *ops,
+					 void *host_data);
+struct irq_domain *irq_domain_add_tree(struct device_node *of_node,
+					 const struct irq_domain_ops *ops,
+					 void *host_data);
+
+extern struct irq_domain *irq_find_host(struct device_node *node);
+extern void irq_set_default_host(struct irq_domain *host);
+
+static inline struct irq_domain *irq_domain_add_legacy_isa(
+				struct device_node *of_node,
+				const struct irq_domain_ops *ops,
+				void *host_data)
 {
-	if (d->ops->to_irq)
-		return d->ops->to_irq(d, hwirq);
-	if (WARN_ON(hwirq < d->hwirq_base))
-		return 0;
-	return d->irq_base + hwirq - d->hwirq_base;
+	return irq_domain_add_legacy(of_node, NUM_ISA_INTERRUPTS, 0, 0, ops,
+				     host_data);
 }
 
-#define irq_domain_for_each_hwirq(d, hw) \
-	for (hw = d->hwirq_base; hw < d->hwirq_base + d->nr_irq; hw++)
 
-#define irq_domain_for_each_irq(d, hw, irq) \
-	for (hw = d->hwirq_base, irq = irq_domain_to_irq(d, hw); \
-	     hw < d->hwirq_base + d->nr_irq; \
-	     hw++, irq = irq_domain_to_irq(d, hw))
-
+extern unsigned int irq_create_mapping(struct irq_domain *host,
+				       irq_hw_number_t hwirq);
 extern void irq_dispose_mapping(unsigned int virq);
+extern unsigned int irq_find_mapping(struct irq_domain *host,
+				     irq_hw_number_t hwirq);
+extern unsigned int irq_create_direct_mapping(struct irq_domain *host);
+extern void irq_radix_revmap_insert(struct irq_domain *host, unsigned int virq,
+				    irq_hw_number_t hwirq);
+extern unsigned int irq_radix_revmap_lookup(struct irq_domain *host,
+					    irq_hw_number_t hwirq);
+extern unsigned int irq_linear_revmap(struct irq_domain *host,
+				      irq_hw_number_t hwirq);
 
-extern int irq_domain_add(struct irq_domain *domain);
-extern void irq_domain_del(struct irq_domain *domain);
-extern void irq_domain_register(struct irq_domain *domain);
-extern void irq_domain_register_irq(struct irq_domain *domain, int hwirq);
-extern void irq_domain_unregister(struct irq_domain *domain);
-extern void irq_domain_unregister_irq(struct irq_domain *domain, int hwirq);
-extern int irq_domain_find_free_range(unsigned int from, unsigned int cnt);
+extern const struct irq_domain_ops irq_domain_simple_ops;
 
-extern struct irq_domain_ops irq_domain_simple_ops;
-#endif /* CONFIG_IRQ_DOMAIN */
+/* stock xlate functions */
+int irq_domain_xlate_onecell(struct irq_domain *d, struct device_node *ctrlr,
+			const u32 *intspec, unsigned int intsize,
+			irq_hw_number_t *out_hwirq, unsigned int *out_type);
+int irq_domain_xlate_twocell(struct irq_domain *d, struct device_node *ctrlr,
+			const u32 *intspec, unsigned int intsize,
+			irq_hw_number_t *out_hwirq, unsigned int *out_type);
+int irq_domain_xlate_onetwocell(struct irq_domain *d, struct device_node *ctrlr,
+			const u32 *intspec, unsigned int intsize,
+			irq_hw_number_t *out_hwirq, unsigned int *out_type);
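+
+/*
+ * Illustrative sketch (not part of this patch): a simple interrupt
+ * controller driver typically supplies a map() callback plus one of the
+ * stock xlate helpers and registers a linear domain.  The foo_* names are
+ * placeholders; irq_set_chip_and_handler() comes from <linux/irq.h>.
+ *
+ *	static int foo_irq_map(struct irq_domain *d, unsigned int virq,
+ *			       irq_hw_number_t hw)
+ *	{
+ *		irq_set_chip_and_handler(virq, &foo_irq_chip,
+ *					 handle_level_irq);
+ *		return 0;
+ *	}
+ *
+ *	static const struct irq_domain_ops foo_irq_ops = {
+ *		.map	= foo_irq_map,
+ *		.xlate	= irq_domain_xlate_onecell,
+ *	};
+ *
+ *	domain = irq_domain_add_linear(node, nr_irqs, &foo_irq_ops, priv);
+ *	virq = irq_create_mapping(domain, hwirq);
+ */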
 
-#if defined(CONFIG_IRQ_DOMAIN) && defined(CONFIG_OF_IRQ)
-extern void irq_domain_add_simple(struct device_node *controller, int irq_base);
+#if defined(CONFIG_OF_IRQ)
 extern void irq_domain_generate_simple(const struct of_device_id *match,
 					u64 phys_base, unsigned int irq_start);
-#else /* CONFIG_IRQ_DOMAIN && CONFIG_OF_IRQ */
+#else /* CONFIG_OF_IRQ */
 static inline void irq_domain_generate_simple(const struct of_device_id *match,
 					u64 phys_base, unsigned int irq_start) { }
-#endif /* CONFIG_IRQ_DOMAIN && CONFIG_OF_IRQ */
+#endif /* !CONFIG_OF_IRQ */
+
+#else /* CONFIG_IRQ_DOMAIN */
+static inline void irq_dispose_mapping(unsigned int virq) { }
+#endif /* !CONFIG_IRQ_DOMAIN */
 
 #endif /* _LINUX_IRQDOMAIN_H */
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
index 447fbbb..05a6b5b 100644
--- a/include/linux/mmc/host.h
+++ b/include/linux/mmc/host.h
@@ -241,7 +241,6 @@
 #define MMC_CAP2_BROKEN_VOLTAGE	(1 << 7)	/* Use the broken voltage */
 #define MMC_CAP2_DETECT_ON_ERR	(1 << 8)	/* On I/O err check card removal */
 #define MMC_CAP2_HC_ERASE_SZ	(1 << 9)	/* High-capacity erase size */
-#define MMC_CAP2_POWER_OFF_VCCQ_DURING_SUSPEND	(1 << 10)
 #define MMC_CAP2_PACKED_RD	(1 << 10)	/* Allow packed read */
 #define MMC_CAP2_PACKED_WR	(1 << 11)	/* Allow packed write */
 #define MMC_CAP2_PACKED_CMD	(MMC_CAP2_PACKED_RD | \
@@ -250,6 +249,7 @@
 #define MMC_CAP2_SANITIZE	(1 << 13)		/* Support Sanitize */
 #define MMC_CAP2_BKOPS		    (1 << 14)	/* BKOPS supported */
 #define MMC_CAP2_INIT_BKOPS	    (1 << 15)	/* Need to set BKOPS_EN */
+#define MMC_CAP2_POWER_OFF_VCCQ_DURING_SUSPEND	(1 << 16)
 
 	mmc_pm_flag_t		pm_caps;	/* supported pm features */
 
diff --git a/include/linux/msm_mdp.h b/include/linux/msm_mdp.h
index 8b6351f..19728fe 100644
--- a/include/linux/msm_mdp.h
+++ b/include/linux/msm_mdp.h
@@ -120,6 +120,7 @@
 	NUM_HSIC_PARAM,
 };
 
+#define MDSS_MDP_ROT_ONLY		0x80
 #define MDSS_MDP_RIGHT_MIXER		0x100
 
 /* mdp_blit_req flag values */
@@ -249,6 +250,7 @@
 	uint32_t version_key;
 	struct msmfb_data plane1_data;
 	struct msmfb_data plane2_data;
+	struct msmfb_data dst_data;
 };
 
 struct msmfb_img {
diff --git a/include/linux/msm_thermal.h b/include/linux/msm_thermal.h
new file mode 100644
index 0000000..fe9be89
--- /dev/null
+++ b/include/linux/msm_thermal.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MSM_THERMAL_H
+#define __MSM_THERMAL_H
+
+struct msm_thermal_data {
+	uint32_t sensor_id;
+	uint32_t poll_ms;
+	uint32_t limit_temp;
+	uint32_t temp_hysteresis;
+	uint32_t limit_freq;
+};
+
+#ifdef CONFIG_THERMAL_MONITOR
+extern int msm_thermal_init(struct msm_thermal_data *pdata);
+#else
+static inline int msm_thermal_init(struct msm_thermal_data *pdata)
+{
+	return -ENOSYS;
+}
+#endif
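+
+/*
+ * Illustrative board-level usage (sketch only; all values are placeholders):
+ *
+ *	static struct msm_thermal_data msm_thermal_pdata = {
+ *		.sensor_id = 0,
+ *		.poll_ms = 250,
+ *		.limit_temp = 60,
+ *		.temp_hysteresis = 10,
+ *		.limit_freq = 918000,
+ *	};
+ *
+ *	msm_thermal_init(&msm_thermal_pdata);
+ */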
+
+#endif /*__MSM_THERMAL_H*/
diff --git a/include/linux/qpnp/gpio.h b/include/linux/qpnp/gpio.h
deleted file mode 100644
index e7fb53e..0000000
--- a/include/linux/qpnp/gpio.h
+++ /dev/null
@@ -1,118 +0,0 @@
-/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- */
-
-#include <mach/qpnp.h>
-
-#define QPNP_GPIO_DIR_IN			0
-#define QPNP_GPIO_DIR_OUT			1
-#define QPNP_GPIO_DIR_BOTH			2
-
-#define QPNP_GPIO_INVERT_DISABLE		0
-#define QPNP_GPIO_INVERT_ENABLE			1
-
-#define QPNP_GPIO_OUT_BUF_CMOS			0
-#define QPNP_GPIO_OUT_BUF_OPEN_DRAIN_NMOS	1
-#define QPNP_GPIO_OUT_BUF_OPEN_DRAIN_PMOS	2
-
-#define QPNP_GPIO_VIN0				0
-#define QPNP_GPIO_VIN1				1
-#define QPNP_GPIO_VIN2				2
-#define QPNP_GPIO_VIN3				3
-#define QPNP_GPIO_VIN4				4
-#define QPNP_GPIO_VIN5				5
-#define QPNP_GPIO_VIN6				6
-#define QPNP_GPIO_VIN7				7
-
-#define QPNP_GPIO_PULL_UP_30			0
-#define QPNP_GPIO_PULL_UP_1P5			1
-#define QPNP_GPIO_PULL_UP_31P5			2
-#define QPNP_GPIO_PULL_UP_1P5_30		3
-#define QPNP_GPIO_PULL_DN			4
-#define QPNP_GPIO_PULL_NO			5
-
-#define QPNP_GPIO_OUT_STRENGTH_LOW		1
-#define QPNP_GPIO_OUT_STRENGTH_MED		2
-#define QPNP_GPIO_OUT_STRENGTH_HIGH		3
-
-#define QPNP_GPIO_SRC_FUNC_NORMAL		0
-#define QPNP_GPIO_SRC_FUNC_PAIRED		1
-#define QPNP_GPIO_SRC_FUNC_1			2
-#define QPNP_GPIO_SRC_FUNC_2			3
-#define QPNP_GPIO_SRC_DTEST1			4
-#define QPNP_GPIO_SRC_DTEST2			5
-#define QPNP_GPIO_SRC_DTEST3			6
-#define QPNP_GPIO_SRC_DTEST4			7
-
-#define QPNP_GPIO_MASTER_DISABLE		0
-#define QPNP_GPIO_MASTER_ENABLE			1
-
-/**
- * struct qpnp_gpio_cfg - structure to specify gpio configurtion values
- * @direction:		indicates whether the gpio should be input, output, or
- *			both. Should be of the type QPNP_GPIO_DIR_*
- * @output_type:	indicates gpio should be configured as CMOS or open
- *			drain. Should be of the type QPNP_GPIO_OUT_BUF_*
- * @invert:		Invert the signal of the gpio line -
- *			QPNP_GPIO_INVERT_DISABLE or QPNP_GPIO_INVERT_ENABLE
- * @pull:		Indicates whether a pull up or pull down should be
- *			applied. If a pullup is required the current strength
- *			needs to be specified. Current values of 30uA, 1.5uA,
- *			31.5uA, 1.5uA with 30uA boost are supported. This value
- *			should be one of the QPNP_GPIO_PULL_*
- * @vin_sel:		specifies the voltage level when the output is set to 1.
- *			For an input gpio specifies the voltage level at which
- *			the input is interpreted as a logical 1.
- * @out_strength:	the amount of current supplied for an output gpio,
- *			should be of the type QPNP_GPIO_STRENGTH_*
- * @source_sel:		choose alternate function for the gpio. Certain gpios
- *			can be paired (shorted) with each other. Some gpio pin
- *			can act as alternate functions. This parameter should
- *			be of type QPNP_GPIO_SRC_*.
- * @master_en:		QPNP_GPIO_MASTER_ENABLE = Enable features within the
- *			GPIO block based on configurations.
- *			QPNP_GPIO_MASTER_DISABLE = Completely disable the GPIO
- *			block and let the pin float with high impedance
- *			regardless of other settings.
- */
-struct qpnp_gpio_cfg {
-	unsigned int direction;
-	unsigned int output_type;
-	unsigned int invert;
-	unsigned int pull;
-	unsigned int vin_sel;
-	unsigned int out_strength;
-	unsigned int src_select;
-	unsigned int master_en;
-};
-
-/**
- * qpnp_gpio_config - Apply gpio configuration for Linux gpio
- * @gpio: Linux gpio number to configure.
- * @param: parameters to configure.
- *
- * This routine takes a Linux gpio number that corresponds with a
- * PMIC gpio and applies the configuration specified in 'param'.
- * This gpio number can be ascertained by of_get_gpio_flags() or
- * the qpnp_gpio_map_gpio() API.
- */
-int qpnp_gpio_config(int gpio, struct qpnp_gpio_cfg *param);
-
-/**
- * qpnp_gpio_map_gpio - Obtain Linux GPIO number from device spec
- * @slave_id: slave_id of the spmi_device for the gpio in question.
- * @pmic_gpio: PMIC gpio number to lookup.
- *
- * This routine is used in legacy configurations that do not support
- * Device Tree. If you are using Device Tree, you should not use this.
- * For such cases, use of_get_gpio() instead.
- */
-int qpnp_gpio_map_gpio(uint16_t slave_id, uint32_t pmic_gpio);
diff --git a/include/linux/qpnp/pin.h b/include/linux/qpnp/pin.h
new file mode 100644
index 0000000..fa9c30f
--- /dev/null
+++ b/include/linux/qpnp/pin.h
@@ -0,0 +1,190 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/* Mode select */
+#define QPNP_PIN_MODE_DIG_IN			0
+#define QPNP_PIN_MODE_DIG_OUT			1
+#define QPNP_PIN_MODE_DIG_IN_OUT		2
+#define QPNP_PIN_MODE_BIDIR			3
+#define QPNP_PIN_MODE_AIN			4
+#define QPNP_PIN_MODE_AOUT			5
+#define QPNP_PIN_MODE_SINK			6
+
+/* Invert source select (GPIO, MPP) */
+#define QPNP_PIN_INVERT_DISABLE			0
+#define QPNP_PIN_INVERT_ENABLE			1
+
+/* Output type (GPIO) */
+#define QPNP_PIN_OUT_BUF_CMOS			0
+#define QPNP_PIN_OUT_BUF_OPEN_DRAIN_NMOS	1
+#define QPNP_PIN_OUT_BUF_OPEN_DRAIN_PMOS	2
+
+/* Voltage select (GPIO, MPP) */
+#define QPNP_PIN_VIN0				0
+#define QPNP_PIN_VIN1				1
+#define QPNP_PIN_VIN2				2
+#define QPNP_PIN_VIN3				3
+#define QPNP_PIN_VIN4				4
+#define QPNP_PIN_VIN5				5
+#define QPNP_PIN_VIN6				6
+#define QPNP_PIN_VIN7				7
+
+/* Pull Up Values (GPIO) */
+#define QPNP_PIN_GPIO_PULL_UP_30		0
+#define QPNP_PIN_GPIO_PULL_UP_1P5		1
+#define QPNP_PIN_GPIO_PULL_UP_31P5		2
+#define QPNP_PIN_GPIO_PULL_UP_1P5_30		3
+#define QPNP_PIN_GPIO_PULL_DN			4
+#define QPNP_PIN_GPIO_PULL_NO			5
+
+/* Pull Up Values (MPP) */
+#define QPNP_PIN_MPP_PULL_UP_0P6KOHM		0
+#define QPNP_PIN_MPP_PULL_UP_OPEN		1
+#define QPNP_PIN_MPP_PULL_UP_10KOHM		2
+#define QPNP_PIN_MPP_PULL_UP_30KOHM		3
+
+/* Out Strength (GPIO) */
+#define QPNP_PIN_OUT_STRENGTH_LOW		1
+#define QPNP_PIN_OUT_STRENGTH_MED		2
+#define QPNP_PIN_OUT_STRENGTH_HIGH		3
+
+/* Source Select (GPIO) / Enable Select (MPP) */
+#define QPNP_PIN_SEL_FUNC_CONSTANT		0
+#define QPNP_PIN_SEL_FUNC_PAIRED		1
+#define QPNP_PIN_SEL_FUNC_1			2
+#define QPNP_PIN_SEL_FUNC_2			3
+#define QPNP_PIN_SEL_DTEST1			4
+#define QPNP_PIN_SEL_DTEST2			5
+#define QPNP_PIN_SEL_DTEST3			6
+#define QPNP_PIN_SEL_DTEST4			7
+
+/* Master enable (GPIO, MPP) */
+#define QPNP_PIN_MASTER_DISABLE			0
+#define QPNP_PIN_MASTER_ENABLE			1
+
+/* Analog Output (MPP) */
+#define QPNP_PIN_AOUT_1V25			0
+#define QPNP_PIN_AOUT_0V625			1
+#define QPNP_PIN_AOUT_0V3125			2
+#define QPNP_PIN_AOUT_MPP			3
+#define QPNP_PIN_AOUT_ABUS1			4
+#define QPNP_PIN_AOUT_ABUS2			5
+#define QPNP_PIN_AOUT_ABUS3			6
+#define QPNP_PIN_AOUT_ABUS4			7
+
+/* Analog Input (MPP) */
+#define QPNP_PIN_AIN_AMUX_CH5			0
+#define QPNP_PIN_AIN_AMUX_CH6			1
+#define QPNP_PIN_AIN_AMUX_CH7			2
+#define QPNP_PIN_AIN_AMUX_CH8			3
+#define QPNP_PIN_AIN_AMUX_ABUS1			4
+#define QPNP_PIN_AIN_AMUX_ABUS2			5
+#define QPNP_PIN_AIN_AMUX_ABUS3			6
+#define QPNP_PIN_AIN_AMUX_ABUS4			7
+
+/* Current Sink (MPP) */
+#define QPNP_PIN_CS_OUT_5MA			0
+#define QPNP_PIN_CS_OUT_10MA			1
+#define QPNP_PIN_CS_OUT_15MA			2
+#define QPNP_PIN_CS_OUT_20MA			3
+#define QPNP_PIN_CS_OUT_25MA			4
+#define QPNP_PIN_CS_OUT_30MA			5
+#define QPNP_PIN_CS_OUT_35MA			6
+#define QPNP_PIN_CS_OUT_40MA			7
+
+/**
+ * struct qpnp_pin_cfg - structure to specify pin configuration values
+ * @mode:		indicates whether the pin should be input, output, or
+ *			both for gpios. mpp pins also support bidirectional,
+ *			analog in, analog out and current sink. This value
+ *			should be of type QPNP_PIN_MODE_*.
+ * @output_type:	indicates pin should be configured as CMOS or open
+ *			drain. Should be of the type QPNP_PIN_OUT_BUF_*. This
+ *			setting applies for gpios only.
+ * @invert:		Invert the signal of the line -
+ *			QPNP_PIN_INVERT_DISABLE or QPNP_PIN_INVERT_ENABLE.
+ * @pull:		This parameter should be programmed to different values
+ *			depending on whether it's GPIO or MPP.
+ *			For GPIO, it indicates whether a pull up or pull down
+ *			should be applied. If a pullup is required the
+ *			current strength needs to be specified.
+ *			Current values of 30uA, 1.5uA, 31.5uA, 1.5uA with 30uA
+ *			boost are supported. This value should be one of
+ *			the QPNP_PIN_GPIO_PULL_*. Note that the hardware ignores
+ *			this configuration if the GPIO is not set to input or
+ *			output open-drain mode.
+ *			For MPP, it indicates whether a pullup should be
+ *			applied for bidirectional mode only. The hardware
+ *			ignores the configuration when operating in other modes.
+ *			This value should be one of the QPNP_PIN_MPP_PULL_*.
+ * @vin_sel:		specifies the voltage level when the output is set to 1.
+ *			For an input gpio specifies the voltage level at which
+ *			the input is interpreted as a logical 1.
+ * @out_strength:	the amount of current supplied for an output gpio,
+ *			should be of the type QPNP_PIN_STRENGTH_*.
+ * @select:		select alternate function for the pin. Certain pins
+ *			can be paired (shorted) with each other. Some pins
+ *			can act as alternate functions. In the context of
+ *			gpio, this acts as a source select. For mpps,
+ *			this is an enable select.
+ *			This parameter should be of type QPNP_PIN_SEL_*.
+ * @master_en:		QPNP_PIN_MASTER_ENABLE = Enable features within the
+ *			pin block based on configurations.
+ *			QPNP_PIN_MASTER_DISABLE = Completely disable the pin
+ *			block and let the pin float with high impedance
+ *			regardless of other settings.
+ * @aout_ref:		Set the analog output reference. This parameter should
+ *			be of type QPNP_PIN_AOUT_*. This parameter only applies
+ *			to mpp pins.
+ * @ain_route:		Set the source for analog input. This parameter
+ *			should be of type QPNP_PIN_AIN_*. This parameter only
+ *			applies to mpp pins.
+ * @cs_out:		Set the amount of current to sink in mA. This
+ *			parameter should be of type QPNP_PIN_CS_OUT_*. This
+ *			parameter only applies to mpp pins.
+ */
+struct qpnp_pin_cfg {
+	int mode;
+	int output_type;
+	int invert;
+	int pull;
+	int vin_sel;
+	int out_strength;
+	int select;
+	int master_en;
+	int aout_ref;
+	int ain_route;
+	int cs_out;
+};
+
+/**
+ * qpnp_pin_config - Apply pin configuration for Linux gpio
+ * @gpio: Linux gpio number to configure.
+ * @param: parameters to configure.
+ *
+ * This routine takes a Linux gpio number that corresponds with a
+ * PMIC pin and applies the configuration specified in 'param'.
+ * This gpio number can be ascertained by of_get_gpio_flags() or
+ * the qpnp_pin_map_gpio() API.
+ */
+int qpnp_pin_config(int gpio, struct qpnp_pin_cfg *param);
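+
+/*
+ * Illustrative usage (sketch only): configure a PMIC gpio as a CMOS digital
+ * output referenced to VIN2 with no pull:
+ *
+ *	struct qpnp_pin_cfg param = {
+ *		.mode		= QPNP_PIN_MODE_DIG_OUT,
+ *		.output_type	= QPNP_PIN_OUT_BUF_CMOS,
+ *		.invert		= QPNP_PIN_INVERT_DISABLE,
+ *		.pull		= QPNP_PIN_GPIO_PULL_NO,
+ *		.vin_sel	= QPNP_PIN_VIN2,
+ *		.out_strength	= QPNP_PIN_OUT_STRENGTH_LOW,
+ *		.select		= QPNP_PIN_SEL_FUNC_CONSTANT,
+ *		.master_en	= QPNP_PIN_MASTER_ENABLE,
+ *	};
+ *	int rc = qpnp_pin_config(gpio, &param);
+ */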
+
+/**
+ * qpnp_pin_map - Obtain Linux GPIO number from device spec
+ * @name: Name assigned by the 'label' binding for the primary node.
+ * @pmic_pin: PMIC pin number to lookup.
+ *
+ * This routine is used in legacy configurations that do not support
+ * Device Tree. If you are using Device Tree, you should not use this.
+ * For such cases, use of_get_gpio() or friends instead.
+ */
+int qpnp_pin_map(const char *name, uint32_t pmic_pin);
diff --git a/include/linux/spmi.h b/include/linux/spmi.h
index 927978a..f94b5c5 100644
--- a/include/linux/spmi.h
+++ b/include/linux/spmi.h
@@ -92,11 +92,19 @@
  * @num_resources: number of resources for this device node
  * @resources: array of resources for this device_node
  * @of_node: device_node of the resource in question
+ * @label: name used to reference the device from the driver
+ *
+ * Note that we explicitly add a 'label' pointer here since per
+ * the ePAPR 2.2.2, the device_node->name should be generic and not
+ * reflect the precise programming model. Thus 'label' enables a
+ * platform-specific name to be assigned with the 'label' binding to
+ * allow for unique query names.
  */
 struct spmi_resource {
 	struct resource		*resource;
 	u32			num_resources;
 	struct device_node	*of_node;
+	const char		*label;
 };
 
 /**
@@ -108,7 +116,8 @@
  *  @dev: Driver model representation of the device.
  *  @name: Name of driver to use with this device.
  *  @ctrl: SPMI controller managing the bus hosting this device.
- *  @dev_node: array of SPMI resources - one entry per device_node.
+ *  @res: SPMI resource for the primary node
+ *  @dev_node: array of SPMI resources when used with spmi-dev-container.
  *  @num_dev_node: number of device_node structures.
  *  @sid: Slave Identifier.
  */
@@ -116,6 +125,7 @@
 	struct device		dev;
 	const char		*name;
 	struct spmi_controller	*ctrl;
+	struct spmi_resource	res;
 	struct spmi_resource	*dev_node;
 	u32			num_dev_node;
 	u8			sid;
@@ -124,10 +134,12 @@
 
 /**
  * struct spmi_boardinfo: Declare board info for SPMI device bringup.
+ * @name: Name of driver to use with this device.
  * @slave_id: slave identifier.
  * @spmi_device: device to be registered with the SPMI framework.
  * @of_node: pointer to the OpenFirmware device node.
- * @dev_node: one spmi_resource for each device_node.
+ * @res: SPMI resource for the primary node
+ * @dev_node: array of SPMI resources when used with spmi-dev-container.
  * @num_dev_node: number of device_node structures.
  * @platform_data: goes to spmi_device.dev.platform_data
  */
@@ -135,6 +147,7 @@
 	char			name[SPMI_NAME_SIZE];
 	uint8_t			slave_id;
 	struct device_node	*of_node;
+	struct spmi_resource	res;
 	struct spmi_resource	*dev_node;
 	u32			num_dev_node;
 	const void		*platform_data;
@@ -417,4 +430,49 @@
  * -ETIMEDOUT if the SPMI transaction times out.
  */
 extern int spmi_command_shutdown(struct spmi_controller *ctrl, u8 sid);
+
+/**
+ * spmi_for_each_container_dev - iterate over the array of devnode resources.
+ * @res: spmi_resource pointer used as the array cursor
+ * @spmi_dev: spmi_device to iterate
+ *
+ * Only useable in spmi-dev-container configurations.
+ */
+#define spmi_for_each_container_dev(res, spmi_dev)			      \
+	for (res = ((spmi_dev)->dev_node ? &(spmi_dev)->dev_node[0] : NULL);  \
+	     (res - (spmi_dev)->dev_node) < (spmi_dev)->num_dev_node; res++)
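+
+/*
+ * Illustrative usage (sketch only) for a driver probed against a
+ * spmi-dev-container device; IORESOURCE_MEM and the resource index are
+ * examples:
+ *
+ *	struct spmi_resource *res;
+ *
+ *	spmi_for_each_container_dev(res, spmi_dev) {
+ *		struct resource *r = spmi_get_resource(spmi_dev, res,
+ *						       IORESOURCE_MEM, 0);
+ *		...
+ *	}
+ */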
+
+extern struct resource *spmi_get_resource(struct spmi_device *dev,
+				      struct spmi_resource *node,
+				      unsigned int type, unsigned int res_num);
+
+struct resource *spmi_get_resource_byname(struct spmi_device *dev,
+					  struct spmi_resource *node,
+					  unsigned int type,
+					  const char *name);
+
+extern int spmi_get_irq(struct spmi_device *dev, struct spmi_resource *node,
+						 unsigned int res_num);
+
+extern int spmi_get_irq_byname(struct spmi_device *dev,
+			       struct spmi_resource *node, const char *name);
+
+/**
+ * spmi_get_node_name - return device name for spmi node
+ * @dev: spmi device handle
+ *
+ * Get the primary node name of an spmi_device corresponding with
+ * the 'label' binding.
+ *
+ * Returns NULL if no primary dev name has been assigned to this spmi_device.
+ */
+static inline const char *spmi_get_primary_dev_name(struct spmi_device *dev)
+{
+	if (dev->res.label)
+		return dev->res.label;
+	return NULL;
+}
+
+struct spmi_resource *spmi_get_dev_container_byname(struct spmi_device *dev,
+						    const char *label);
 #endif
diff --git a/include/linux/test-iosched.h b/include/linux/test-iosched.h
new file mode 100644
index 0000000..8054409
--- /dev/null
+++ b/include/linux/test-iosched.h
@@ -0,0 +1,233 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * The test scheduler allows testing a block device by dispatching
+ * specific requests according to the test case and declare PASS/FAIL
+ * according to the requests completion error code.
+ * Each test is exposed via debugfs and can be triggered by writing to
+ * the debugfs file.
+ *
+ */
+
+#ifndef _LINUX_TEST_IOSCHED_H
+#define _LINUX_TEST_IOSCHED_H
+
+/*
+ * Patterns definitions for read/write requests data
+ */
+#define TEST_PATTERN_SEQUENTIAL	-1
+#define TEST_PATTERN_5A		0x5A5A5A5A
+#define TEST_PATTERN_FF		0xFFFFFFFF
+#define TEST_NO_PATTERN		0xDEADBEEF
+#define BIO_U32_SIZE 1024
+
+struct test_data;
+
+typedef int (prepare_test_fn) (struct test_data *);
+typedef int (run_test_fn) (struct test_data *);
+typedef int (check_test_result_fn) (struct test_data *);
+typedef int (post_test_fn) (struct test_data *);
+typedef char* (get_test_case_str_fn) (struct test_data *);
+typedef void (blk_dev_test_init_fn) (void);
+typedef void (blk_dev_test_exit_fn) (void);
+
+/**
+ * enum test_state - defines the state of the test
+ */
+enum test_state {
+	TEST_IDLE,
+	TEST_RUNNING,
+	TEST_COMPLETED,
+};
+
+/**
+ * enum test_results - defines the success or failure of the test
+ */
+enum test_results {
+	TEST_NO_RESULT,
+	TEST_FAILED,
+	TEST_PASSED,
+	TEST_NOT_SUPPORTED,
+};
+
+/**
+ * enum req_unique_type - defines a unique request type
+ */
+enum req_unique_type {
+	REQ_UNIQUE_NONE,
+	REQ_UNIQUE_DISCARD,
+	REQ_UNIQUE_FLUSH,
+};
+
+/**
+ * struct test_debug - debugfs directories
+ * @debug_root:		The test-iosched debugfs root directory
+ * @debug_utils_root:	test-iosched debugfs utils root
+ *			directory
+ * @debug_tests_root:	test-iosched debugfs tests root
+ *			directory
+ * @debug_test_result:	Exposes the test result to the user
+ *			space
+ * @start_sector:	The start sector for read/write requests
+ */
+struct test_debug {
+	struct dentry *debug_root;
+	struct dentry *debug_utils_root;
+	struct dentry *debug_tests_root;
+	struct dentry *debug_test_result;
+	struct dentry *start_sector;
+};
+
+/**
+ * struct test_request - defines a test request
+ * @queuelist:		The test requests list
+ * @bios_buffer:	Write/read requests data buffer
+ * @buf_size:		Write/read requests data buffer size (in
+ *			bytes)
+ * @rq:			A block request, to be dispatched
+ * @req_completed:	A flag to indicate if the request was
+ *			completed
+ * @req_result:		Keeps the error code received in the
+ *			request completion callback
+ * @is_err_expected:	A flag to indicate if the request should
+ *			fail
+ * @wr_rd_data_pattern:	A pattern written to the write data
+ *			buffer. Can be used in read requests to
+ *			verify the data
+ * @req_id:		A unique ID to identify a test request
+ *			to ease the debugging of the test cases
+ */
+struct test_request {
+	struct list_head queuelist;
+	unsigned int *bios_buffer;
+	int buf_size;
+	struct request *rq;
+	bool req_completed;
+	int req_result;
+	int is_err_expected;
+	int wr_rd_data_pattern;
+	int req_id;
+};
+
+/**
+ * struct test_info - specific test information
+ * @testcase:		The current running test case
+ * @timeout_msec:	Test-specific timeout, in milliseconds
+ * @buf_size:		Write/read requests data buffer size (in
+ *			bytes)
+ * @prepare_test_fn:	Test-specific preparation callback
+ * @run_test_fn:	Test-specific run callback
+ * @check_test_result_fn: Test-specific result checking
+ *			callback
+ * @post_test_fn:	Test-specific post-test cleanup callback
+ * @get_test_case_str_fn: Test-specific callback that returns the test name
+ * @data:		Test-specific private data
+ */
+struct test_info {
+	int testcase;
+	unsigned timeout_msec;
+	prepare_test_fn *prepare_test_fn;
+	run_test_fn *run_test_fn;
+	check_test_result_fn *check_test_result_fn;
+	post_test_fn *post_test_fn;
+	get_test_case_str_fn *get_test_case_str_fn;
+	void *data;
+};
+
+/**
+ * struct blk_dev_test_type - identifies block device test
+ * @list:	list head pointer
+ * @init_fn:	block device test init callback
+ * @exit_fn:	block device test exit callback
+ */
+struct blk_dev_test_type {
+	struct list_head list;
+	blk_dev_test_init_fn *init_fn;
+	blk_dev_test_exit_fn *exit_fn;
+};
+
+/**
+ * struct test_data - global test iosched data
+ * @queue:		The test IO scheduler requests list
+ * @test_queue:		The test requests list
+ * @next_req:		Points to the next request to be
+ *			dispatched from the test requests list
+ * @wait_q:		A wait queue for waiting for the test
+ *			requests completion
+ * @test_state:		Indicates if there is a running test.
+ *			Used for dispatch function
+ * @test_result:	Indicates if the test passed or failed
+ * @debug:		The test debugfs entries
+ * @req_q:		The block layer request queue
+ * @num_of_write_bios:	The number of write BIOs added to the test requests.
+ *			Used to calculate the sector number of
+ *			new BIOs.
+ * @start_sector:	The address of the first sector that can
+ *			be accessed by the test
+ * @timeout_timer:	A timer to verify test completion in
+ *			case of non-completed requests
+ * @wr_rd_next_req_id:	A unique ID to identify WRITE/READ
+ *			request to ease the debugging of the
+ *			test cases
+ * @unique_next_req_id:	A unique ID to identify
+ *			FLUSH/DISCARD/SANITIZE request to ease
+ *			the debugging of the test cases
+ * @lock:		A lock to verify running a single test
+ *			at a time
+ * @test_info:		The test-specific data, set by the
+ *			test invocation function
+ * @fs_wr_reqs_during_test: Indicates whether file-system write
+ *			requests were issued while the test was
+ *			running
+ * @ignore_round:	A boolean variable indicating that a
+ *			test round was disturbed by an external
+ *			flush request, therefore disqualifying
+ *			the results
+ */
+struct test_data {
+	struct list_head queue;
+	struct list_head test_queue;
+	struct test_request *next_req;
+	wait_queue_head_t wait_q;
+	enum test_state test_state;
+	enum test_results test_result;
+	struct test_debug debug;
+	struct request_queue *req_q;
+	int num_of_write_bios;
+	u32 start_sector;
+	struct timer_list timeout_timer;
+	int wr_rd_next_req_id;
+	int unique_next_req_id;
+	spinlock_t lock;
+	struct test_info test_info;
+	bool fs_wr_reqs_during_test;
+	bool ignore_round;
+};
+
+extern int test_iosched_start_test(struct test_info *t_info);
+extern void test_iosched_mark_test_completion(void);
+extern int test_iosched_add_unique_test_req(int is_err_expcted,
+		enum req_unique_type req_unique,
+		int start_sec, int nr_sects, rq_end_io_fn *end_req_io);
+extern int test_iosched_add_wr_rd_test_req(int is_err_expcted,
+	      int direction, int start_sec,
+	      int num_bios, int pattern, rq_end_io_fn *end_req_io);
+
+extern struct dentry *test_iosched_get_debugfs_tests_root(void);
+extern struct dentry *test_iosched_get_debugfs_utils_root(void);
+
+extern struct request_queue *test_iosched_get_req_queue(void);
+
+extern void test_iosched_set_test_result(int);
+
+void test_iosched_set_ignore_round(bool ignore_round);
+
+void test_iosched_register(struct blk_dev_test_type *bdt);
+
+void test_iosched_unregister(struct blk_dev_test_type *bdt);
+
+#endif /* _LINUX_TEST_IOSCHED_H */
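
A hedged sketch of how a block-device test utility might hook into this
framework; the debugfs wiring is elided, and the specific request (one
non-failing WRITE of 8 BIOs with the 0x5A pattern and a NULL completion
callback) is an illustrative assumption:

	static int my_run_test(struct test_data *td)
	{
		/* queue one WRITE request starting at the configured sector */
		return test_iosched_add_wr_rd_test_req(0, WRITE,
				td->start_sector, 8,
				TEST_PATTERN_5A, NULL);
	}

	static struct test_info my_test_info = {
		.testcase     = 1,
		.timeout_msec = 10 * 1000,
		.run_test_fn  = my_run_test,
	};

	static void my_bdt_init(void)
	{
		/* would normally create a debugfs trigger under
		 * test_iosched_get_debugfs_tests_root() whose write
		 * handler calls test_iosched_start_test(&my_test_info)
		 */
	}

	static void my_bdt_exit(void) { }

	static struct blk_dev_test_type my_bdt = {
		.init_fn = my_bdt_init,
		.exit_fn = my_bdt_exit,
	};

	/* module init: test_iosched_register(&my_bdt);
	 * module exit: test_iosched_unregister(&my_bdt);
	 */
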
diff --git a/include/linux/usb/hcd.h b/include/linux/usb/hcd.h
index d9ec332..eabe4e8 100644
--- a/include/linux/usb/hcd.h
+++ b/include/linux/usb/hcd.h
@@ -212,6 +212,7 @@
 #define	HCD_MEMORY	0x0001		/* HC regs use memory (else I/O) */
 #define	HCD_LOCAL_MEM	0x0002		/* HC needs local memory */
 #define	HCD_SHARED	0x0004		/* Two (or more) usb_hcds share HW */
+#define	HCD_OLD_ENUM	0x0008		/* HC supports short enumeration */
 #define	HCD_USB11	0x0010		/* USB 1.1 */
 #define	HCD_USB2	0x0020		/* USB 2.0 */
 #define	HCD_USB3	0x0040		/* USB 3.0 */
@@ -348,6 +349,8 @@
 	/* to log completion events*/
 	void	(*log_urb_complete)(struct urb *urb, char * event,
 			unsigned extra);
+	void	(*enable_ulpi_control)(struct usb_hcd *hcd, u32 linestate);
+	void	(*disable_ulpi_control)(struct usb_hcd *hcd);
 };
 
 extern int usb_hcd_link_urb_to_ep(struct usb_hcd *hcd, struct urb *urb);
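
A hedged sketch of how bus-glue code might invoke the new optional hc_driver
hooks; the NULL guard and the linestate variable are assumptions, since most
host controllers will leave these ops unset:

	/* hcd and linestate are assumed to be in scope in the bus-glue code */
	if (hcd->driver->enable_ulpi_control)
		hcd->driver->enable_ulpi_control(hcd, linestate);

	/* ... later, once the ULPI override is no longer needed ... */
	if (hcd->driver->disable_ulpi_control)
		hcd->driver->disable_ulpi_control(hcd);
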
diff --git a/include/media/msm_camera.h b/include/media/msm_camera.h
index 320ac8b..3308243 100644
--- a/include/media/msm_camera.h
+++ b/include/media/msm_camera.h
@@ -446,10 +446,12 @@
 #define CMD_VFE_BUFFER_RELEASE 51
 #define CMD_VFE_PROCESS_IRQ 52
 
-#define CMD_AXI_CFG_PRIM		0xF1
-#define CMD_AXI_CFG_PRIM_ALL_CHNLS	0xF2
-#define CMD_AXI_CFG_SEC			0xF4
-#define CMD_AXI_CFG_SEC_ALL_CHNLS	0xF8
+#define CMD_AXI_CFG_PRIM		0xc1
+#define CMD_AXI_CFG_PRIM_ALL_CHNLS	0xc2
+#define CMD_AXI_CFG_SEC			0xc4
+#define CMD_AXI_CFG_SEC_ALL_CHNLS	0xc8
+#define CMD_AXI_CFG_TERT1		0xd0
+
 
 #define CMD_AXI_START  0xE1
 #define CMD_AXI_STOP   0xE2
@@ -549,10 +551,11 @@
 #define OUTPUT_ZSL_ALL_CHNLS 10
 #define LAST_AXI_OUTPUT_MODE_ENUM = OUTPUT_ZSL_ALL_CHNLS
 
-#define OUTPUT_PRIM		0xF1
-#define OUTPUT_PRIM_ALL_CHNLS	0xF2
-#define OUTPUT_SEC		0xF4
-#define OUTPUT_SEC_ALL_CHNLS	0xF8
+#define OUTPUT_PRIM		0xC1
+#define OUTPUT_PRIM_ALL_CHNLS	0xC2
+#define OUTPUT_SEC		0xC4
+#define OUTPUT_SEC_ALL_CHNLS	0xC8
+#define OUTPUT_TERT1		0xD0
 
 
 #define MSM_FRAME_PREV_1	0
diff --git a/include/media/msm_isp.h b/include/media/msm_isp.h
index 333d0df..93f6c8b 100644
--- a/include/media/msm_isp.h
+++ b/include/media/msm_isp.h
@@ -59,6 +59,8 @@
 #define MSG_ID_OUTPUT_PRIMARY           40
 #define MSG_ID_OUTPUT_SECONDARY         41
 #define MSG_ID_STATS_COMPOSITE          42
+#define MSG_ID_OUTPUT_TERTIARY1         43
+
 
 /* ISP command IDs */
 #define VFE_CMD_DUMMY_0                                 0
@@ -326,6 +328,9 @@
 #define VFE_OUTPUTS_RAW			BIT(8)
 #define VFE_OUTPUTS_JPEG_AND_THUMB	BIT(9)
 #define VFE_OUTPUTS_THUMB_AND_JPEG	BIT(10)
+#define VFE_OUTPUTS_RDI0	BIT(11)
+
+
 
 struct msm_frame_info {
 	uint32_t image_mode;
diff --git a/include/media/radio-iris.h b/include/media/radio-iris.h
index b5e8f2e..25a1d84 100644
--- a/include/media/radio-iris.h
+++ b/include/media/radio-iris.h
@@ -489,6 +489,29 @@
 	__u8    in_det_out;
 } __packed;
 
+#define CLKSPURID_INDEX0	0
+#define CLKSPURID_INDEX1	5
+#define CLKSPURID_INDEX2	10
+#define CLKSPURID_INDEX3	15
+#define CLKSPURID_INDEX4	20
+#define CLKSPURID_INDEX5	25
+
+#define MAX_SPUR_FREQ_LIMIT	30
+#define CKK_SPUR		0x3B
+#define SPUR_DATA_SIZE		0x4
+#define SPUR_ENTRIES_PER_ID	0x5
+
+#define COMPUTE_SPUR(val)         ((((val) - (76000)) / (50)))
+#define GET_FREQ(val, bit)        ((bit == 1) ? ((val) >> 8) : ((val) & 0xFF))
+#define GET_SPUR_ENTRY_LEVEL(val) ((val) / (5))
+
+struct hci_fm_spur_data {
+	__u32	freq[MAX_SPUR_FREQ_LIMIT];
+	__s8	rmssi[MAX_SPUR_FREQ_LIMIT];
+	__u8	enable[MAX_SPUR_FREQ_LIMIT];
+} __packed;
+
+
 /* HCI dev events */
 #define RADIO_HCI_DEV_REG			1
 #define RADIO_HCI_DEV_WRITE			2
@@ -572,6 +595,10 @@
 	V4L2_CID_PRIVATE_INTF_HIGH_THRESHOLD,
 	V4L2_CID_PRIVATE_SINR_THRESHOLD,
 	V4L2_CID_PRIVATE_SINR_SAMPLES,
+	V4L2_CID_PRIVATE_SPUR_FREQ,
+	V4L2_CID_PRIVATE_SPUR_FREQ_RMSSI,
+	V4L2_CID_PRIVATE_SPUR_SELECTION,
+	V4L2_CID_PRIVATE_UPDATE_SPUR_TABLE,
 
 	/*using private CIDs under userclass*/
 	V4L2_CID_PRIVATE_IRIS_READ_DEFAULT = 0x00980928,
@@ -680,6 +707,14 @@
 	RDS_AF_JUMP,
 };
 
+enum spur_entry_levels {
+	ENTRY_0,
+	ENTRY_1,
+	ENTRY_2,
+	ENTRY_3,
+	ENTRY_4,
+	ENTRY_5,
+};
 
 /* Band limits */
 #define REGION_US_EU_BAND_LOW              87500
@@ -774,6 +809,7 @@
 #define RDS_SYNC_INTR   (1 << 1)
 #define AUDIO_CTRL_INTR (1 << 2)
 #define AF_JUMP_ENABLE  (1 << 4)
+
 int hci_def_data_read(struct hci_fm_def_data_rd_req *arg,
 	struct radio_hci_dev *hdev);
 int hci_def_data_write(struct hci_fm_def_data_wr_req *arg,
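
A short worked example of the spur helper macros above, assuming frequencies
are expressed in kHz (consistent with the 76000 base here and the 87500 band
limit later in this header):

	u32 freq_khz  = 98100;				/* 98.1 MHz */
	u16 spur_code = COMPUTE_SPUR(freq_khz);		/* (98100 - 76000) / 50 = 442 */
	u8  freq_msb  = GET_FREQ(spur_code, 1);		/* 442 >> 8   = 1   */
	u8  freq_lsb  = GET_FREQ(spur_code, 0);		/* 442 & 0xFF = 186 */
	int level     = GET_SPUR_ENTRY_LEVEL(12);	/* 12 / 5 = 2, i.e. ENTRY_2 */
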
diff --git a/include/media/tavarua.h b/include/media/tavarua.h
index 9943287..adbdada 100644
--- a/include/media/tavarua.h
+++ b/include/media/tavarua.h
@@ -395,6 +395,22 @@
 
 #define	FM_TX_PWR_LVL_0		0 /* Lowest power lvl that can be set for Tx */
 #define	FM_TX_PWR_LVL_MAX	7 /* Max power lvl for Tx */
+
+/* Tone Generator control value */
+#define TONE_GEN_CTRL_BYTE		 0x00
+#define TONE_CHANNEL_EN_AND_SCALING_BYTE 0x01
+#define TONE_LEFT_FREQ_BYTE		 0x02
+#define TONE_RIGHT_FREQ_BYTE		 0x03
+#define TONE_LEFT_PHASE			 0x04
+#define TONE_RIGHT_PHASE		 0x05
+
+#define TONE_LEFT_CH_ENABLED		 0x01
+#define TONE_RIGHT_CH_ENABLED		 0x02
+#define TONE_LEFT_RIGHT_CH_ENABLED	 (TONE_LEFT_CH_ENABLED\
+						 | TONE_RIGHT_CH_ENABLED)
+
+#define TONE_SCALING_SHIFT		 0x02
+
 /* Transfer */
 enum tavarua_xfr_ctrl_t {
 	RDS_PS_0 = 0x01,
@@ -453,6 +469,7 @@
 	PHY_CONFIG,
 	PHY_TXBLOCK,
 	PHY_TCB,
+	XFR_EXT,
 	XFR_PEEK_MODE = 0x40,
 	XFR_POKE_MODE = 0xC0,
 	TAVARUA_XFR_CTRL_MAX
@@ -503,6 +520,7 @@
 	TWELVE_BYTE,
 	THIRTEEN_BYTE
 };
+
 #define XFR_READ		(0)
 #define XFR_WRITE		(1)
 #define XFR_MODE_OFFSET		(0)
@@ -531,4 +549,28 @@
 	__u8   data[XFR_REG_NUM];
 } __packed;
 
+enum Internal_tone_gen_vals {
+	ONE_KHZ_LR_EQUA_0DBFS = 1,
+	ONE_KHZ_LEFTONLY_EQUA_0DBFS,
+	ONE_KHZ_RIGHTONLY_EQUA_0DBFS,
+	ONE_KHZ_LR_EQUA_l8DBFS,
+	FIFTEEN_KHZ_LR_EQUA_l8DBFS
+};
+
+enum Tone_scaling_indexes {
+	TONE_SCALE_IND_0,
+	TONE_SCALE_IND_1,
+	TONE_SCALE_IND_2,
+	TONE_SCALE_IND_3,
+	TONE_SCALE_IND_4,
+	TONE_SCALE_IND_5,
+	TONE_SCALE_IND_6,
+	TONE_SCALE_IND_7,
+	TONE_SCALE_IND_8,
+	TONE_SCALE_IND_9,
+	TONE_SCALE_IND_10,
+	TONE_SCALE_IND_11,
+	TONE_SCALE_IND_12
+};
+
 #endif /* __LINUX_TAVARUA_H */
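
A hedged sketch of how the tone-generator byte offsets and the scaling shift
appear intended to combine; the actual transfer format is driver-internal, so
the buffer layout below is an assumption:

	u8 xfr_buf[XFR_REG_NUM] = {0};

	/* assumed layout: control value first, then channel enable plus scaling */
	xfr_buf[TONE_GEN_CTRL_BYTE] = ONE_KHZ_LR_EQUA_0DBFS;
	xfr_buf[TONE_CHANNEL_EN_AND_SCALING_BYTE] =
		TONE_LEFT_RIGHT_CH_ENABLED |
		(TONE_SCALE_IND_4 << TONE_SCALING_SHIFT);
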
diff --git a/include/sound/apr_audio-v2.h b/include/sound/apr_audio-v2.h
index 695fea9..8e8778a 100644
--- a/include/sound/apr_audio-v2.h
+++ b/include/sound/apr_audio-v2.h
@@ -2322,6 +2322,9 @@
 } __packed;
 
 #define ASM_END_POINT_DEVICE_MATRIX     0
+
+#define PCM_CHANNEL_NULL 0
+
 /* Front left channel. */
 #define PCM_CHANNEL_FL    1
 
@@ -2444,7 +2447,7 @@
 } __packed;
 
 struct asm_stream_cmd_set_encdec_param {
-		u32                  param_id;
+	u32                  param_id;
 	/* ID of the parameter. */
 
 	u32                  param_size;
@@ -2573,9 +2576,6 @@
  * - 6 -- 5.1 content
  */
 
-	u16          reserved;
-	/* Reserved. Clients must set this field to zero. */
-
 	u16          total_size_of_PCE_bits;
 /* greater or equal to zero. * -In case of RAW formats and
  * channel config = 0 (PCE), client can send * the bit stream
@@ -2986,6 +2986,8 @@
 	u16          enc_options;
 	/* Options used during encoding. */
 
+	u16          reserved;
+
 } __packed;
 
 #define ASM_MEDIA_FMT_WMA_V8                    0x00010D91
@@ -4495,7 +4497,6 @@
 struct asm_dec_out_chan_map_param {
 	struct apr_hdr hdr;
 	struct asm_stream_cmd_set_encdec_param  encdec;
-	struct asm_enc_cfg_blk_param_v2	encblk;
 	u32                 num_channels;
 /* Number of decoder output channels.
  * Supported values: 0 to #MAX_CHAN_MAP_CHANNELS
diff --git a/include/sound/compress_params.h b/include/sound/compress_params.h
index 5aa7b09..9c7a1ea 100644
--- a/include/sound/compress_params.h
+++ b/include/sound/compress_params.h
@@ -241,6 +241,8 @@
 	__u32 bits_per_sample;
 	__u32 channelmask;
 	__u32 encodeopt;
+	__u32 encodeopt1;
+	__u32 encodeopt2;
 };
 
 
diff --git a/include/sound/q6adm.h b/include/sound/q6adm.h
index 56594d4..8e15955 100644
--- a/include/sound/q6adm.h
+++ b/include/sound/q6adm.h
@@ -43,6 +43,8 @@
 int adm_connect_afe_port(int mode, int session_id, int port_id);
 int adm_disconnect_afe_port(int mode, int session_id, int port_id);
 
+void adm_ec_ref_rx_id(int  port_id);
+
 #ifdef CONFIG_RTAC
 int adm_get_copp_id(int port_id);
 #endif
diff --git a/include/sound/q6asm-v2.h b/include/sound/q6asm-v2.h
index 7ef15ac..2a555b2 100644
--- a/include/sound/q6asm-v2.h
+++ b/include/sound/q6asm-v2.h
@@ -64,8 +64,11 @@
 /* Enable Sample_Rate/Channel_Mode notification event from Decoder */
 #define SR_CM_NOTIFY_ENABLE	0x0004
 
-#define ASYNC_IO_MODE	0x0002
 #define SYNC_IO_MODE	0x0001
+#define ASYNC_IO_MODE	0x0002
+#define NT_MODE        0x0400
+
+
 #define NO_TIMESTAMP    0xFF00
 #define SET_TIMESTAMP   0x0000
 
@@ -230,6 +233,9 @@
 int q6asm_set_encdec_chan_map(struct audio_client *ac,
 		uint32_t num_channels);
 
+int q6asm_enc_cfg_blk_pcm_native(struct audio_client *ac,
+			uint32_t rate, uint32_t channels);
+
 int q6asm_enable_sbrps(struct audio_client *ac,
 			uint32_t sbr_ps);
 
diff --git a/include/sound/q6asm.h b/include/sound/q6asm.h
index ee90797..1e647a2 100644
--- a/include/sound/q6asm.h
+++ b/include/sound/q6asm.h
@@ -239,6 +239,9 @@
 int q6asm_enc_cfg_blk_pcm(struct audio_client *ac,
 			uint32_t rate, uint32_t channels);
 
+int q6asm_enc_cfg_blk_pcm_native(struct audio_client *ac,
+			uint32_t rate, uint32_t channels);
+
 int q6asm_enc_cfg_blk_multi_ch_pcm(struct audio_client *ac,
 			uint32_t rate, uint32_t channels);
 
diff --git a/kernel/irq/irqdomain.c b/kernel/irq/irqdomain.c
index 0aa96d3..0e0ba5f 100644
--- a/kernel/irq/irqdomain.c
+++ b/kernel/irq/irqdomain.c
@@ -1,312 +1,780 @@
+#include <linux/debugfs.h>
+#include <linux/hardirq.h>
+#include <linux/interrupt.h>
 #include <linux/irq.h>
+#include <linux/irqdesc.h>
 #include <linux/irqdomain.h>
 #include <linux/module.h>
 #include <linux/mutex.h>
 #include <linux/of.h>
 #include <linux/of_address.h>
+#include <linux/seq_file.h>
 #include <linux/slab.h>
+#include <linux/smp.h>
+#include <linux/fs.h>
+
+#define IRQ_DOMAIN_MAP_LEGACY 0 /* driver allocated fixed range of irqs.
+				 * ie. legacy 8259, gets irqs 1..15 */
+#define IRQ_DOMAIN_MAP_NOMAP 1 /* no fast reverse mapping */
+#define IRQ_DOMAIN_MAP_LINEAR 2 /* linear map of interrupts */
+#define IRQ_DOMAIN_MAP_TREE 3 /* radix tree */
 
 static LIST_HEAD(irq_domain_list);
 static DEFINE_MUTEX(irq_domain_mutex);
 
-/**
- * irq_domain_add() - Register an irq_domain
- * @domain: ptr to initialized irq_domain structure
- *
- * Adds a irq_domain structure.  The irq_domain must at a minimum be
- * initialized with an ops structure pointer, and either a ->to_irq hook or
- * a valid irq_base value.  The irq range must be mutually exclusive with
- * domains already registered. Everything else is optional.
- */
-int irq_domain_add(struct irq_domain *domain)
-{
-	struct irq_domain *curr;
-	uint32_t d_highirq = domain->irq_base + domain->nr_irq - 1;
+static DEFINE_MUTEX(revmap_trees_mutex);
+static struct irq_domain *irq_default_domain;
 
-	if (!domain->nr_irq)
-		return -EINVAL;
+/**
+ * irq_domain_alloc() - Allocate a new irq_domain data structure
+ * @of_node: optional device-tree node of the interrupt controller
+ * @revmap_type: type of reverse mapping to use
+ * @ops: map/unmap domain callbacks
+ * @host_data: Controller private data pointer
+ *
+ * Allocates and initializes an irq_domain structure.  The caller is expected
+ * to register the allocated irq_domain with irq_domain_add().  Returns a
+ * pointer to the IRQ domain, or NULL on failure.
+ */
+static struct irq_domain *irq_domain_alloc(struct device_node *of_node,
+					   unsigned int revmap_type,
+					   const struct irq_domain_ops *ops,
+					   void *host_data)
+{
+	struct irq_domain *domain;
+
+	domain = kzalloc(sizeof(*domain), GFP_KERNEL);
+	if (WARN_ON(!domain))
+		return NULL;
+
+	/* Fill structure */
+	domain->revmap_type = revmap_type;
+	domain->ops = ops;
+	domain->host_data = host_data;
+	domain->of_node = of_node_get(of_node);
+
+	return domain;
+}
+
+static void irq_domain_add(struct irq_domain *domain)
+{
+	mutex_lock(&irq_domain_mutex);
+	list_add(&domain->link, &irq_domain_list);
+	mutex_unlock(&irq_domain_mutex);
+	pr_debug("irq: Allocated domain of type %d @0x%p\n",
+		 domain->revmap_type, domain);
+}
+
+static unsigned int irq_domain_legacy_revmap(struct irq_domain *domain,
+					     irq_hw_number_t hwirq)
+{
+	irq_hw_number_t first_hwirq = domain->revmap_data.legacy.first_hwirq;
+	int size = domain->revmap_data.legacy.size;
+
+	if (WARN_ON(hwirq < first_hwirq || hwirq >= first_hwirq + size))
+		return 0;
+	return hwirq - first_hwirq + domain->revmap_data.legacy.first_irq;
+}
+
+/**
+ * irq_domain_add_legacy() - Allocate and register a legacy revmap irq_domain.
+ * @of_node: pointer to interrupt controller's device tree node.
+ * @size: total number of irqs in legacy mapping
+ * @first_irq: first number of irq block assigned to the domain
+ * @first_hwirq: first hwirq number to use for the translation. Should normally
+ *               be '0', but a positive integer can be used if the effective
+ *               hwirqs numbering does not begin at zero.
+ * @ops: map/unmap domain callbacks
+ * @host_data: Controller private data pointer
+ *
+ * Note: the map() callback will be called before this function returns
+ * for all legacy interrupts except 0 (which is always the invalid irq for
+ * a legacy controller).
+ */
+struct irq_domain *irq_domain_add_legacy(struct device_node *of_node,
+					 unsigned int size,
+					 unsigned int first_irq,
+					 irq_hw_number_t first_hwirq,
+					 const struct irq_domain_ops *ops,
+					 void *host_data)
+{
+	struct irq_domain *domain;
+	unsigned int i;
+
+	domain = irq_domain_alloc(of_node, IRQ_DOMAIN_MAP_LEGACY, ops, host_data);
+	if (!domain)
+		return NULL;
+
+	domain->revmap_data.legacy.first_irq = first_irq;
+	domain->revmap_data.legacy.first_hwirq = first_hwirq;
+	domain->revmap_data.legacy.size = size;
 
 	mutex_lock(&irq_domain_mutex);
-	/* insert in ascending order of domain->irq_base */
-	list_for_each_entry(curr, &irq_domain_list, list) {
-		uint32_t c_highirq = curr->irq_base + curr->nr_irq - 1;
-		if (domain->irq_base < curr->irq_base &&
-		    d_highirq < curr->irq_base) {
-			break;
-		}
-		if (d_highirq <= c_highirq) {
+	/* Verify that all the irqs are available */
+	for (i = 0; i < size; i++) {
+		int irq = first_irq + i;
+		struct irq_data *irq_data = irq_get_irq_data(irq);
+
+		if (WARN_ON(!irq_data || irq_data->domain)) {
 			mutex_unlock(&irq_domain_mutex);
-			return -EINVAL;
+			of_node_put(domain->of_node);
+			kfree(domain);
+			return NULL;
 		}
 	}
-	list_add_tail(&domain->list, &curr->list);
+
+	/* Claim all of the irqs before registering a legacy domain */
+	for (i = 0; i < size; i++) {
+		struct irq_data *irq_data = irq_get_irq_data(first_irq + i);
+		irq_data->hwirq = first_hwirq + i;
+		irq_data->domain = domain;
+	}
 	mutex_unlock(&irq_domain_mutex);
 
+	for (i = 0; i < size; i++) {
+		int irq = first_irq + i;
+		int hwirq = first_hwirq + i;
+
+		/* IRQ0 gets ignored */
+		if (!irq)
+			continue;
+
+		/* Legacy flags are left to default at this point,
+		 * one can then use irq_create_mapping() to
+		 * explicitly change them
+		 */
+		ops->map(domain, irq, hwirq);
+
+		/* Clear norequest flags */
+		irq_clear_status_flags(irq, IRQ_NOREQUEST);
+	}
+
+	irq_domain_add(domain);
+	return domain;
+}
+
+/**
+ * irq_domain_add_linear() - Allocate and register a linear revmap irq_domain.
+ * @of_node: pointer to interrupt controller's device tree node.
+ * @size: number of interrupts in the linear map
+ * @ops: map/unmap domain callbacks
+ * @host_data: Controller private data pointer
+ */
+struct irq_domain *irq_domain_add_linear(struct device_node *of_node,
+					 unsigned int size,
+					 const struct irq_domain_ops *ops,
+					 void *host_data)
+{
+	struct irq_domain *domain;
+	unsigned int *revmap;
+
+	revmap = kzalloc(sizeof(*revmap) * size, GFP_KERNEL);
+	if (WARN_ON(!revmap))
+		return NULL;
+
+	domain = irq_domain_alloc(of_node, IRQ_DOMAIN_MAP_LINEAR, ops, host_data);
+	if (!domain) {
+		kfree(revmap);
+		return NULL;
+	}
+	domain->revmap_data.linear.size = size;
+	domain->revmap_data.linear.revmap = revmap;
+	irq_domain_add(domain);
+	return domain;
+}
+
+struct irq_domain *irq_domain_add_nomap(struct device_node *of_node,
+					 unsigned int max_irq,
+					 const struct irq_domain_ops *ops,
+					 void *host_data)
+{
+	struct irq_domain *domain = irq_domain_alloc(of_node,
+					IRQ_DOMAIN_MAP_NOMAP, ops, host_data);
+	if (domain) {
+		domain->revmap_data.nomap.max_irq = max_irq ? max_irq : ~0;
+		irq_domain_add(domain);
+	}
+	return domain;
+}
+
+/**
+ * irq_domain_add_tree() - Allocate and register a radix-tree revmap irq_domain.
+ * @of_node: pointer to interrupt controller's device tree node.
+ * @ops: map/unmap domain callbacks
+ * @host_data: Controller private data pointer
+ *
+ * Note: The radix tree will be allocated later during boot automatically
+ * (the reverse mapping will use the slow path until that happens).
+ */
+struct irq_domain *irq_domain_add_tree(struct device_node *of_node,
+					 const struct irq_domain_ops *ops,
+					 void *host_data)
+{
+	struct irq_domain *domain = irq_domain_alloc(of_node,
+					IRQ_DOMAIN_MAP_TREE, ops, host_data);
+	if (domain) {
+		INIT_RADIX_TREE(&domain->revmap_data.tree, GFP_KERNEL);
+		irq_domain_add(domain);
+	}
+	return domain;
+}
+
+/**
+ * irq_find_host() - Locates a domain for a given device node
+ * @node: device-tree node of the interrupt controller
+ */
+struct irq_domain *irq_find_host(struct device_node *node)
+{
+	struct irq_domain *h, *found = NULL;
+	int rc;
+
+	/* We might want to match the legacy controller last since
+	 * it might potentially be set to match all interrupts in
+	 * the absence of a device node. This isn't a problem
+	 * yet though...
+	 */
+	mutex_lock(&irq_domain_mutex);
+	list_for_each_entry(h, &irq_domain_list, link) {
+		if (h->ops->match)
+			rc = h->ops->match(h, node);
+		else
+			rc = (h->of_node != NULL) && (h->of_node == node);
+
+		if (rc) {
+			found = h;
+			break;
+		}
+	}
+	mutex_unlock(&irq_domain_mutex);
+	return found;
+}
+EXPORT_SYMBOL_GPL(irq_find_host);
+
+/**
+ * irq_set_default_host() - Set a "default" irq domain
+ * @domain: default domain pointer
+ *
+ * For convenience, it's possible to set a "default" domain that will be used
+ * whenever NULL is passed to irq_create_mapping(). It makes life easier for
+ * platforms that want to manipulate a few hard coded interrupt numbers that
+ * aren't properly represented in the device-tree.
+ */
+void irq_set_default_host(struct irq_domain *domain)
+{
+	pr_debug("irq: Default domain set to @0x%p\n", domain);
+
+	irq_default_domain = domain;
+}
+
+static int irq_setup_virq(struct irq_domain *domain, unsigned int virq,
+			    irq_hw_number_t hwirq)
+{
+	struct irq_data *irq_data = irq_get_irq_data(virq);
+
+	irq_data->hwirq = hwirq;
+	irq_data->domain = domain;
+	if (domain->ops->map(domain, virq, hwirq)) {
+		pr_debug("irq: -> mapping failed, freeing\n");
+		irq_data->domain = NULL;
+		irq_data->hwirq = 0;
+		return -1;
+	}
+
+	irq_clear_status_flags(virq, IRQ_NOREQUEST);
+
 	return 0;
 }
 
 /**
- * irq_domain_register() - Register an entire irq_domain
- * @domain: ptr to initialized irq_domain structure
+ * irq_create_direct_mapping() - Allocate an irq for direct mapping
+ * @domain: domain to allocate the irq for or NULL for default domain
  *
- * Registers the entire irq_domain.  The irq_domain must at a minimum be
- * initialized with an ops structure pointer, and either a ->to_irq hook or
- * a valid irq_base value.  Everything else is optional.
+ * This routine is used for irq controllers which can choose the hardware
+ * interrupt numbers they generate. In such a case it's simplest to use
+ * the linux irq as the hardware interrupt number.
  */
-void irq_domain_register(struct irq_domain *domain)
+unsigned int irq_create_direct_mapping(struct irq_domain *domain)
 {
-	struct irq_data *d;
-	int hwirq, irq;
+	unsigned int virq;
 
-	irq_domain_for_each_irq(domain, hwirq, irq) {
-		d = irq_get_irq_data(irq);
-		if (!d) {
-			WARN(1, "error: assigning domain to non existant irq_desc");
-			return;
-		}
-		if (d->domain) {
-			/* things are broken; just report, don't clean up */
-			WARN(1, "error: irq_desc already assigned to a domain");
-			return;
-		}
-		d->domain = domain;
-		d->hwirq = hwirq;
+	if (domain == NULL)
+		domain = irq_default_domain;
+
+	BUG_ON(domain == NULL);
+	WARN_ON(domain->revmap_type != IRQ_DOMAIN_MAP_NOMAP);
+
+	virq = irq_alloc_desc_from(1, 0);
+	if (!virq) {
+		pr_debug("irq: create_direct virq allocation failed\n");
+		return 0;
 	}
+	if (virq >= domain->revmap_data.nomap.max_irq) {
+		pr_err("ERROR: no free irqs available below %i maximum\n",
+			domain->revmap_data.nomap.max_irq);
+		irq_free_desc(virq);
+		return 0;
+	}
+	pr_debug("irq: create_direct obtained virq %d\n", virq);
+
+	if (irq_setup_virq(domain, virq, virq)) {
+		irq_free_desc(virq);
+		return 0;
+	}
+
+	return virq;
 }
 
 /**
- * irq_domain_register_irq() - Register an irq_domain
- * @domain: ptr to initialized irq_domain structure
- * @hwirq: irq_domain hwirq to register
+ * irq_create_mapping() - Map a hardware interrupt into linux irq space
+ * @domain: domain owning this hardware interrupt or NULL for default domain
+ * @hwirq: hardware irq number in that domain space
  *
- * Registers a specific hwirq within the irq_domain.  The irq_domain
- * must at a minimum be initialized with an ops structure pointer, and
- * either a ->to_irq hook or a valid irq_base value.  Everything else is
- * optional.
+ * Only one mapping per hardware interrupt is permitted. Returns a linux
+ * irq number.
+ * If the sense/trigger is to be specified, set_irq_type() should be called
+ * on the number returned from that call.
  */
-void irq_domain_register_irq(struct irq_domain *domain, int hwirq)
+unsigned int irq_create_mapping(struct irq_domain *domain,
+				irq_hw_number_t hwirq)
 {
-	struct irq_data *d;
+	unsigned int hint;
+	int virq;
 
-	d = irq_get_irq_data(irq_domain_to_irq(domain, hwirq));
-	if (!d) {
-		WARN(1, "error: assigning domain to non existant irq_desc");
-		return;
+	pr_debug("irq: irq_create_mapping(0x%p, 0x%lx)\n", domain, hwirq);
+
+	/* Look for default domain if necessary */
+	if (domain == NULL)
+		domain = irq_default_domain;
+	if (domain == NULL) {
+		printk(KERN_WARNING "irq_create_mapping called for"
+		       " NULL domain, hwirq=%lx\n", hwirq);
+		WARN_ON(1);
+		return 0;
 	}
-	if (d->domain) {
-		/* things are broken; just report, don't clean up */
-		WARN(1, "error: irq_desc already assigned to a domain");
-		return;
+	pr_debug("irq: -> using domain @%p\n", domain);
+
+	/* Check if mapping already exists */
+	virq = irq_find_mapping(domain, hwirq);
+	if (virq) {
+		pr_debug("irq: -> existing mapping on virq %d\n", virq);
+		return virq;
 	}
-	d->domain = domain;
-	d->hwirq = hwirq;
-}
 
-/**
- * irq_domain_del() - Removes a irq_domain from the system
- * @domain: ptr to registered irq_domain.
- */
-void irq_domain_del(struct irq_domain *domain)
-{
-	mutex_lock(&irq_domain_mutex);
-	list_del(&domain->list);
-	mutex_unlock(&irq_domain_mutex);
-}
+	/* Get a virtual interrupt number */
+	if (domain->revmap_type == IRQ_DOMAIN_MAP_LEGACY)
+		return irq_domain_legacy_revmap(domain, hwirq);
 
-/**
- * irq_domain_unregister() - Unregister an irq_domain
- * @domain: ptr to registered irq_domain.
- */
-void irq_domain_unregister(struct irq_domain *domain)
-{
-	struct irq_data *d;
-	int hwirq, irq;
-
-	/* Clear the irq_domain assignments */
-	irq_domain_for_each_irq(domain, hwirq, irq) {
-		d = irq_get_irq_data(irq);
-		d->domain = NULL;
+	/* Allocate a virtual interrupt number */
+	hint = hwirq % nr_irqs;
+	if (hint == 0)
+		hint++;
+	virq = irq_alloc_desc_from(hint, 0);
+	if (virq <= 0)
+		virq = irq_alloc_desc_from(1, 0);
+	if (virq <= 0) {
+		pr_debug("irq: -> virq allocation failed\n");
+		return 0;
 	}
-}
 
-/**
- * irq_domain_unregister_irq() - Unregister a hwirq within a irq_domain
- * @domain: ptr to registered irq_domain.
- * @hwirq: irq_domain hwirq to unregister.
- */
-void irq_domain_unregister_irq(struct irq_domain *domain, int hwirq)
-{
-	struct irq_data *d;
-
-	/* Clear the irq_domain assignment */
-	d = irq_get_irq_data(irq_domain_to_irq(domain, hwirq));
-	d->domain = NULL;
-}
-
-/**
- * irq_domain_find_free_range() - Find an available irq range
- * @from: lowest logical irq number to request from
- * @cnt: number of interrupts to search for
- *
- * Finds an available logical irq range from the domains specified
- * on the system. The from parameter can be used to allocate a range
- * at least as great as the specified irq number.
- */
-int irq_domain_find_free_range(unsigned int from, unsigned int cnt)
-{
-	struct irq_domain *curr, *prev = NULL;
-
-	if (list_empty(&irq_domain_list))
-		return from;
-
-	list_for_each_entry(curr, &irq_domain_list, list) {
-		if (prev == NULL) {
-			if ((from + cnt - 1) < curr->irq_base)
-				return from;
-		} else {
-			uint32_t p_next_irq = prev->irq_base + prev->nr_irq;
-			uint32_t start_irq;
-			if (from >= curr->irq_base)
-				continue;
-			if (from < p_next_irq)
-				start_irq = p_next_irq;
-			else
-				start_irq = from;
-			if ((curr->irq_base - start_irq) >= cnt)
-				return p_next_irq;
-		}
-		prev = curr;
+	if (irq_setup_virq(domain, virq, hwirq)) {
+		if (domain->revmap_type != IRQ_DOMAIN_MAP_LEGACY)
+			irq_free_desc(virq);
+		return 0;
 	}
-	curr = list_entry(curr->list.prev, struct irq_domain, list);
 
-	return from > curr->irq_base + curr->nr_irq ?
-	       from : curr->irq_base + curr->nr_irq;
+	pr_debug("irq: irq %lu on domain %s mapped to virtual irq %u\n",
+		hwirq, domain->of_node ? domain->of_node->full_name : "null", virq);
+
+	return virq;
 }
+EXPORT_SYMBOL_GPL(irq_create_mapping);
 
-#if defined(CONFIG_OF_IRQ)
-/**
- * irq_create_of_mapping() - Map a linux irq number from a DT interrupt spec
- *
- * Used by the device tree interrupt mapping code to translate a device tree
- * interrupt specifier to a valid linux irq number.  Returns either a valid
- * linux IRQ number or 0.
- *
- * When the caller no longer need the irq number returned by this function it
- * should arrange to call irq_dispose_mapping().
- */
 unsigned int irq_create_of_mapping(struct device_node *controller,
 				   const u32 *intspec, unsigned int intsize)
 {
 	struct irq_domain *domain;
-	unsigned long hwirq;
-	unsigned int irq, type;
-	int rc = -EINVAL;
+	irq_hw_number_t hwirq;
+	unsigned int type = IRQ_TYPE_NONE;
+	unsigned int virq;
 
-	/* Find a domain which can translate the irq spec */
-	mutex_lock(&irq_domain_mutex);
-	list_for_each_entry(domain, &irq_domain_list, list) {
-		if (!domain->ops->dt_translate)
-			continue;
-
-		rc = domain->ops->dt_translate(domain, controller,
-					intspec, intsize, &hwirq, &type);
-		if (rc == 0)
-			break;
-	}
-	mutex_unlock(&irq_domain_mutex);
-
-	if (rc != 0)
+	domain = controller ? irq_find_host(controller) : irq_default_domain;
+	if (!domain) {
+#ifdef CONFIG_MIPS
+		/*
+		 * Workaround to avoid breaking interrupt controller drivers
+		 * that don't yet register an irq_domain.  This is temporary
+		 * code. ~~~gcl, Feb 24, 2012
+		 *
+		 * Scheduled for removal in Linux v3.6.  That should be enough
+		 * time.
+		 */
+		if (intsize > 0)
+			return intspec[0];
+#endif
+		printk(KERN_WARNING "irq: no irq domain found for %s !\n",
+		       controller->full_name);
 		return 0;
+	}
 
-	irq = irq_domain_to_irq(domain, hwirq);
-	if (type != IRQ_TYPE_NONE)
-		irq_set_irq_type(irq, type);
-	pr_debug("%s: mapped hwirq=%i to irq=%i, flags=%x\n",
-		 controller->full_name, (int)hwirq, irq, type);
-	return irq;
+	/* If domain has no translation, then we assume interrupt line */
+	if (domain->ops->xlate == NULL)
+		hwirq = intspec[0];
+	else {
+		if (domain->ops->xlate(domain, controller, intspec, intsize,
+				     &hwirq, &type))
+			return 0;
+	}
+
+	/* Create mapping */
+	virq = irq_create_mapping(domain, hwirq);
+	if (!virq)
+		return virq;
+
+	/* Set type if specified and different than the current one */
+	if (type != IRQ_TYPE_NONE &&
+	    type != (irqd_get_trigger_type(irq_get_irq_data(virq))))
+		irq_set_irq_type(virq, type);
+	return virq;
 }
 EXPORT_SYMBOL_GPL(irq_create_of_mapping);
 
 /**
- * irq_dispose_mapping() - Discard a mapping created by irq_create_of_mapping()
- * @irq: linux irq number to be discarded
- *
- * Calling this function indicates the caller no longer needs a reference to
- * the linux irq number returned by a prior call to irq_create_of_mapping().
+ * irq_dispose_mapping() - Unmap an interrupt
+ * @virq: linux irq number of the interrupt to unmap
  */
-void irq_dispose_mapping(unsigned int irq)
+void irq_dispose_mapping(unsigned int virq)
 {
-	/*
-	 * nothing yet; will be filled when support for dynamic allocation of
-	 * irq_descs is added to irq_domain
-	 */
+	struct irq_data *irq_data = irq_get_irq_data(virq);
+	struct irq_domain *domain;
+	irq_hw_number_t hwirq;
+
+	if (!virq || !irq_data)
+		return;
+
+	domain = irq_data->domain;
+	if (WARN_ON(domain == NULL))
+		return;
+
+	/* Never unmap legacy interrupts */
+	if (domain->revmap_type == IRQ_DOMAIN_MAP_LEGACY)
+		return;
+
+	irq_set_status_flags(virq, IRQ_NOREQUEST);
+
+	/* remove chip and handler */
+	irq_set_chip_and_handler(virq, NULL, NULL);
+
+	/* Make sure it's completed */
+	synchronize_irq(virq);
+
+	/* Tell the PIC about it */
+	if (domain->ops->unmap)
+		domain->ops->unmap(domain, virq);
+	smp_mb();
+
+	/* Clear reverse map */
+	hwirq = irq_data->hwirq;
+	switch (domain->revmap_type) {
+	case IRQ_DOMAIN_MAP_LINEAR:
+		if (hwirq < domain->revmap_data.linear.size)
+			domain->revmap_data.linear.revmap[hwirq] = 0;
+		break;
+	case IRQ_DOMAIN_MAP_TREE:
+		mutex_lock(&revmap_trees_mutex);
+		radix_tree_delete(&domain->revmap_data.tree, hwirq);
+		mutex_unlock(&revmap_trees_mutex);
+		break;
+	}
+
+	irq_free_desc(virq);
 }
 EXPORT_SYMBOL_GPL(irq_dispose_mapping);
 
-int irq_domain_simple_dt_translate(struct irq_domain *d,
-			    struct device_node *controller,
-			    const u32 *intspec, unsigned int intsize,
-			    unsigned long *out_hwirq, unsigned int *out_type)
+/**
+ * irq_find_mapping() - Find a linux irq from an hw irq number.
+ * @domain: domain owning this hardware interrupt
+ * @hwirq: hardware irq number in that domain space
+ *
+ * This is a slow path, for use by generic code. It's expected that an
+ * irq controller implementation directly calls the appropriate low level
+ * mapping function.
+ */
+unsigned int irq_find_mapping(struct irq_domain *domain,
+			      irq_hw_number_t hwirq)
 {
-	if (d->of_node != controller)
-		return -EINVAL;
-	if (intsize < 1)
-		return -EINVAL;
-	if (d->nr_irq && ((intspec[0] < d->hwirq_base) ||
-	    (intspec[0] >= d->hwirq_base + d->nr_irq)))
-		return -EINVAL;
+	unsigned int i;
+	unsigned int hint = hwirq % nr_irqs;
 
-	*out_hwirq = intspec[0];
-	*out_type = IRQ_TYPE_NONE;
-	if (intsize > 1)
-		*out_type = intspec[1] & IRQ_TYPE_SENSE_MASK;
+	/* Look for default domain if necessary */
+	if (domain == NULL)
+		domain = irq_default_domain;
+	if (domain == NULL)
+		return 0;
+
+	/* legacy -> bail early */
+	if (domain->revmap_type == IRQ_DOMAIN_MAP_LEGACY)
+		return irq_domain_legacy_revmap(domain, hwirq);
+
+	/* Slow path does a linear search of the map */
+	if (hint == 0)
+		hint = 1;
+	i = hint;
+	do {
+		struct irq_data *data = irq_get_irq_data(i);
+		if (data && (data->domain == domain) && (data->hwirq == hwirq))
+			return i;
+		i++;
+		if (i >= nr_irqs)
+			i = 1;
+	} while (i != hint);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(irq_find_mapping);
+
+/**
+ * irq_radix_revmap_lookup() - Find a linux irq from a hw irq number.
+ * @domain: domain owning this hardware interrupt
+ * @hwirq: hardware irq number in that domain space
+ *
+ * This is a fast path, for use by irq controller code that uses radix tree
+ * revmaps
+ */
+unsigned int irq_radix_revmap_lookup(struct irq_domain *domain,
+				     irq_hw_number_t hwirq)
+{
+	struct irq_data *irq_data;
+
+	if (WARN_ON_ONCE(domain->revmap_type != IRQ_DOMAIN_MAP_TREE))
+		return irq_find_mapping(domain, hwirq);
+
+	/*
+	 * Freeing an irq can delete nodes along the path to
+	 * do the lookup via call_rcu.
+	 */
+	rcu_read_lock();
+	irq_data = radix_tree_lookup(&domain->revmap_data.tree, hwirq);
+	rcu_read_unlock();
+
+	/*
+	 * If found in radix tree, then fine.
+	 * Else fallback to linear lookup - this should not happen in practice
+	 * as it means that we failed to insert the node in the radix tree.
+	 */
+	return irq_data ? irq_data->irq : irq_find_mapping(domain, hwirq);
+}
+
+/**
+ * irq_radix_revmap_insert() - Insert a hw irq to linux irq number mapping.
+ * @domain: domain owning this hardware interrupt
+ * @virq: linux irq number
+ * @hwirq: hardware irq number in that domain space
+ *
+ * This is for use by irq controllers that use a radix tree reverse
+ * mapping for fast lookup.
+ */
+void irq_radix_revmap_insert(struct irq_domain *domain, unsigned int virq,
+			     irq_hw_number_t hwirq)
+{
+	struct irq_data *irq_data = irq_get_irq_data(virq);
+
+	if (WARN_ON(domain->revmap_type != IRQ_DOMAIN_MAP_TREE))
+		return;
+
+	if (virq) {
+		mutex_lock(&revmap_trees_mutex);
+		radix_tree_insert(&domain->revmap_data.tree, hwirq, irq_data);
+		mutex_unlock(&revmap_trees_mutex);
+	}
+}
+
+/**
+ * irq_linear_revmap() - Find a linux irq from a hw irq number.
+ * @domain: domain owning this hardware interrupt
+ * @hwirq: hardware irq number in that domain space
+ *
+ * This is a fast path, for use by irq controller code that uses linear
+ * revmaps. It falls back to the slow path if the revmap doesn't exist
+ * yet and will create the revmap entry with appropriate locking.
+ */
+unsigned int irq_linear_revmap(struct irq_domain *domain,
+			       irq_hw_number_t hwirq)
+{
+	unsigned int *revmap;
+
+	if (WARN_ON_ONCE(domain->revmap_type != IRQ_DOMAIN_MAP_LINEAR))
+		return irq_find_mapping(domain, hwirq);
+
+	/* Check revmap bounds */
+	if (unlikely(hwirq >= domain->revmap_data.linear.size))
+		return irq_find_mapping(domain, hwirq);
+
+	/* Check if revmap was allocated */
+	revmap = domain->revmap_data.linear.revmap;
+	if (unlikely(revmap == NULL))
+		return irq_find_mapping(domain, hwirq);
+
+	/* Fill up revmap with slow path if no mapping found */
+	if (unlikely(!revmap[hwirq]))
+		revmap[hwirq] = irq_find_mapping(domain, hwirq);
+
+	return revmap[hwirq];
+}
+
+#ifdef CONFIG_IRQ_DOMAIN_DEBUG
+static int virq_debug_show(struct seq_file *m, void *private)
+{
+	unsigned long flags;
+	struct irq_desc *desc;
+	const char *p;
+	static const char none[] = "none";
+	void *data;
+	int i;
+
+	seq_printf(m, "%-5s  %-7s  %-15s  %-*s  %s\n", "irq", "hwirq",
+		      "chip name", (int)(2 * sizeof(void *) + 2), "chip data",
+		      "domain name");
+
+	for (i = 1; i < nr_irqs; i++) {
+		desc = irq_to_desc(i);
+		if (!desc)
+			continue;
+
+		raw_spin_lock_irqsave(&desc->lock, flags);
+
+		if (desc->action && desc->action->handler) {
+			struct irq_chip *chip;
+
+			seq_printf(m, "%5d  ", i);
+			seq_printf(m, "0x%05lx  ", desc->irq_data.hwirq);
+
+			chip = irq_desc_get_chip(desc);
+			if (chip && chip->name)
+				p = chip->name;
+			else
+				p = none;
+			seq_printf(m, "%-15s  ", p);
+
+			data = irq_desc_get_chip_data(desc);
+			seq_printf(m, data ? "0x%p  " : "  %p  ", data);
+
+			if (desc->irq_data.domain && desc->irq_data.domain->of_node)
+				p = desc->irq_data.domain->of_node->full_name;
+			else
+				p = none;
+			seq_printf(m, "%s\n", p);
+		}
+
+		raw_spin_unlock_irqrestore(&desc->lock, flags);
+	}
+
+	return 0;
+}
+
+static int virq_debug_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, virq_debug_show, inode->i_private);
+}
+
+static const struct file_operations virq_debug_fops = {
+	.open = virq_debug_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
+static int __init irq_debugfs_init(void)
+{
+	if (debugfs_create_file("irq_domain_mapping", S_IRUGO, NULL,
+				 NULL, &virq_debug_fops) == NULL)
+		return -ENOMEM;
+
+	return 0;
+}
+__initcall(irq_debugfs_init);
+#endif /* CONFIG_IRQ_DOMAIN_DEBUG */
+
+int irq_domain_simple_map(struct irq_domain *d, unsigned int irq,
+			  irq_hw_number_t hwirq)
+{
 	return 0;
 }
 
 /**
- * irq_domain_create_simple() - Set up a 'simple' translation range
+ * irq_domain_xlate_onecell() - Generic xlate for direct one cell bindings
+ *
+ * Device Tree IRQ specifier translation function which works with one cell
+ * bindings where the cell value maps directly to the hwirq number.
  */
-void irq_domain_add_simple(struct device_node *controller, int irq_base)
+int irq_domain_xlate_onecell(struct irq_domain *d, struct device_node *ctrlr,
+			     const u32 *intspec, unsigned int intsize,
+			     unsigned long *out_hwirq, unsigned int *out_type)
 {
-	struct irq_domain *domain;
-	int rc;
-
-	domain = kzalloc(sizeof(*domain), GFP_KERNEL);
-	if (!domain) {
-		WARN_ON(1);
-		return;
-	}
-
-	domain->irq_base = irq_base;
-	domain->of_node = of_node_get(controller);
-	domain->ops = &irq_domain_simple_ops;
-	rc = irq_domain_add(domain);
-	if (rc) {
-		WARN(1, "Unable to create irq domain\n");
-		return;
-	}
-	irq_domain_register(domain);
+	if (WARN_ON(intsize < 1))
+		return -EINVAL;
+	*out_hwirq = intspec[0];
+	*out_type = IRQ_TYPE_NONE;
+	return 0;
 }
-EXPORT_SYMBOL_GPL(irq_domain_add_simple);
+EXPORT_SYMBOL_GPL(irq_domain_xlate_onecell);
 
+/**
+ * irq_domain_xlate_twocell() - Generic xlate for direct two cell bindings
+ *
+ * Device Tree IRQ specifier translation function which works with two cell
+ * bindings where the cell values map directly to the hwirq number
+ * and linux irq flags.
+ */
+int irq_domain_xlate_twocell(struct irq_domain *d, struct device_node *ctrlr,
+			const u32 *intspec, unsigned int intsize,
+			irq_hw_number_t *out_hwirq, unsigned int *out_type)
+{
+	if (WARN_ON(intsize < 2))
+		return -EINVAL;
+	*out_hwirq = intspec[0];
+	*out_type = intspec[1] & IRQ_TYPE_SENSE_MASK;
+	return 0;
+}
+EXPORT_SYMBOL_GPL(irq_domain_xlate_twocell);
+
+/**
+ * irq_domain_xlate_onetwocell() - Generic xlate for one or two cell bindings
+ *
+ * Device Tree IRQ specifier translation function which works with either one
+ * or two cell bindings where the cell values map directly to the hwirq number
+ * and linux irq flags.
+ *
+ * Note: don't use this function unless your interrupt controller explicitly
+ * supports both one and two cell bindings.  For the majority of controllers
+ * the _onecell() or _twocell() variants above should be used.
+ */
+int irq_domain_xlate_onetwocell(struct irq_domain *d,
+				struct device_node *ctrlr,
+				const u32 *intspec, unsigned int intsize,
+				unsigned long *out_hwirq, unsigned int *out_type)
+{
+	if (WARN_ON(intsize < 1))
+		return -EINVAL;
+	*out_hwirq = intspec[0];
+	*out_type = (intsize > 1) ? intspec[1] : IRQ_TYPE_NONE;
+	return 0;
+}
+EXPORT_SYMBOL_GPL(irq_domain_xlate_onetwocell);
+
+const struct irq_domain_ops irq_domain_simple_ops = {
+	.map = irq_domain_simple_map,
+	.xlate = irq_domain_xlate_onetwocell,
+};
+EXPORT_SYMBOL_GPL(irq_domain_simple_ops);
+
+#ifdef CONFIG_OF_IRQ
 void irq_domain_generate_simple(const struct of_device_id *match,
 				u64 phys_base, unsigned int irq_start)
 {
 	struct device_node *node;
-	pr_info("looking for phys_base=%llx, irq_start=%i\n",
+	pr_debug("looking for phys_base=%llx, irq_start=%i\n",
 		(unsigned long long) phys_base, (int) irq_start);
 	node = of_find_matching_node_by_address(NULL, match, phys_base);
 	if (node)
-		irq_domain_add_simple(node, irq_start);
-	else
-		pr_info("no node found\n");
+		irq_domain_add_legacy(node, 32, irq_start, 0,
+				      &irq_domain_simple_ops, NULL);
 }
 EXPORT_SYMBOL_GPL(irq_domain_generate_simple);
-#endif /* CONFIG_OF_IRQ */
-
-struct irq_domain_ops irq_domain_simple_ops = {
-#ifdef CONFIG_OF_IRQ
-	.dt_translate = irq_domain_simple_dt_translate,
-#endif /* CONFIG_OF_IRQ */
-};
-EXPORT_SYMBOL_GPL(irq_domain_simple_ops);
+#endif
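
A minimal registration sketch against the reworked irq_domain API above,
assuming a linear domain of 32 interrupts; dummy_irq_chip stands in for the
controller's real irq_chip, and hwirq 7 is arbitrary:

	static int my_irq_map(struct irq_domain *d, unsigned int virq,
			      irq_hw_number_t hwirq)
	{
		irq_set_chip_and_handler(virq, &dummy_irq_chip,
					 handle_simple_irq);
		return 0;
	}

	static const struct irq_domain_ops my_irq_ops = {
		.map   = my_irq_map,
		.xlate = irq_domain_xlate_twocell,
	};

	/* in the interrupt controller's init code, np is its device_node */
	struct irq_domain *d;
	unsigned int virq;

	d = irq_domain_add_linear(np, 32, &my_irq_ops, NULL);
	if (!d)
		return -ENOMEM;			/* registration failed */
	virq = irq_create_mapping(d, 7);	/* hwirq 7 -> linux virq */

From the flow handler, irq_linear_revmap() or irq_find_mapping() can then
translate hardware numbers back to linux irqs.
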
diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
index 5034393..947bd85 100644
--- a/sound/core/pcm_native.c
+++ b/sound/core/pcm_native.c
@@ -2779,7 +2779,7 @@
 
 	pcm_file = file->private_data;
 
-	if (((cmd >> 8) & 0xff) != 'A')
+	if ((((cmd >> 8) & 0xff) != 'A') && (((cmd >> 8) & 0xff) != 'C'))
 		return -ENOTTY;
 
 	return snd_pcm_capture_ioctl1(file, pcm_file->substream, cmd,
diff --git a/sound/soc/msm/mdm9615.c b/sound/soc/msm/mdm9615.c
index 90d8723..dbe5d00 100644
--- a/sound/soc/msm/mdm9615.c
+++ b/sound/soc/msm/mdm9615.c
@@ -188,9 +188,6 @@
 #define LPAIF_SPARE_MUX_CTL_PRI_MUX_SEL_BMSK	0x3
 #define LPAIF_SPARE_MUX_CTL_PRI_MUX_SEL_SHFT		0x0
 
-static u32 spare_shadow;
-static u32 sif_shadow;
-
 static atomic_t msm9615_auxpcm_ref;
 static atomic_t msm9615_sec_auxpcm_ref;
 
@@ -1066,30 +1063,26 @@
 {
 	struct msm_i2s_ctl *pintf = &msm9x15_i2s_ctl;
 	if (i2s_intf == MSM_INTF_PRIM) {
-		if (i2s_dir == MSM_DIR_RX)
-			gpio_free(GPIO_PRIM_I2S_DOUT);
-		if (i2s_dir == MSM_DIR_TX)
-			gpio_free(GPIO_PRIM_I2S_DIN);
 		if (pintf->intf_status[i2s_intf][MSM_DIR_TX] == 0 &&
 			pintf->intf_status[i2s_intf][MSM_DIR_RX] == 0) {
+			gpio_free(GPIO_PRIM_I2S_DIN);
+			gpio_free(GPIO_PRIM_I2S_DOUT);
 			gpio_free(GPIO_PRIM_I2S_SCK);
 			gpio_free(GPIO_PRIM_I2S_WS);
 		}
 	} else if (i2s_intf == MSM_INTF_SECN) {
-		if (i2s_dir == MSM_DIR_RX)
-			gpio_free(GPIO_SEC_I2S_DOUT);
-		if (i2s_dir == MSM_DIR_TX)
-			gpio_free(GPIO_SEC_I2S_DIN);
 		if (pintf->intf_status[i2s_intf][MSM_DIR_TX] == 0 &&
 			pintf->intf_status[i2s_intf][MSM_DIR_RX] == 0) {
+			gpio_free(GPIO_SEC_I2S_DOUT);
 			gpio_free(GPIO_SEC_I2S_WS);
+			gpio_free(GPIO_SEC_I2S_DIN);
 			gpio_free(GPIO_SEC_I2S_SCK);
 		}
 	}
 	return 0;
 }
 
-int msm9615_i2s_intf_dir_sel(const char *cpu_dai_name,
+static int msm9615_i2s_intf_dir_sel(const char *cpu_dai_name,
 			     u8 *i2s_intf, u8 *i2s_dir)
 {
 	int ret = 0;
@@ -1117,34 +1110,37 @@
 	return ret;
 }
 
-int msm9615_enable_i2s_gpio(u8 i2s_intf, u8 i2s_dir)
+static int msm9615_enable_i2s_gpio(u8 i2s_intf, u8 i2s_dir)
 {
 	u8 ret = 0;
 	struct msm_i2s_ctl *pintf = &msm9x15_i2s_ctl;
+
 	if (i2s_intf == MSM_INTF_PRIM) {
-		if (i2s_dir == MSM_DIR_TX) {
+		if (pintf->intf_status[i2s_intf][MSM_DIR_TX] == 0 &&
+		    pintf->intf_status[i2s_intf][MSM_DIR_RX] == 0) {
+
+			ret = gpio_request(GPIO_PRIM_I2S_DOUT,
+					   "I2S_PRIM_DOUT");
+			if (ret) {
+				pr_err("%s: Failed to request gpio %d\n",
+					__func__, GPIO_PRIM_I2S_DOUT);
+				goto err;
+			}
+
 			ret = gpio_request(GPIO_PRIM_I2S_DIN, "I2S_PRIM_DIN");
 			if (ret) {
 				pr_err("%s: Failed to request gpio %d\n",
-				       __func__, GPIO_PRIM_I2S_DIN);
+					       __func__, GPIO_PRIM_I2S_DIN);
 				goto err;
 			}
-		} else if (i2s_dir == MSM_DIR_RX) {
-			ret = gpio_request(GPIO_PRIM_I2S_DOUT,
-					       "I2S_PRIM_DOUT");
-			if (ret) {
-				pr_err("%s: Failed to request gpio %d\n",
-				       __func__, GPIO_PRIM_I2S_DOUT);
-				goto err;
-			}
-		} else if (pintf->intf_status[i2s_intf][MSM_DIR_TX] == 0 &&
-			   pintf->intf_status[i2s_intf][MSM_DIR_RX] == 0) {
+
 			ret = gpio_request(GPIO_PRIM_I2S_SCK, "I2S_PRIM_SCK");
 			if (ret) {
 				pr_err("%s: Failed to request gpio %d\n",
 				       __func__, GPIO_PRIM_I2S_SCK);
 				goto err;
 			}
+
 			ret = gpio_request(GPIO_PRIM_I2S_WS, "I2S_PRIM_WS");
 			if (ret) {
 				pr_err("%s: Failed to request gpio %d\n",
@@ -1153,28 +1149,30 @@
 			}
 		}
 	} else if (i2s_intf == MSM_INTF_SECN) {
-		if (i2s_dir == MSM_DIR_RX) {
-			ret = gpio_request(GPIO_SEC_I2S_DOUT, "I2S_SEC_DOUT");
-			if (ret) {
-				pr_err("%s: Failed to request gpio %d\n",
-				       __func__, GPIO_SEC_I2S_DOUT);
-				goto err;
-			}
-		} else if (i2s_dir == MSM_DIR_TX) {
+		if (pintf->intf_status[i2s_intf][MSM_DIR_TX] == 0 &&
+		    pintf->intf_status[i2s_intf][MSM_DIR_RX] == 0) {
+
 			ret = gpio_request(GPIO_SEC_I2S_DIN, "I2S_SEC_DIN");
 			if (ret) {
 				pr_err("%s: Failed to request gpio %d\n",
 				       __func__, GPIO_SEC_I2S_DIN);
 				goto err;
 			}
-		} else if (pintf->intf_status[i2s_intf][MSM_DIR_TX] == 0 &&
-			   pintf->intf_status[i2s_intf][MSM_DIR_RX] == 0) {
+
+			ret = gpio_request(GPIO_SEC_I2S_DOUT, "I2S_SEC_DOUT");
+			if (ret) {
+				pr_err("%s: Failed to request gpio %d\n",
+				       __func__, GPIO_SEC_I2S_DOUT);
+				goto err;
+			}
+
 			ret = gpio_request(GPIO_SEC_I2S_SCK, "I2S_SEC_SCK");
 			if (ret) {
 				pr_err("%s: Failed to request gpio %d\n",
 				       __func__, GPIO_SEC_I2S_SCK);
 				goto err;
 			}
+
 			ret = gpio_request(GPIO_SEC_I2S_WS, "I2S_SEC_WS");
 			if (ret) {
 				pr_err("%s: Failed to request gpio %d\n",
@@ -1283,20 +1281,33 @@
 	return ret;
 }
 
-void msm9615_config_i2s_sif_mux(u8 value)
+static void msm9615_config_i2s_sif_mux(u8 value)
 {
 	struct msm_i2s_ctl *pintf = &msm9x15_i2s_ctl;
-	sif_shadow  = 0x00000;
+	u32 sif_shadow  = 0x0000;
+	/* Make this variable global if both the secondary and
+	 * primary interfaces need to be supported. This is required
+	 * to retain bits in the interface and set only specific
+	 * bits in the register. Also set the Sec Intf bits;
+	 * the secondary interface bits are 0,1.
+	 **/
 	sif_shadow = (sif_shadow & LPASS_SIF_MUX_CTL_PRI_MUX_SEL_BMSK) |
 		     (value << LPASS_SIF_MUX_CTL_PRI_MUX_SEL_SHFT);
-	iowrite32(sif_shadow, pintf->sif_virt_addr);
+	if (pintf->sif_virt_addr != NULL)
+		iowrite32(sif_shadow, pintf->sif_virt_addr);
 	/* Dont read SIF register. Device crashes. */
 	pr_debug("%s() SIF Reg = 0x%x\n", __func__, sif_shadow);
 }
 
-void msm9615_config_i2s_spare_mux(u8 value, u8 i2s_intf)
+static void msm9615_config_i2s_spare_mux(u8 value, u8 i2s_intf)
 {
 	struct msm_i2s_ctl *pintf = &msm9x15_i2s_ctl;
+	u32 spare_shadow = 0x0000;
+	/* Make this variable global if both the secondary and
+	 * primary interfaces need to be supported. This is required
+	 * to retain bits in the interface and set only specific
+	 * bits in the register. Also set the Sec Intf bits.
+	 **/
 	if (i2s_intf == MSM_INTF_PRIM) {
 		/* Configure Primary SIF */
 	    spare_shadow = (spare_shadow & LPAIF_SPARE_MUX_CTL_PRI_MUX_SEL_BMSK
@@ -1307,7 +1318,8 @@
 	    spare_shadow = (spare_shadow & LPAIF_SPARE_MUX_CTL_SEC_MUX_SEL_BMSK
 			   ) | (value << LPAIF_SPARE_MUX_CTL_SEC_MUX_SEL_SHFT);
 	}
-	iowrite32(spare_shadow, pintf->spare_virt_addr);
+	if (pintf->spare_virt_addr != NULL)
+		iowrite32(spare_shadow, pintf->spare_virt_addr);
 	/* Dont read SPARE register. Device crashes. */
 	pr_debug("%s( ): SPARE Reg =0x%x\n", __func__, spare_shadow);
 }
@@ -2214,6 +2226,9 @@
 	atomic_set(&msm9615_sec_auxpcm_ref, 0);
 	msm9x15_i2s_ctl.sif_virt_addr = ioremap(LPASS_SIF_MUX_ADDR, 4);
 	msm9x15_i2s_ctl.spare_virt_addr = ioremap(LPAIF_SPARE_ADDR, 4);
+	if (msm9x15_i2s_ctl.spare_virt_addr == NULL ||
+	    msm9x15_i2s_ctl.sif_virt_addr == NULL)
+		pr_err("%s: SIF or Spare ptr is NULL\n", __func__);
 	sif_virt_addr = ioremap(LPASS_SIF_MUX_ADDR, 4);
 	secpcm_portslc_virt_addr = ioremap(SEC_PCM_PORT_SLC_ADDR, 4);
 
diff --git a/sound/soc/msm/mpq8064.c b/sound/soc/msm/mpq8064.c
index 4ecd8df..391c5f3 100644
--- a/sound/soc/msm/mpq8064.c
+++ b/sound/soc/msm/mpq8064.c
@@ -1257,8 +1257,8 @@
 		.cpu_dai_name	= "MultiMedia5",
 		.platform_name  = "msm-multi-ch-pcm-dsp",
 		.dynamic = 1,
-		.trigger = {SND_SOC_DPCM_TRIGGER_BESPOKE,
-					SND_SOC_DPCM_TRIGGER_BESPOKE},
+		.trigger = {SND_SOC_DPCM_TRIGGER_POST,
+					SND_SOC_DPCM_TRIGGER_POST},
 		.codec_dai_name = "snd-soc-dummy-dai",
 		.codec_name = "snd-soc-dummy",
 		.ignore_suspend = 1,
@@ -1271,8 +1271,8 @@
 		.cpu_dai_name	= "MultiMedia6",
 		.platform_name  = "msm-multi-ch-pcm-dsp",
 		.dynamic = 1,
-		.trigger = {SND_SOC_DPCM_TRIGGER_BESPOKE,
-					SND_SOC_DPCM_TRIGGER_BESPOKE},
+		.trigger = {SND_SOC_DPCM_TRIGGER_POST,
+					SND_SOC_DPCM_TRIGGER_POST},
 		.codec_dai_name = "snd-soc-dummy-dai",
 		.codec_name = "snd-soc-dummy",
 		.ignore_suspend = 1,
@@ -1285,8 +1285,8 @@
 		.cpu_dai_name   = "MultiMedia7",
 		.platform_name  = "msm-compr-dsp",
 		.dynamic = 1,
-		.trigger = {SND_SOC_DPCM_TRIGGER_BESPOKE,
-					SND_SOC_DPCM_TRIGGER_BESPOKE},
+		.trigger = {SND_SOC_DPCM_TRIGGER_POST,
+					SND_SOC_DPCM_TRIGGER_POST},
 		.codec_dai_name = "snd-soc-dummy-dai",
 		.codec_name = "snd-soc-dummy",
 		.ignore_suspend = 1,
@@ -1299,8 +1299,8 @@
 		.cpu_dai_name   = "MultiMedia8",
 		.platform_name  = "msm-compr-dsp",
 		.dynamic = 1,
-		.trigger = {SND_SOC_DPCM_TRIGGER_BESPOKE,
-					SND_SOC_DPCM_TRIGGER_BESPOKE},
+		.trigger = {SND_SOC_DPCM_TRIGGER_POST,
+					SND_SOC_DPCM_TRIGGER_POST},
 		.codec_dai_name = "snd-soc-dummy-dai",
 		.codec_name = "snd-soc-dummy",
 		.ignore_suspend = 1,
diff --git a/sound/soc/msm/msm-compr-q6.c b/sound/soc/msm/msm-compr-q6.c
index d4045e1..2455128 100644
--- a/sound/soc/msm/msm-compr-q6.c
+++ b/sound/soc/msm/msm-compr-q6.c
@@ -235,7 +235,7 @@
 		memset(&wma_cfg, 0x0, sizeof(struct asm_wma_cfg));
 		wma_cfg.format_tag = compr->info.codec_param.codec.format;
 		wma_cfg.ch_cfg = runtime->channels;
-		wma_cfg.sample_rate = runtime->rate;
+		wma_cfg.sample_rate = compr->info.codec_param.codec.sample_rate;
 		wma_cfg.avg_bytes_per_sec =
 			compr->info.codec_param.codec.bit_rate/8;
 		wma_cfg.block_align = compr->info.codec_param.codec.align;
@@ -255,7 +255,8 @@
 		memset(&wma_pro_cfg, 0x0, sizeof(struct asm_wmapro_cfg));
 		wma_pro_cfg.format_tag = compr->info.codec_param.codec.format;
 		wma_pro_cfg.ch_cfg = compr->info.codec_param.codec.ch_in;
-		wma_pro_cfg.sample_rate = runtime->rate;
+		wma_pro_cfg.sample_rate =
+			compr->info.codec_param.codec.sample_rate;
 		wma_pro_cfg.avg_bytes_per_sec =
 			compr->info.codec_param.codec.bit_rate/8;
 		wma_pro_cfg.block_align = compr->info.codec_param.codec.align;
@@ -266,6 +267,10 @@
 			compr->info.codec_param.codec.options.wma.channelmask;
 		wma_pro_cfg.encode_opt =
 			compr->info.codec_param.codec.options.wma.encodeopt;
+		wma_pro_cfg.adv_encode_opt =
+			compr->info.codec_param.codec.options.wma.encodeopt1;
+		wma_pro_cfg.adv_encode_opt2 =
+			compr->info.codec_param.codec.options.wma.encodeopt2;
 		ret = q6asm_media_format_block_wmapro(prtd->audio_client,
 				&wma_pro_cfg);
 		if (ret < 0)
diff --git a/sound/soc/msm/msm-dai-fe.c b/sound/soc/msm/msm-dai-fe.c
index 56e83d5..8a49f1b 100644
--- a/sound/soc/msm/msm-dai-fe.c
+++ b/sound/soc/msm/msm-dai-fe.c
@@ -182,6 +182,7 @@
 	{
 		.playback = {
 			.stream_name = "MultiMedia5 Playback",
+			.aif_name = "MM_DL5",
 			.rates = (SNDRV_PCM_RATE_8000_48000 |
 					SNDRV_PCM_RATE_KNOT),
 			.formats = SNDRV_PCM_FMTBIT_S16_LE,
@@ -196,6 +197,7 @@
 	{
 		.playback = {
 			.stream_name = "MultiMedia6 Playback",
+			.aif_name = "MM_DL6",
 			.rates = (SNDRV_PCM_RATE_8000_48000 |
 					SNDRV_PCM_RATE_KNOT),
 			.formats = SNDRV_PCM_FMTBIT_S16_LE,
@@ -210,6 +212,7 @@
 	{
 		.playback = {
 			.stream_name = "MultiMedia7 Playback",
+			.aif_name = "MM_DL7",
 			.rates = (SNDRV_PCM_RATE_8000_48000 |
 					SNDRV_PCM_RATE_KNOT),
 			.formats = SNDRV_PCM_FMTBIT_S16_LE,
@@ -224,6 +227,7 @@
 	{
 		.playback = {
 			.stream_name = "MultiMedia8 Playback",
+			.aif_name = "MM_DL8",
 			.rates = (SNDRV_PCM_RATE_8000_48000 |
 					SNDRV_PCM_RATE_KNOT),
 			.formats = SNDRV_PCM_FMTBIT_S16_LE,
diff --git a/sound/soc/msm/msm-multi-ch-pcm-q6.c b/sound/soc/msm/msm-multi-ch-pcm-q6.c
index 734d34f..739e2ea 100644
--- a/sound/soc/msm/msm-multi-ch-pcm-q6.c
+++ b/sound/soc/msm/msm-multi-ch-pcm-q6.c
@@ -49,7 +49,8 @@
 #define PLAYBACK_MAX_PERIOD_SIZE	4032
 #define PLAYBACK_MIN_PERIOD_SIZE        256
 #define CAPTURE_NUM_PERIODS		16
-#define CAPTURE_PERIOD_SIZE		320
+#define CAPTURE_MIN_PERIOD_SIZE		320
+#define CAPTURE_MAX_PERIOD_SIZE		5376
 
 static struct snd_pcm_hardware msm_pcm_hardware_capture = {
 	.info =                 (SNDRV_PCM_INFO_MMAP |
@@ -63,9 +64,9 @@
 	.rate_max =             48000,
 	.channels_min =         1,
 	.channels_max =         2,
-	.buffer_bytes_max =     CAPTURE_NUM_PERIODS * CAPTURE_PERIOD_SIZE,
-	.period_bytes_min =	CAPTURE_PERIOD_SIZE,
-	.period_bytes_max =     CAPTURE_PERIOD_SIZE,
+	.buffer_bytes_max =     CAPTURE_NUM_PERIODS * CAPTURE_MAX_PERIOD_SIZE,
+	.period_bytes_min =	CAPTURE_MIN_PERIOD_SIZE,
+	.period_bytes_max =     CAPTURE_MAX_PERIOD_SIZE,
 	.periods_min =          CAPTURE_NUM_PERIODS,
 	.periods_max =          CAPTURE_NUM_PERIODS,
 	.fifo_size =            0,
@@ -390,6 +391,17 @@
 		}
 	}
 
+	if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) {
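+		/*
+		 * Capture period size now ranges from CAPTURE_MIN_PERIOD_SIZE
+		 * to CAPTURE_MAX_PERIOD_SIZE, so bound the total buffer bytes
+		 * to match.
+		 */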
+		ret = snd_pcm_hw_constraint_minmax(runtime,
+			SNDRV_PCM_HW_PARAM_BUFFER_BYTES,
+			CAPTURE_NUM_PERIODS * CAPTURE_MIN_PERIOD_SIZE,
+			CAPTURE_NUM_PERIODS * CAPTURE_MAX_PERIOD_SIZE);
+		if (ret < 0) {
+			pr_err("failed to set buffer bytes min/max constraint, ret = %d\n",
+									ret);
+		}
+	}
+
 	prtd->dsp_cnt = 0;
 	runtime->private_data = prtd;
 	pr_debug("substream->pcm->device = %d\n", substream->pcm->device);
@@ -695,21 +707,14 @@
 	else
 		dir = OUT;
 
-	if (dir == OUT) {
-		ret = q6asm_audio_client_buf_alloc_contiguous(dir,
-			prtd->audio_client,
-			runtime->hw.period_bytes_min,
-			runtime->hw.periods_max);
-	} else {
-		/*
-		 *TODO : Need to Add Async IO changes. All period
-		 * size might not be supported.
-		 */
-		ret = q6asm_audio_client_buf_alloc_contiguous(dir,
-			prtd->audio_client,
-			(params_buffer_bytes(params) / params_periods(params)),
-			params_periods(params));
-	}
+	/*
+	 *TODO : Need to Add Async IO changes. All period
+	 * size might not be supported.
+	 */
+	ret = q6asm_audio_client_buf_alloc_contiguous(dir,
+		prtd->audio_client,
+		(params_buffer_bytes(params) / params_periods(params)),
+		params_periods(params));
 
 	if (ret < 0) {
 		pr_err("Audio Start: Buffer Allocation failed rc = %d\n", ret);
@@ -723,10 +728,7 @@
 	dma_buf->private_data = NULL;
 	dma_buf->area = buf[0].data;
 	dma_buf->addr =  buf[0].phys;
-	if (dir == OUT)
-		dma_buf->bytes = runtime->hw.buffer_bytes_max;
-	else
-		dma_buf->bytes = params_buffer_bytes(params);
+	dma_buf->bytes = params_buffer_bytes(params);
 	if (!dma_buf->area)
 		return -ENOMEM;
 
diff --git a/sound/soc/msm/msm-pcm-routing.c b/sound/soc/msm/msm-pcm-routing.c
index cc51a0f6..239c904 100644
--- a/sound/soc/msm/msm-pcm-routing.c
+++ b/sound/soc/msm/msm-pcm-routing.c
@@ -69,7 +69,7 @@
 static const DECLARE_TLV_DB_LINEAR(compressed_rx_vol_gain, 0,
 			INT_RX_VOL_MAX_STEPS);
 
-
+static int msm_route_ec_ref_rx;
 
 /* Equal to Frontend after last of the MULTIMEDIA SESSIONS */
 #define MAX_EQ_SESSIONS		MSM_FRONTEND_DAI_CS_VOICE
@@ -1046,6 +1046,45 @@
 	return 0;
 }
 
+static int msm_routing_ec_ref_rx_get(struct snd_kcontrol *kcontrol,
+					struct snd_ctl_elem_value *ucontrol)
+{
+	pr_debug("%s: ec_ref_rx = %d\n", __func__, msm_route_ec_ref_rx);
+	ucontrol->value.integer.value[0] = msm_route_ec_ref_rx;
+	return 0;
+}
+
+static int msm_routing_ec_ref_rx_put(struct snd_kcontrol *kcontrol,
+					struct snd_ctl_elem_value *ucontrol)
+{
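+	/*
+	 * Map the control's enum index to the AFE RX port used as the
+	 * echo-cancellation reference: 0 -> SLIMBUS_0_RX, 1 -> PRIMARY_I2S_RX;
+	 * any other selection clears the override.
+	 */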
+	switch (ucontrol->value.integer.value[0]) {
+	case 0:
+		msm_route_ec_ref_rx = SLIMBUS_0_RX;
+		break;
+	case 1:
+		msm_route_ec_ref_rx = PRIMARY_I2S_RX;
+		break;
+	default:
+		msm_route_ec_ref_rx = 0;
+		break;
+	}
+	adm_ec_ref_rx_id(msm_route_ec_ref_rx);
+	pr_debug("%s: msm_route_ec_ref_rx = %d\n",
+			__func__, msm_route_ec_ref_rx);
+	return 0;
+}
+
+static const char * const ec_ref_rx[] = {"SLIM_RX", "I2S_RX", "PROXY_RX",
+								"NONE"};
+static const struct soc_enum msm_route_ec_ref_rx_enum[] = {
+				SOC_ENUM_SINGLE_EXT(4, ec_ref_rx),
+};
+
+static const struct snd_kcontrol_new ec_ref_rx_mixer_controls[] = {
+	SOC_ENUM_EXT("EC_REF_RX", msm_route_ec_ref_rx_enum[0],
+	msm_routing_ec_ref_rx_get, msm_routing_ec_ref_rx_put),
+};
+
 static const struct snd_kcontrol_new pri_i2s_rx_mixer_controls[] = {
 	SOC_SINGLE_EXT("MultiMedia1", MSM_BACKEND_DAI_PRI_I2S_RX ,
 	MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer,
@@ -2599,6 +2638,9 @@
 				lpa_SRS_trumedia_controls_I2S,
 			ARRAY_SIZE(lpa_SRS_trumedia_controls_I2S));
 
+	snd_soc_add_platform_controls(platform,
+				ec_ref_rx_mixer_controls,
+			ARRAY_SIZE(ec_ref_rx_mixer_controls));
 	return 0;
 }
 
diff --git a/sound/soc/msm/qdsp6/q6adm.c b/sound/soc/msm/qdsp6/q6adm.c
index bc57ef3..17a952a 100644
--- a/sound/soc/msm/qdsp6/q6adm.c
+++ b/sound/soc/msm/qdsp6/q6adm.c
@@ -38,6 +38,7 @@
 	atomic_t copp_cnt[AFE_MAX_PORTS];
 	atomic_t copp_stat[AFE_MAX_PORTS];
 	wait_queue_head_t wait;
+	int ec_ref_rx;
 };
 
 static struct acdb_cal_block mem_addr_audproc[MAX_AUDPROC_TYPES];
@@ -45,6 +46,7 @@
 
 static struct adm_ctl			this_adm;
 
+
 int srs_trumedia_open(int port_id, int srs_tech_id, void *srs_params)
 {
 	struct asm_pp_params_command *open = NULL;
@@ -642,8 +644,16 @@
 
 		open.mode = path;
 		open.endpoint_id1 = port_id;
-		open.endpoint_id2 = 0xFFFF;
 
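+		/*
+		 * Use the default far-end endpoint unless an EC reference RX
+		 * port was set via adm_ec_ref_rx_id(); that pending value is
+		 * applied only when path != 1 and is then cleared.
+		 */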
+		if (this_adm.ec_ref_rx == 0) {
+			open.endpoint_id2 = 0xFFFF;
+		} else if (this_adm.ec_ref_rx && (path != 1)) {
+			open.endpoint_id2 = this_adm.ec_ref_rx;
+			this_adm.ec_ref_rx = 0;
+		}
+
+		pr_debug("%s: open.endpoint_id1: %d open.endpoint_id2: %d\n",
+			__func__, open.endpoint_id1, open.endpoint_id2);
 		/* convert path to acdb path */
 		if (path == ADM_PATH_PLAYBACK)
 			open.topology_id = get_adm_rx_topology();
@@ -772,8 +782,16 @@
 
 		open.mode = path;
 		open.endpoint_id1 = port_id;
-		open.endpoint_id2 = 0xFFFF;
 
+		if (this_adm.ec_ref_rx == 0) {
+			open.endpoint_id2 = 0xFFFF;
+		} else if (this_adm.ec_ref_rx && (path != 1)) {
+			open.endpoint_id2 = this_adm.ec_ref_rx;
+			this_adm.ec_ref_rx = 0;
+		}
+
+		pr_debug("%s: open.endpoint_id1: %d open.endpoint_id2: %d\n",
+			__func__, open.endpoint_id1, open.endpoint_id2);
 		/* convert path to acdb path */
 		if (path == ADM_PATH_PLAYBACK)
 			open.topology_id = get_adm_rx_topology();
@@ -1073,6 +1091,12 @@
 	return atomic_read(&this_adm.copp_id[port_index]);
 }
 
+void adm_ec_ref_rx_id(int port_id)
+{
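+	/*
+	 * Store the RX port to be used as the EC reference; adm_open() /
+	 * adm_multi_ch_copp_open() pick it up as endpoint_id2 and clear it.
+	 */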
+	this_adm.ec_ref_rx = port_id;
+	pr_debug("%s: ec_ref_rx: %d\n", __func__, this_adm.ec_ref_rx);
+}
+
 int adm_close(int port_id)
 {
 	struct apr_hdr close;
diff --git a/sound/soc/msm/qdsp6/q6asm.c b/sound/soc/msm/qdsp6/q6asm.c
index 09bfd94..50011a1 100644
--- a/sound/soc/msm/qdsp6/q6asm.c
+++ b/sound/soc/msm/qdsp6/q6asm.c
@@ -246,21 +246,20 @@
 						port->buf[cnt].handle);
 				ion_client_destroy(port->buf[cnt].client);
 #else
-				pr_debug("%s:data[%p]phys[%p][%p] cnt[%d]"
-					 "mem_buffer[%p]\n",
+				pr_debug("%s:data[%p]phys[%p][%p] cnt[%d] mem_buffer[%p]\n",
 					__func__, (void *)port->buf[cnt].data,
-					   (void *)port->buf[cnt].phys,
-					   (void *)&port->buf[cnt].phys, cnt,
-					   (void *)port->buf[cnt].mem_buffer);
+					(void *)port->buf[cnt].phys,
+					(void *)&port->buf[cnt].phys, cnt,
+					(void *)port->buf[cnt].mem_buffer);
 				if (IS_ERR((void *)port->buf[cnt].mem_buffer))
-					pr_err("%s:mem buffer invalid, error ="
-						 "%ld\n", __func__,
+					pr_err("%s:mem buffer invalid, error = %ld\n",
+					 __func__,
 				PTR_ERR((void *)port->buf[cnt].mem_buffer));
 				else {
 					if (iounmap(
 						port->buf[cnt].mem_buffer) < 0)
-						pr_err("%s: unmap buffer"
-							" failed\n", __func__);
+						pr_err("%s: unmap buffer failed\n",
+								 __func__);
 				}
 				free_contiguous_memory_by_paddr(
 					port->buf[cnt].phys);
@@ -306,8 +305,7 @@
 		ion_unmap_kernel(port->buf[0].client, port->buf[0].handle);
 		ion_free(port->buf[0].client, port->buf[0].handle);
 		ion_client_destroy(port->buf[0].client);
-		pr_debug("%s:data[%p]phys[%p][%p]"
-			", client[%p] handle[%p]\n",
+		pr_debug("%s:data[%p]phys[%p][%p], client[%p] handle[%p]\n",
 			__func__,
 			(void *)port->buf[0].data,
 			(void *)port->buf[0].phys,
@@ -315,22 +313,20 @@
 			(void *)port->buf[0].client,
 			(void *)port->buf[0].handle);
 #else
-		pr_debug("%s:data[%p]phys[%p][%p]"
-			"mem_buffer[%p]\n",
+		pr_debug("%s:data[%p]phys[%p][%p] mem_buffer[%p]\n",
 			__func__,
 			(void *)port->buf[0].data,
 			(void *)port->buf[0].phys,
 			(void *)&port->buf[0].phys,
 			(void *)port->buf[0].mem_buffer);
 		if (IS_ERR((void *)port->buf[0].mem_buffer))
-			pr_err("%s:mem buffer invalid, error ="
-				"%ld\n", __func__,
+			pr_err("%s:mem buffer invalid, error = %ld\n",
+				 __func__,
 				PTR_ERR((void *)port->buf[0].mem_buffer));
 		else {
 			if (iounmap(
 				port->buf[0].mem_buffer) < 0)
-				pr_err("%s: unmap buffer"
-					" failed\n", __func__);
+				pr_err("%s: unmap buffer failed\n", __func__);
 		}
 		free_contiguous_memory_by_paddr(port->buf[0].phys);
 #endif
@@ -433,8 +429,8 @@
 					(apr_fn)q6asm_mmapcallback,\
 					0x0FFFFFFFF, &this_mmap);
 		if (this_mmap.apr == NULL) {
-			pr_debug("%s Unable to register \
-				APR ASM common port \n", __func__);
+			pr_debug("%s Unable to register APR ASM common port\n",
+							 __func__);
 			goto fail;
 		}
 	}
@@ -523,8 +519,7 @@
 						(UINT_MAX, "audio_client");
 					if (IS_ERR_OR_NULL((void *)
 						buf[cnt].client)) {
-						pr_err("%s: ION create client"
-						" for AUDIO failed\n",
+						pr_err("%s: ION create client for AUDIO failed\n",
 						__func__);
 						mutex_unlock(&ac->cmd_lock);
 						goto fail;
@@ -534,8 +529,7 @@
 						(0x1 << ION_AUDIO_HEAP_ID));
 					if (IS_ERR_OR_NULL((void *)
 						buf[cnt].handle)) {
-						pr_err("%s: ION memory"
-					" allocation for AUDIO failed\n",
+						pr_err("%s: ION memory allocation for AUDIO failed\n",
 							__func__);
 						mutex_unlock(&ac->cmd_lock);
 						goto fail;
@@ -547,8 +541,7 @@
 						&buf[cnt].phys,
 						(size_t *)&len);
 					if (rc) {
-						pr_err("%s: ION Get Physical"
-						" for AUDIO failed, rc = %d\n",
+						pr_err("%s: ION Get Physical for AUDIO failed, rc = %d\n",
 							__func__, rc);
 						mutex_unlock(&ac->cmd_lock);
 						goto fail;
@@ -559,8 +552,8 @@
 							 0);
 					if (IS_ERR_OR_NULL((void *)
 						buf[cnt].data)) {
-						pr_err("%s: ION memory"
-				" mapping for AUDIO failed\n", __func__);
+						pr_err("%s: ION memory mapping for AUDIO failed\n",
+								 __func__);
 						mutex_unlock(&ac->cmd_lock);
 						goto fail;
 					}
@@ -571,8 +564,8 @@
 					allocate_contiguous_ebi_nomap(bufsz,
 						SZ_4K);
 					if (!buf[cnt].phys) {
-						pr_err("%s:Buf alloc failed "
-						" size=%d\n", __func__,
+						pr_err("%s:Buf alloc failed size=%d\n",
+						 __func__,
 						bufsz);
 						mutex_unlock(&ac->cmd_lock);
 						goto fail;
@@ -581,17 +574,17 @@
 					ioremap(buf[cnt].phys, bufsz);
 					if (IS_ERR(
 						(void *)buf[cnt].mem_buffer)) {
-						pr_err("%s:map_buffer failed,"
-							"error = %ld\n",
-				__func__, PTR_ERR((void *)buf[cnt].mem_buffer));
+						pr_err("%s:map_buffer failed, error = %ld\n",
+					__func__,
+					 PTR_ERR((void *)buf[cnt].mem_buffer));
 						mutex_unlock(&ac->cmd_lock);
 						goto fail;
 					}
 					buf[cnt].data =
 						buf[cnt].mem_buffer;
 					if (!buf[cnt].data) {
-						pr_err("%s:invalid vaddr,"
-						" iomap failed\n", __func__);
+						pr_err("%s:invalid vaddr, iomap failed\n",
+						__func__);
 						mutex_unlock(&ac->cmd_lock);
 						goto fail;
 					}
@@ -697,17 +690,15 @@
 	buf[0].phys = allocate_contiguous_ebi_nomap(bufsz * bufcnt,
 						SZ_4K);
 	if (!buf[0].phys) {
-		pr_err("%s:Buf alloc failed "
-			" size=%d, bufcnt=%d\n", __func__,
-			bufsz, bufcnt);
+		pr_err("%s:Buf alloc failed size=%d, bufcnt=%d\n",
+		 __func__, bufsz, bufcnt);
 		mutex_unlock(&ac->cmd_lock);
 		goto fail;
 	}
 
 	buf[0].mem_buffer = ioremap(buf[0].phys, bufsz * bufcnt);
 	if (IS_ERR((void *)buf[cnt].mem_buffer)) {
-		pr_err("%s:map_buffer failed,"
-			"error = %ld\n",
+		pr_err("%s:map_buffer failed, error = %ld\n",
 			__func__, PTR_ERR((void *)buf[0].mem_buffer));
 
 		mutex_unlock(&ac->cmd_lock);
@@ -716,8 +707,7 @@
 	buf[0].data = buf[0].mem_buffer;
 #endif
 	if (!buf[0].data) {
-		pr_err("%s:invalid vaddr,"
-			" iomap failed\n", __func__);
+		pr_err("%s:invalid vaddr, iomap failed\n", __func__);
 		mutex_unlock(&ac->cmd_lock);
 		goto fail;
 	}
@@ -776,9 +766,8 @@
 		return 0;
 	}
 
-	pr_debug("%s:ptr0[0x%x]ptr1[0x%x]opcode[0x%x]"
-		"token[0x%x]payload_s[%d] src[%d] dest[%d]\n", __func__,
-		payload[0], payload[1], data->opcode, data->token,
+	pr_debug("%s:ptr0[0x%x]ptr1[0x%x]opcode[0x%x] token[0x%x]payload_s[%d] src[%d] dest[%d]\n",
+		 __func__, payload[0], payload[1], data->opcode, data->token,
 		data->payload_size, data->src_port, data->dest_port);
 
 	if (data->opcode == APR_BASIC_RSP_RESULT) {
@@ -836,8 +825,8 @@
 		return 0;
 	}
 
-	pr_debug("%s: session[%d]opcode[0x%x] \
-		token[0x%x]payload_s[%d] src[%d] dest[%d]\n", __func__,
+	pr_debug("%s: session[%d]opcode[0x%x] token[0x%x]payload_s[%d] src[%d] dest[%d]\n",
+		 __func__,
 		ac->session, data->opcode,
 		data->token, data->payload_size, data->src_port,
 		data->dest_port);
@@ -915,9 +904,8 @@
 				   out_cold_index*/
 				if (out_cold_index != 1) {
 					do_gettimeofday(&out_cold_tv);
-					pr_debug("COLD: apr_send_pkt at %ld \
-					sec %ld microsec\n",\
-					out_cold_tv.tv_sec,\
+					pr_debug("COLD: apr_send_pkt at %ld sec %ld microsec\n",
+					out_cold_tv.tv_sec,
 					out_cold_tv.tv_usec);
 					out_cold_index = 1;
 				}
@@ -953,8 +941,7 @@
 			 */
 			if (in_cont_index == 7) {
 				do_gettimeofday(&in_cont_tv);
-				pr_err("In_CONT:previous read buffer done \
-				at %ld sec %ld microsec\n",\
+				pr_err("In_CONT:previous read buffer done at %ld sec %ld microsec\n",
 				in_cont_tv.tv_sec, in_cont_tv.tv_usec);
 			}
 		}
@@ -971,9 +958,8 @@
 				payload[READDONE_IDX_ID],
 				payload[READDONE_IDX_NUMFRAMES]);
 #ifdef CONFIG_DEBUG_FS
-		if (in_enable_flag) {
+		if (in_enable_flag)
 			in_cont_index++;
-		}
 #endif
 		if (ac->io_mode == SYNC_IO_MODE) {
 			if (port->buf == NULL) {
@@ -1009,9 +995,8 @@
 		pr_err("ASM_SESSION_EVENT_TX_OVERFLOW\n");
 		break;
 	case ASM_SESSION_CMDRSP_GET_SESSION_TIME:
-		pr_debug("%s: ASM_SESSION_CMDRSP_GET_SESSION_TIME, "
-				"payload[0] = %d, payload[1] = %d, "
-				"payload[2] = %d\n", __func__,
+		pr_debug("%s: ASM_SESSION_CMDRSP_GET_SESSION_TIME, payload[0] = %d, payload[1] = %d, payload[2] = %d\n",
+				 __func__,
 				 payload[0], payload[1], payload[2]);
 		ac->time_stamp = (uint64_t)(((uint64_t)payload[1] << 32) |
 				payload[2]);
@@ -1022,9 +1007,8 @@
 		break;
 	case ASM_DATA_EVENT_SR_CM_CHANGE_NOTIFY:
 	case ASM_DATA_EVENT_ENC_SR_CM_NOTIFY:
-		pr_debug("%s: ASM_DATA_EVENT_SR_CM_CHANGE_NOTIFY, "
-				"payload[0] = %d, payload[1] = %d, "
-				"payload[2] = %d, payload[3] = %d\n", __func__,
+		pr_debug("%s: ASM_DATA_EVENT_SR_CM_CHANGE_NOTIFY, payload[0] = %d, payload[1] = %d, payload[2] = %d, payload[3] = %d\n",
+				 __func__,
 				payload[0], payload[1], payload[2],
 				payload[3]);
 		break;
@@ -1061,8 +1045,8 @@
 		if (port->buf[idx].used == dir) {
 			/* To make it more robust, we could loop and get the
 			next avail buf, its risky though */
-			pr_debug("%s:Next buf idx[0x%x] not available,\
-				dir[%d]\n", __func__, idx, dir);
+			pr_debug("%s:Next buf idx[0x%x] not available,dir[%d]\n",
+			 __func__, idx, dir);
 			mutex_unlock(&port->lock);
 			return NULL;
 		}
@@ -1111,8 +1095,8 @@
 		 * To make it more robust, we could loop and get the
 		 * next avail buf, its risky though
 		 */
-		pr_debug("%s:Next buf idx[0x%x] not available,\
-			dir[%d]\n", __func__, idx, dir);
+		pr_debug("%s:Next buf idx[0x%x] not available, dir[%d]\n",
+		 __func__, idx, dir);
 		return NULL;
 	}
 	*size = port->buf[idx].actual_size;
@@ -1594,8 +1578,8 @@
 	struct asm_stream_cmd_encdec_cfg_blk enc_cfg;
 	int rc = 0;
 
-	pr_debug("%s:session[%d]frames[%d]SR[%d]ch[%d]bitrate[%d]mode[%d]"
-		"format[%d]", __func__, ac->session, frames_per_buf,
+	pr_debug("%s:session[%d]frames[%d]SR[%d]ch[%d]bitrate[%d]mode[%d] format[%d]",
+		 __func__, ac->session, frames_per_buf,
 		sample_rate, channels, bit_rate, mode, format);
 
 	q6asm_add_hdr(ac, &enc_cfg.hdr, sizeof(enc_cfg), TRUE);
@@ -1670,6 +1654,47 @@
 	return -EINVAL;
 }
 
+int q6asm_enc_cfg_blk_pcm_native(struct audio_client *ac,
+			uint32_t rate, uint32_t channels)
+{
+	struct asm_stream_cmd_encdec_cfg_blk  enc_cfg;
+
+	int rc = 0;
+
+	pr_debug("%s: Session %d, rate = %d, channels = %d, setting the rate and channels to 0 for native\n",
+			 __func__, ac->session, rate, channels);
+
+	q6asm_add_hdr(ac, &enc_cfg.hdr, sizeof(enc_cfg), TRUE);
+
+	enc_cfg.hdr.opcode = ASM_STREAM_CMD_SET_ENCDEC_PARAM;
+	enc_cfg.param_id = ASM_ENCDEC_CFG_BLK_ID;
+	enc_cfg.param_size = sizeof(struct asm_encode_cfg_blk);
+	enc_cfg.enc_blk.frames_per_buf = 1;
+	enc_cfg.enc_blk.format_id = LINEAR_PCM;
+	enc_cfg.enc_blk.cfg_size = sizeof(struct asm_pcm_cfg);
+	enc_cfg.enc_blk.cfg.pcm.ch_cfg = 0;	/* 0 = use native channel count */
+	enc_cfg.enc_blk.cfg.pcm.bits_per_sample = 16;
+	enc_cfg.enc_blk.cfg.pcm.sample_rate = 0;	/* 0 = use native sample rate */
+	enc_cfg.enc_blk.cfg.pcm.is_signed = 1;
+	enc_cfg.enc_blk.cfg.pcm.interleaved = 1;
+
+	rc = apr_send_pkt(ac->apr, (uint32_t *) &enc_cfg);
+	if (rc < 0) {
+		pr_err("Command open failed\n");
+		rc = -EINVAL;
+		goto fail_cmd;
+	}
+	rc = wait_event_timeout(ac->cmd_wait,
+			(atomic_read(&ac->cmd_state) == 0), 5*HZ);
+	if (!rc) {
+		pr_err("timeout opcode[0x%x] ", enc_cfg.hdr.opcode);
+		goto fail_cmd;
+	}
+	return 0;
+fail_cmd:
+	return -EINVAL;
+}
+
 int q6asm_enc_cfg_blk_multi_ch_pcm(struct audio_client *ac,
 			uint32_t rate, uint32_t channels)
 {
@@ -1861,8 +1886,8 @@
 	struct asm_stream_cmd_encdec_cfg_blk enc_cfg;
 	int rc = 0;
 
-	pr_debug("%s:session[%d]frames[%d]min_rate[0x%4x]max_rate[0x%4x] \
-		reduced_rate_level[0x%4x]rate_modulation_cmd[0x%4x]", __func__,
+	pr_debug("%s:session[%d]frames[%d]min_rate[0x%4x]max_rate[0x%4x] reduced_rate_level[0x%4x]rate_modulation_cmd[0x%4x]",
+		 __func__,
 		ac->session, frames_per_buf, min_rate, max_rate,
 		reduced_rate_level, rate_modulation_cmd);
 
@@ -1904,8 +1929,8 @@
 	struct asm_stream_cmd_encdec_cfg_blk enc_cfg;
 	int rc = 0;
 
-	pr_debug("%s:session[%d]frames[%d]min_rate[0x%4x]max_rate[0x%4x] \
-		rate_modulation_cmd[0x%4x]", __func__, ac->session,
+	pr_debug("%s:session[%d]frames[%d]min_rate[0x%4x]max_rate[0x%4x] rate_modulation_cmd[0x%4x]",
+		 __func__, ac->session,
 		frames_per_buf,	min_rate, max_rate, rate_modulation_cmd);
 
 	q6asm_add_hdr(ac, &enc_cfg.hdr, sizeof(enc_cfg), TRUE);
@@ -2267,8 +2292,7 @@
 	struct asm_wma_cfg *wma_cfg = (struct asm_wma_cfg *)cfg;
 	int rc = 0;
 
-	pr_debug("session[%d]format_tag[0x%4x] rate[%d] ch[0x%4x] bps[%d],\
-		balign[0x%4x], bit_sample[0x%4x], ch_msk[%d], enc_opt[0x%4x]\n",
+	pr_debug("session[%d]format_tag[0x%4x] rate[%d] ch[0x%4x] bps[%d], balign[0x%4x], bit_sample[0x%4x], ch_msk[%d], enc_opt[0x%4x]\n",
 		ac->session, wma_cfg->format_tag, wma_cfg->sample_rate,
 		wma_cfg->ch_cfg, wma_cfg->avg_bytes_per_sec,
 		wma_cfg->block_align, wma_cfg->valid_bits_per_sample,
@@ -2319,9 +2343,7 @@
 	struct asm_wmapro_cfg *wmapro_cfg = (struct asm_wmapro_cfg *)cfg;
 	int rc = 0;
 
-	pr_debug("session[%d]format_tag[0x%4x] rate[%d] ch[0x%4x] bps[%d],"
-		"balign[0x%4x], bit_sample[0x%4x], ch_msk[%d], enc_opt[0x%4x],\
-		adv_enc_opt[0x%4x], adv_enc_opt2[0x%8x]\n",
+	pr_debug("session[%d]format_tag[0x%4x] rate[%d] ch[0x%4x] bps[%d], balign[0x%4x], bit_sample[0x%4x], ch_msk[%d], enc_opt[0x%4x], adv_enc_opt[0x%4x], adv_enc_opt2[0x%8x]\n",
 		ac->session, wmapro_cfg->format_tag, wmapro_cfg->sample_rate,
 		wmapro_cfg->ch_cfg,  wmapro_cfg->avg_bytes_per_sec,
 		wmapro_cfg->block_align, wmapro_cfg->valid_bits_per_sample,
@@ -2778,8 +2800,8 @@
 	params->period = pause_param->period;
 	params->step = pause_param->step;
 	params->rampingcurve = pause_param->rampingcurve;
-	pr_debug("%s: soft Pause Command: enable = %d, period = %d,"
-			 "step = %d, curve = %d\n", __func__, params->enable,
+	pr_debug("%s: soft Pause Command: enable = %d, period = %d, step = %d, curve = %d\n",
+			 __func__, params->enable,
 			 params->period, params->step, params->rampingcurve);
 	rc = apr_send_pkt(ac->apr, (uint32_t *) vol_cmd);
 	if (rc < 0) {
@@ -2791,8 +2813,8 @@
 	rc = wait_event_timeout(ac->cmd_wait,
 			(atomic_read(&ac->cmd_state) == 0), 5*HZ);
 	if (!rc) {
-		pr_err("%s: timeout in sending volume command(soft_pause)"
-		       "to apr\n", __func__);
+		pr_err("%s: timeout in sending volume command(soft_pause) to apr\n",
+						 __func__);
 		rc = -EINVAL;
 		goto fail_cmd;
 	}
@@ -2837,13 +2859,13 @@
 	params->period = softvol_param->period;
 	params->step = softvol_param->step;
 	params->rampingcurve = softvol_param->rampingcurve;
-	pr_debug("%s: soft Volume:opcode = %d,payload_sz =%d,module_id =%d,"
-			 "param_id = %d, param_sz = %d\n", __func__,
+	pr_debug("%s: soft Volume:opcode = %d,payload_sz =%d,module_id =%d, param_id = %d, param_sz = %d\n",
+			 __func__,
 			cmd->hdr.opcode, cmd->payload_size,
 			cmd->params.module_id, cmd->params.param_id,
 			cmd->params.param_size);
-	pr_debug("%s: soft Volume Command: period = %d,"
-			 "step = %d, curve = %d\n", __func__, params->period,
+	pr_debug("%s: soft Volume Command: period = %d, step = %d, curve = %d\n",
+			 __func__, params->period,
 			 params->step, params->rampingcurve);
 	rc = apr_send_pkt(ac->apr, (uint32_t *) vol_cmd);
 	if (rc < 0) {
@@ -2855,8 +2877,8 @@
 	rc = wait_event_timeout(ac->cmd_wait,
 			(atomic_read(&ac->cmd_state) == 0), 5*HZ);
 	if (!rc) {
-		pr_err("%s: timeout in sending volume command(soft_volume)"
-		       "to apr\n", __func__);
+		pr_err("%s: timeout in sending volume command(soft_volume) to apr\n",
+							 __func__);
 		rc = -EINVAL;
 		goto fail_cmd;
 	}
@@ -3197,8 +3219,8 @@
 			if ((strncmp(((char *)ab->data), zero_pattern, 2)) &&
 			(!strncmp(((char *)ab->data + 2), zero_pattern, 2))) {
 				do_gettimeofday(&out_warm_tv);
-				pr_debug("WARM:apr_send_pkt at \
-				%ld sec %ld microsec\n", out_warm_tv.tv_sec,\
+				pr_debug("WARM:apr_send_pkt at %ld sec %ld microsec\n",
+				 out_warm_tv.tv_sec,
 				out_warm_tv.tv_usec);
 				pr_debug("Warm Pattern Matched");
 			}
@@ -3207,8 +3229,8 @@
 			else if ((!strncmp(((char *)ab->data), zero_pattern, 2))
 			&& (strncmp(((char *)ab->data + 2), zero_pattern, 2))) {
 				do_gettimeofday(&out_cont_tv);
-				pr_debug("CONT:apr_send_pkt at \
-				%ld sec %ld microsec\n", out_cont_tv.tv_sec,\
+				pr_debug("CONT:apr_send_pkt at %ld sec %ld microsec\n",
+				out_cont_tv.tv_sec,
 				out_cont_tv.tv_usec);
 				pr_debug("Cont Pattern Matched");
 			}
diff --git a/sound/soc/msm/qdsp6v2/q6asm.c b/sound/soc/msm/qdsp6v2/q6asm.c
index f982134..0bb88e8 100644
--- a/sound/soc/msm/qdsp6v2/q6asm.c
+++ b/sound/soc/msm/qdsp6v2/q6asm.c
@@ -84,6 +84,8 @@
 				uint32_t bufsz, uint32_t bufcnt);
 static void q6asm_reset_buf_state(struct audio_client *ac);
 
+static int q6asm_map_channels(u8 *channel_mapping, uint32_t channels);
+
 
 #ifdef CONFIG_DEBUG_FS
 #define OUT_BUFFER_SIZE 56
@@ -196,8 +198,7 @@
 		out_cold_index*/
 		if (out_cold_index != 1) {
 			do_gettimeofday(&out_cold_tv);
-			pr_debug("COLD: apr_send_pkt at %ld"
-				"sec %ld microsec\n",\
+			pr_debug("COLD: apr_send_pkt at %ld sec %ld microsec\n",
 				out_cold_tv.tv_sec,\
 				out_cold_tv.tv_usec);
 			out_cold_index = 1;
@@ -222,8 +223,7 @@
 		*/
 		if (in_cont_index == 7) {
 			do_gettimeofday(&in_cont_tv);
-			pr_err("In_CONT:previous read buffer done"
-				"at %ld sec %ld microsec\n",\
+			pr_err("In_CONT:previous read buffer done at %ld sec %ld microsec\n",
 				in_cont_tv.tv_sec, in_cont_tv.tv_usec);
 		}
 		in_cont_index++;
@@ -253,8 +253,8 @@
 		if ((strncmp(((char *)ab->data), zero_pattern, 2)) &&
 		(!strncmp(((char *)ab->data + 2), zero_pattern, 2))) {
 			do_gettimeofday(&out_warm_tv);
-			pr_debug("WARM:apr_send_pkt at"
-			"%ld sec %ld microsec\n", out_warm_tv.tv_sec,\
+			pr_debug("WARM:apr_send_pkt at %ld sec %ld microsec\n",
+			 out_warm_tv.tv_sec,
 			out_warm_tv.tv_usec);
 			pr_debug("Warm Pattern Matched");
 		}
@@ -263,8 +263,8 @@
 		else if ((!strncmp(((char *)ab->data), zero_pattern, 2))
 		&& (strncmp(((char *)ab->data + 2), zero_pattern, 2))) {
 			do_gettimeofday(&out_cont_tv);
-			pr_debug("CONT:apr_send_pkt at"
-			"%ld sec %ld microsec\n", out_cont_tv.tv_sec,\
+			pr_debug("CONT:apr_send_pkt at %ld sec %ld microsec\n",
+			out_cont_tv.tv_sec,
 			out_cont_tv.tv_usec);
 			pr_debug("Cont Pattern Matched");
 		}
@@ -410,8 +410,7 @@
 		ion_unmap_kernel(port->buf[0].client, port->buf[0].handle);
 		ion_free(port->buf[0].client, port->buf[0].handle);
 		ion_client_destroy(port->buf[0].client);
-		pr_debug("%s:data[%p]phys[%p][%p]"
-			", client[%p] handle[%p]\n",
+		pr_debug("%s:data[%p]phys[%p][%p] , client[%p] handle[%p]\n",
 			__func__,
 			(void *)port->buf[0].data,
 			(void *)port->buf[0].phys,
@@ -479,13 +478,16 @@
 
 int q6asm_set_io_mode(struct audio_client *ac, uint32_t mode)
 {
 	if (ac == NULL) {
 		pr_err("%s APR handle NULL\n", __func__);
 		return -EINVAL;
 	}
+	ac->io_mode &= 0xFF00;
+	pr_debug("%s: ac->io_mode after masking with 0xFF00: 0x%x\n",
+		__func__, ac->io_mode);
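+	/*
+	 * The 0xFF00 mask above preserves stream flags such as NT_MODE, while
+	 * the low byte carries the SYNC/ASYNC selection that is OR-ed in below.
+	 */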
 	if ((mode == ASYNC_IO_MODE) || (mode == SYNC_IO_MODE)) {
-		ac->io_mode = mode;
-		pr_debug("%s:Set Mode to %d\n", __func__, ac->io_mode);
+		ac->io_mode |= mode;
+		pr_debug("%s:Set Mode to 0x[%x]\n", __func__, ac->io_mode);
 		return 0;
 	} else {
 		pr_err("%s:Not an valid IO Mode:%d\n", __func__, ac->io_mode);
@@ -500,8 +502,8 @@
 					(apr_fn)q6asm_mmapcallback,\
 					0x0FFFFFFFF, &this_mmap);
 		if (this_mmap.apr == NULL) {
-			pr_debug("%s Unable to register"
-				"APR ASM common port\n", __func__);
+			pr_debug("%s Unable to register APR ASM common port\n",
+			 __func__);
 			goto fail;
 		}
 	}
@@ -624,8 +626,7 @@
 						(UINT_MAX, "audio_client");
 					if (IS_ERR_OR_NULL((void *)
 						buf[cnt].client)) {
-						pr_err("%s: ION create client"
-						" for AUDIO failed\n",
+						pr_err("%s: ION create client for AUDIO failed\n",
 						__func__);
 						goto fail;
 					}
@@ -634,8 +635,7 @@
 						(0x1 << ION_AUDIO_HEAP_ID));
 					if (IS_ERR_OR_NULL((void *)
 						buf[cnt].handle)) {
-						pr_err("%s: ION memory"
-					" allocation for AUDIO failed\n",
+						pr_err("%s: ION memory allocation for AUDIO failed\n",
 							__func__);
 						goto fail;
 					}
@@ -646,8 +646,7 @@
 						&buf[cnt].phys,
 						(size_t *)&len);
 					if (rc) {
-						pr_err("%s: ION Get Physical"
-						" for AUDIO failed, rc = %d\n",
+						pr_err("%s: ION Get Physical for AUDIO failed, rc = %d\n",
 							__func__, rc);
 						goto fail;
 					}
@@ -657,8 +656,8 @@
 							 0);
 					if (IS_ERR_OR_NULL((void *)
 						buf[cnt].data)) {
-						pr_err("%s: ION memory"
-				" mapping for AUDIO failed\n", __func__);
+						pr_err("%s: ION memory mapping for AUDIO failed\n",
+						 __func__);
 						goto fail;
 					}
 					memset((void *)buf[cnt].data, 0, bufsz);
@@ -752,8 +751,7 @@
 	}
 	memset((void *)buf[0].data, 0, (bufsz * bufcnt));
 	if (!buf[0].data) {
-		pr_err("%s:invalid vaddr,"
-			" iomap failed\n", __func__);
+		pr_err("%s:invalid vaddr, iomap failed\n", __func__);
 		mutex_unlock(&ac->cmd_lock);
 		goto fail;
 	}
@@ -822,8 +820,7 @@
 	}
 	sid = (data->token >> 8) & 0x0F;
 	ac = q6asm_get_audio_client(sid);
-	pr_debug("%s:ptr0[0x%x]ptr1[0x%x]opcode[0x%x]"
-		"token[0x%x]payload_s[%d] src[%d] dest[%d]sid[%d]dir[%d]\n",
+	pr_debug("%s:ptr0[0x%x]ptr1[0x%x]opcode[0x%x] token[0x%x]payload_s[%d] src[%d] dest[%d]sid[%d]dir[%d]\n",
 		__func__, payload[0], payload[1], data->opcode, data->token,
 		data->payload_size, data->src_port, data->dest_port, sid, dir);
 	pr_debug("%s:Payload = [0x%x] status[0x%x]\n",
@@ -918,8 +915,8 @@
 		return 0;
 	}
 
-	pr_debug("%s: session[%d]opcode[0x%x]"
-		"token[0x%x]payload_s[%d] src[%d] dest[%d]\n", __func__,
+	pr_debug("%s: session[%d]opcode[0x%x] token[0x%x]payload_s[%d] src[%d] dest[%d]\n",
+		 __func__,
 		ac->session, data->opcode,
 		data->token, data->payload_size, data->src_port,
 		data->dest_port);
@@ -1060,9 +1057,8 @@
 		pr_err("ASM_SESSION_EVENTX_OVERFLOW\n");
 		break;
 	case ASM_SESSION_CMDRSP_GET_SESSIONTIME_V3:
-		pr_debug("%s: ASM_SESSION_CMDRSP_GET_SESSIONTIME_V3, "
-				"payload[0] = %d, payload[1] = %d, "
-				"payload[2] = %d\n", __func__,
+		pr_debug("%s: ASM_SESSION_CMDRSP_GET_SESSIONTIME_V3, payload[0] = %d, payload[1] = %d, payload[2] = %d\n",
+				 __func__,
 				 payload[0], payload[1], payload[2]);
 		ac->time_stamp = (uint64_t)(((uint64_t)payload[1] << 32) |
 				payload[2]);
@@ -1073,9 +1069,8 @@
 		break;
 	case ASM_DATA_EVENT_SR_CM_CHANGE_NOTIFY:
 	case ASM_DATA_EVENT_ENC_SR_CM_CHANGE_NOTIFY:
-		pr_debug("%s: ASM_DATA_EVENT_SR_CM_CHANGE_NOTIFY, "
-				"payload[0] = %d, payload[1] = %d, "
-				"payload[2] = %d, payload[3] = %d\n", __func__,
+		pr_debug("%s: ASM_DATA_EVENT_SR_CM_CHANGE_NOTIFY, payload[0] = %d, payload[1] = %d, payload[2] = %d, payload[3] = %d\n",
+				 __func__,
 				payload[0], payload[1], payload[2],
 				payload[3]);
 		break;
@@ -1112,8 +1107,8 @@
 		if (port->buf[idx].used == dir) {
 			/* To make it more robust, we could loop and get the
 			next avail buf, its risky though */
-			pr_debug("%s:Next buf idx[0x%x] not available,"
-				"dir[%d]\n", __func__, idx, dir);
+			pr_debug("%s:Next buf idx[0x%x] not available, dir[%d]\n",
+			 __func__, idx, dir);
 			mutex_unlock(&port->lock);
 			return NULL;
 		}
@@ -1162,8 +1157,8 @@
 		 * To make it more robust, we could loop and get the
 		 * next avail buf, its risky though
 		 */
-		pr_debug("%s:Next buf idx[0x%x] not available,"
-			"dir[%d]\n", __func__, idx, dir);
+		pr_debug("%s:Next buf idx[0x%x] not available, dir[%d]\n",
+		 __func__, idx, dir);
 		return NULL;
 	}
 	*size = port->buf[idx].actual_size;
@@ -1427,6 +1422,7 @@
 	pr_debug("wr_format[0x%x]rd_format[0x%x]",
 				wr_format, rd_format);
 
+	ac->io_mode |= NT_MODE;
 	q6asm_add_hdr(ac, &open.hdr, sizeof(open), TRUE);
 	open.hdr.opcode = ASM_STREAM_CMD_OPEN_READWRITE_V2;
 
@@ -1593,8 +1589,8 @@
 	struct asm_aac_enc_cfg_v2 enc_cfg;
 	int rc = 0;
 
-	pr_debug("%s:session[%d]frames[%d]SR[%d]ch[%d]bitrate[%d]mode[%d]"
-		"format[%d]", __func__, ac->session, frames_per_buf,
+	pr_debug("%s:session[%d]frames[%d]SR[%d]ch[%d]bitrate[%d]mode[%d] format[%d]",
+		 __func__, ac->session, frames_per_buf,
 		sample_rate, channels, bit_rate, mode, format);
 
 	q6asm_add_hdr(ac, &enc_cfg.hdr, sizeof(enc_cfg), TRUE);
@@ -1632,8 +1628,41 @@
 int q6asm_set_encdec_chan_map(struct audio_client *ac,
 			uint32_t num_channels)
 {
-	/* Todo: */
+	struct asm_dec_out_chan_map_param chan_map;
+	u8 *channel_mapping;
+	int rc = 0;
+	pr_debug("%s: Session %d, num_channels = %d\n",
+			 __func__, ac->session, num_channels);
+	q6asm_add_hdr(ac, &chan_map.hdr, sizeof(chan_map), TRUE);
+	chan_map.hdr.opcode = ASM_STREAM_CMD_SET_ENCDEC_PARAM;
+	chan_map.encdec.param_id = ASM_PARAM_ID_DEC_OUTPUT_CHAN_MAP;
+	chan_map.encdec.param_size = sizeof(struct asm_dec_out_chan_map_param) -
+			 (sizeof(struct apr_hdr) +
+			 sizeof(struct asm_stream_cmd_set_encdec_param));
+	chan_map.num_channels = num_channels;
+	channel_mapping = chan_map.channel_mapping;
+	memset(channel_mapping, PCM_CHANNEL_NULL, MAX_CHAN_MAP_CHANNELS);
+	if (q6asm_map_channels(channel_mapping, num_channels))
+		return -EINVAL;
+
+	rc = apr_send_pkt(ac->apr, (uint32_t *) &chan_map);
+	if (rc < 0) {
+		pr_err("%s:Command opcode[0x%x]paramid[0x%x] failed\n",
+			   __func__, ASM_STREAM_CMD_SET_ENCDEC_PARAM,
+			   ASM_PARAM_ID_DEC_OUTPUT_CHAN_MAP);
+		goto fail_cmd;
+	}
+	rc = wait_event_timeout(ac->cmd_wait,
+				 (atomic_read(&ac->cmd_state) == 0), 5*HZ);
+	if (!rc) {
+		pr_err("%s:timeout opcode[0x%x]\n", __func__,
+			   chan_map.hdr.opcode);
+		rc = -ETIMEDOUT;
+		goto fail_cmd;
+	}
 	return 0;
+fail_cmd:
+	return rc;
 }
 
 int q6asm_enc_cfg_blk_pcm(struct audio_client *ac,
@@ -1665,23 +1694,8 @@
 
 	memset(channel_mapping, 0, PCM_FORMAT_MAX_NUM_CHANNEL);
 
-	if (channels == 1)  {
-		channel_mapping[0] = PCM_CHANNEL_FL;
-	} else if (channels == 2) {
-		channel_mapping[0] = PCM_CHANNEL_FL;
-		channel_mapping[1] = PCM_CHANNEL_FR;
-	} else if (channels == 6) {
-		channel_mapping[0] = PCM_CHANNEL_FC;
-		channel_mapping[1] = PCM_CHANNEL_FL;
-		channel_mapping[2] = PCM_CHANNEL_FR;
-		channel_mapping[3] = PCM_CHANNEL_LB;
-		channel_mapping[4] = PCM_CHANNEL_RB;
-		channel_mapping[5] = PCM_CHANNEL_LFE;
-	} else {
-		pr_err("%s: ERROR.unsupported num_ch = %u\n", __func__,
-				channels);
+	if (q6asm_map_channels(channel_mapping, channels))
 		return -EINVAL;
-	}
 
 	rc = apr_send_pkt(ac->apr, (uint32_t *) &enc_cfg);
 	if (rc < 0) {
@@ -1700,6 +1714,96 @@
 	return -EINVAL;
 }
 
+int q6asm_enc_cfg_blk_pcm_native(struct audio_client *ac,
+			uint32_t rate, uint32_t channels)
+{
+	struct asm_multi_channel_pcm_enc_cfg_v2  enc_cfg;
+	u8 *channel_mapping;
+	u32 frames_per_buf = 0;
+
+	int rc = 0;
+
+	pr_debug("%s: Session %d, rate = %d, channels = %d\n", __func__,
+			 ac->session, rate, channels);
+
+	q6asm_add_hdr(ac, &enc_cfg.hdr, sizeof(enc_cfg), TRUE);
+
+	enc_cfg.hdr.opcode = ASM_STREAM_CMD_SET_ENCDEC_PARAM;
+	enc_cfg.encdec.param_id = ASM_PARAM_ID_ENCDEC_ENC_CFG_BLK_V2;
+	enc_cfg.encdec.param_size = sizeof(enc_cfg) - sizeof(enc_cfg.hdr) -
+				 sizeof(enc_cfg.encdec);
+	enc_cfg.encblk.frames_per_buf = frames_per_buf;
+	enc_cfg.encblk.enc_cfg_blk_size  = enc_cfg.encdec.param_size -
+				sizeof(struct asm_enc_cfg_blk_param_v2);
+
+	enc_cfg.num_channels = 0;	/* 0 = use native channel count */
+	enc_cfg.bits_per_sample = 16;
+	enc_cfg.sample_rate = 0;	/* 0 = use native sample rate */
+	enc_cfg.is_signed = 1;
+	channel_mapping = enc_cfg.channel_mapping;
+
+	memset(channel_mapping, 0, PCM_FORMAT_MAX_NUM_CHANNEL);
+
+	if (q6asm_map_channels(channel_mapping, channels))
+		return -EINVAL;
+
+	rc = apr_send_pkt(ac->apr, (uint32_t *) &enc_cfg);
+	if (rc < 0) {
+		pr_err("Command open failed\n");
+		rc = -EINVAL;
+		goto fail_cmd;
+	}
+	rc = wait_event_timeout(ac->cmd_wait,
+			(atomic_read(&ac->cmd_state) == 0), 5*HZ);
+	if (!rc) {
+		pr_err("timeout opcode[0x%x] ", enc_cfg.hdr.opcode);
+		goto fail_cmd;
+	}
+	return 0;
+fail_cmd:
+	return -EINVAL;
+}
+
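+/*
+ * Fill channel_mapping[] with default speaker positions for 1..6 channels
+ * (mono maps to FC; larger layouts add FL/FR/LB/RB/LFE). Returns -EINVAL
+ * for unsupported channel counts.
+ */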
+static int q6asm_map_channels(u8 *channel_mapping, uint32_t channels)
+{
+	u8 *lchannel_mapping;
+	lchannel_mapping = channel_mapping;
+	pr_debug("%s channels passed: %d\n", __func__, channels);
+	if (channels == 1)  {
+		lchannel_mapping[0] = PCM_CHANNEL_FC;
+	} else if (channels == 2) {
+		lchannel_mapping[0] = PCM_CHANNEL_FL;
+		lchannel_mapping[1] = PCM_CHANNEL_FR;
+	} else if (channels == 3) {
+		lchannel_mapping[0] = PCM_CHANNEL_FC;
+		lchannel_mapping[1] = PCM_CHANNEL_FL;
+		lchannel_mapping[2] = PCM_CHANNEL_FR;
+	} else if (channels == 4) {
+		lchannel_mapping[0] = PCM_CHANNEL_FC;
+		lchannel_mapping[1] = PCM_CHANNEL_FL;
+		lchannel_mapping[2] = PCM_CHANNEL_FR;
+		lchannel_mapping[3] = PCM_CHANNEL_LB;
+	} else if (channels == 5) {
+		lchannel_mapping[0] = PCM_CHANNEL_FC;
+		lchannel_mapping[1] = PCM_CHANNEL_FL;
+		lchannel_mapping[2] = PCM_CHANNEL_FR;
+		lchannel_mapping[3] = PCM_CHANNEL_LB;
+		lchannel_mapping[4] = PCM_CHANNEL_RB;
+	} else if (channels == 6) {
+		lchannel_mapping[0] = PCM_CHANNEL_FC;
+		lchannel_mapping[1] = PCM_CHANNEL_FL;
+		lchannel_mapping[2] = PCM_CHANNEL_FR;
+		lchannel_mapping[3] = PCM_CHANNEL_LB;
+		lchannel_mapping[4] = PCM_CHANNEL_RB;
+		lchannel_mapping[5] = PCM_CHANNEL_LFE;
+	} else {
+		pr_err("%s: ERROR.unsupported num_ch = %u\n",
+		 __func__, channels);
+		return -EINVAL;
+	}
+	return 0;
+}
+
 int q6asm_enable_sbrps(struct audio_client *ac,
 			uint32_t sbr_ps_enable)
 {
@@ -1791,8 +1895,8 @@
 	struct asm_v13k_enc_cfg enc_cfg;
 	int rc = 0;
 
-	pr_debug("%s:session[%d]frames[%d]min_rate[0x%4x]max_rate[0x%4x]"
-		"reduced_rate_level[0x%4x]rate_modulation_cmd[0x%4x]", __func__,
+	pr_debug("%s:session[%d]frames[%d]min_rate[0x%4x]max_rate[0x%4x] reduced_rate_level[0x%4x]rate_modulation_cmd[0x%4x]",
+		 __func__,
 		ac->session, frames_per_buf, min_rate, max_rate,
 		reduced_rate_level, rate_modulation_cmd);
 
@@ -1833,8 +1937,8 @@
 	struct asm_evrc_enc_cfg enc_cfg;
 	int rc = 0;
 
-	pr_debug("%s:session[%d]frames[%d]min_rate[0x%4x]max_rate[0x%4x]"
-		"rate_modulation_cmd[0x%4x]", __func__, ac->session,
+	pr_debug("%s:session[%d]frames[%d]min_rate[0x%4x]max_rate[0x%4x] rate_modulation_cmd[0x%4x]",
+		 __func__, ac->session,
 		frames_per_buf,	min_rate, max_rate, rate_modulation_cmd);
 
 	q6asm_add_hdr(ac, &enc_cfg.hdr, sizeof(enc_cfg), TRUE);
@@ -1972,23 +2076,8 @@
 
 	memset(channel_mapping, 0, PCM_FORMAT_MAX_NUM_CHANNEL);
 
-	if (channels == 1)  {
-		channel_mapping[0] = PCM_CHANNEL_FL;
-	} else if (channels == 2) {
-		channel_mapping[0] = PCM_CHANNEL_FL;
-		channel_mapping[1] = PCM_CHANNEL_FR;
-	} else if (channels == 6) {
-		channel_mapping[0] = PCM_CHANNEL_FC;
-		channel_mapping[1] = PCM_CHANNEL_FL;
-		channel_mapping[2] = PCM_CHANNEL_FR;
-		channel_mapping[3] = PCM_CHANNEL_LB;
-		channel_mapping[4] = PCM_CHANNEL_RB;
-		channel_mapping[5] = PCM_CHANNEL_LFE;
-	} else {
-		pr_err("%s: ERROR.unsupported num_ch = %u\n", __func__,
-				channels);
+	if (q6asm_map_channels(channel_mapping, channels))
 		return -EINVAL;
-	}
 
 	rc = apr_send_pkt(ac->apr, (uint32_t *) &fmt);
 	if (rc < 0) {
@@ -2056,8 +2145,7 @@
 	struct asm_wma_cfg *wma_cfg = (struct asm_wma_cfg *)cfg;
 	int rc = 0;
 
-	pr_debug("session[%d]format_tag[0x%4x] rate[%d] ch[0x%4x] bps[%d],"
-		"balign[0x%4x], bit_sample[0x%4x], ch_msk[%d], enc_opt[0x%4x]\n",
+	pr_debug("session[%d]format_tag[0x%4x] rate[%d] ch[0x%4x] bps[%d], balign[0x%4x], bit_sample[0x%4x], ch_msk[%d], enc_opt[0x%4x]\n",
 		ac->session, wma_cfg->format_tag, wma_cfg->sample_rate,
 		wma_cfg->ch_cfg, wma_cfg->avg_bytes_per_sec,
 		wma_cfg->block_align, wma_cfg->valid_bits_per_sample,
@@ -2065,8 +2153,9 @@
 
 	q6asm_add_hdr(ac, &fmt.hdr, sizeof(fmt), TRUE);
 
-	fmt.hdr.opcode = ASM_MEDIA_FMT_WMA_V9_V2;
-
+	fmt.hdr.opcode = ASM_DATA_CMD_MEDIA_FMT_UPDATE_V2;
+	fmt.fmtblk.fmt_blk_size = sizeof(fmt) - sizeof(fmt.hdr) -
+					sizeof(fmt.fmtblk);
 	fmt.fmtag = wma_cfg->format_tag;
 	fmt.num_channels = wma_cfg->ch_cfg;
 	fmt.sample_rate = wma_cfg->sample_rate;
@@ -2100,9 +2189,7 @@
 	struct asm_wmapro_cfg *wmapro_cfg = (struct asm_wmapro_cfg *)cfg;
 	int rc = 0;
 
-	pr_debug("session[%d]format_tag[0x%4x] rate[%d] ch[0x%4x] bps[%d],"
-		"balign[0x%4x], bit_sample[0x%4x], ch_msk[%d], enc_opt[0x%4x],"
-		"adv_enc_opt[0x%4x], adv_enc_opt2[0x%8x]\n",
+	pr_debug("session[%d]format_tag[0x%4x] rate[%d] ch[0x%4x] bps[%d], balign[0x%4x], bit_sample[0x%4x], ch_msk[%d], enc_opt[0x%4x], adv_enc_opt[0x%4x], adv_enc_opt2[0x%8x]\n",
 		ac->session, wmapro_cfg->format_tag, wmapro_cfg->sample_rate,
 		wmapro_cfg->ch_cfg,  wmapro_cfg->avg_bytes_per_sec,
 		wmapro_cfg->block_align, wmapro_cfg->valid_bits_per_sample,
@@ -2111,7 +2198,9 @@
 
 	q6asm_add_hdr(ac, &fmt.hdr, sizeof(fmt), TRUE);
 
-	fmt.hdr.opcode = ASM_MEDIA_FMT_WMA_V10PRO_V2;
+	fmt.hdr.opcode = ASM_DATA_CMD_MEDIA_FMT_UPDATE_V2;
+	fmt.fmtblk.fmt_blk_size = sizeof(fmt) - sizeof(fmt.hdr) -
+						sizeof(fmt.fmtblk);
 
 	fmt.fmtag = wmapro_cfg->format_tag;
 	fmt.num_channels = wmapro_cfg->ch_cfg;
@@ -2147,12 +2236,10 @@
 	struct avs_cmd_shared_mem_map_regions *mmap_regions = NULL;
 	struct avs_shared_map_region_payload  *mregions = NULL;
 	struct audio_port_data *port = NULL;
-	struct audio_buffer *ab = NULL;
 	void	*mmap_region_cmd = NULL;
 	void	*payload = NULL;
 	struct asm_buffer_node *buffer_node = NULL;
 	int	rc = 0;
-	int	i = 0;
 	int	cmd_size = 0;
 
 	if (!ac || ac->apr == NULL || ac->mmap_apr == NULL) {
@@ -2181,21 +2268,18 @@
 	mmap_regions->mem_pool_id = ADSP_MEMORY_MAP_EBI_POOL;
 	mmap_regions->num_regions = bufcnt & 0x00ff;
 	mmap_regions->property_flag = 0x00;
-	pr_debug("map_regions->nregions = %d\n", mmap_regions->num_regions);
 	payload = ((u8 *) mmap_region_cmd +
 		sizeof(struct avs_cmd_shared_mem_map_regions));
 	mregions = (struct avs_shared_map_region_payload *)payload;
 
 	ac->port[dir].tmp_hdl = 0;
 	port = &ac->port[dir];
-	for (i = 0; i < bufcnt; i++) {
-		ab = &port->buf[i];
-		mregions->shm_addr_lsw = ab->phys;
-		/* Using only 32 bit address */
-		mregions->shm_addr_msw = 0;
-		mregions->mem_size_bytes = ab->size;
-		++mregions;
-	}
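+	/* Map the caller's buffer as a single contiguous shared-memory region. */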
+	pr_debug("%s: buf_add 0x%x, bufsz: %d\n", __func__, buf_add, bufsz);
+	mregions->shm_addr_lsw = buf_add;
+	/* Using only 32 bit address */
+	mregions->shm_addr_msw = 0;
+	mregions->mem_size_bytes = bufsz;
+	++mregions;
 
 	rc = apr_send_pkt(ac->mmap_apr, (uint32_t *) mmap_region_cmd);
 	if (rc < 0) {
@@ -2295,7 +2379,7 @@
 	void	*payload = NULL;
 	struct asm_buffer_node *buffer_node = NULL;
 	int	rc = 0;
-	int	i = 0;
+	int    i = 0;
 	int	cmd_size = 0;
 
 	if (!ac || ac->apr == NULL || ac->mmap_apr == NULL) {
@@ -2351,7 +2435,6 @@
 	rc = wait_event_timeout(ac->cmd_wait,
 			(atomic_read(&ac->cmd_state) == 0)
 			 , 5*HZ);
-			 /*ac->port[dir].tmp_hdl), 5*HZ);*/
 	if (!rc) {
 		pr_err("timeout. waited for memory_map\n");
 		rc = -EINVAL;
@@ -2843,8 +2926,6 @@
 					read.buf_addr_lsw,
 					read.hdr.token,
 					read.seq_id);
-		pr_debug("q6asm_read_nolock mem-map handle is %x",
-				read.mem_map_handle);
 		rc = apr_send_pkt(ac->apr, (uint32_t *) &read);
 		if (rc < 0) {
 			pr_err("read op[0x%x]rc[%d]\n", read.hdr.opcode, rc);
@@ -2865,6 +2946,8 @@
 	struct list_head *ptr, *next;
 	struct audio_buffer        *ab;
 	struct audio_port_data     *port;
+	u32 lbuf_addr_lsw;
+	u32 liomode;
 
 	if (!ac || ac->apr == NULL) {
 		pr_err("%s: APR handle NULL\n", __func__);
@@ -2884,11 +2967,21 @@
 	write.buf_size = param->len;
 	write.timestamp_msw = param->msw_ts;
 	write.timestamp_lsw = param->lsw_ts;
-	pr_debug("%s: token[0x%x], buf_addr_lsw[0x%x], buf_size[0x%x],"
-		"ts_msw[0x%x], ts_lsw[0x%x]\n",
-		__func__, write.hdr.token, write.buf_addr_lsw,
+	liomode = (ASYNC_IO_MODE | NT_MODE);
+
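+	/*
+	 * In non-tunnel async mode the data pointer sits 32 bytes past the
+	 * start of the mapped region (metadata header), so adjust the address
+	 * before looking up its mmap handle.
+	 */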
+	if (ac->io_mode == liomode) {
+		pr_info("%s: subtracting 32 for header\n", __func__);
+		lbuf_addr_lsw = (write.buf_addr_lsw - 32);
+	} else {
+		lbuf_addr_lsw = write.buf_addr_lsw;
+	}
+
+	pr_debug("%s: token[0x%x], buf_addr_lsw[0x%x], buf_size[0x%x], ts_msw[0x%x], ts_lsw[0x%x], lbuf_addr_lsw: 0x[%x]\n",
+		__func__,
+		write.hdr.token, write.buf_addr_lsw,
 		write.buf_size, write.timestamp_msw,
-		write.timestamp_lsw);
+		write.timestamp_lsw, lbuf_addr_lsw);
+
 	/* Use 0xFF00 for disabling timestamps */
 	if (param->flags == 0xFF00)
 		write.flags = (0x00000000 | (param->flags & 0x800000FF));
@@ -2899,21 +2992,12 @@
 	list_for_each_safe(ptr, next, &ac->port[IN].mem_map_handle) {
 		buf_node = list_entry(ptr, struct asm_buffer_node,
 						list);
-		if (buf_node->buf_addr_lsw == (uint32_t)write.buf_addr_lsw) {
+		if (buf_node->buf_addr_lsw == lbuf_addr_lsw) {
 			write.mem_map_handle = buf_node->mmap_hdl;
-			pr_debug("%s:buf_node->mmap_hdl = 0x%x,"
-				"write.mem_map_handle = 0x%x\n",
-					__func__,
-					buf_node->mmap_hdl,
-					(uint32_t)write.mem_map_handle);
 			break;
 		}
 	}
 
-	pr_debug("%s: session[%d] bufadd[0x%x]len[0x%x],"
-			"mem_map_handle[0x%x]\n", __func__, ac->session,
-		write.buf_addr_lsw, write.buf_size, write.mem_map_handle);
-
 	rc = apr_send_pkt(ac->apr, (uint32_t *) &write);
 	if (rc < 0) {
 		pr_debug("[%s] write op[0x%x]rc[%d]\n", __func__,
@@ -2932,6 +3016,8 @@
 	struct asm_data_cmd_read_v2 read;
 	struct asm_buffer_node *buf_node = NULL;
 	struct list_head *ptr, *next;
+	u32 lbuf_addr_lsw;
+	u32 liomode;
 
 	if (!ac || ac->apr == NULL) {
 		pr_err("%s: APR handle NULL\n", __func__);
@@ -2947,16 +3033,21 @@
 	read.buf_addr_msw = 0;
 	read.buf_size = param->len;
 	read.seq_id = param->uid;
-
-	list_for_each_safe(ptr, next, &ac->port[IN].mem_map_handle) {
-		buf_node = list_entry(ptr, struct asm_buffer_node,
-						list);
-			if (buf_node->buf_addr_lsw == param->paddr)
-				read.mem_map_handle = buf_node->mmap_hdl;
+	liomode = (NT_MODE | ASYNC_IO_MODE);
+	if (ac->io_mode == liomode) {
+		pr_info("%s: subtracting 32 for header\n", __func__);
+		lbuf_addr_lsw = (read.buf_addr_lsw - 32);
+	} else {
+		lbuf_addr_lsw = read.buf_addr_lsw;
 	}
 
-	pr_debug("%s: session[%d] bufadd[0x%x]len[0x%x]", __func__, ac->session,
-		read.buf_addr_lsw, read.buf_size);
+	list_for_each_safe(ptr, next, &ac->port[IN].mem_map_handle) {
+		buf_node = list_entry(ptr, struct asm_buffer_node, list);
+		if (buf_node->buf_addr_lsw == lbuf_addr_lsw) {
+			read.mem_map_handle = buf_node->mmap_hdl;
+			break;
+		}
+	}
 
 	rc = apr_send_pkt(ac->apr, (uint32_t *) &read);
 	if (rc < 0) {
@@ -3013,8 +3104,7 @@
 						list);
 		write.mem_map_handle = buf_node->mmap_hdl;
 
-		pr_debug("%s:ab->phys[0x%x]bufadd[0x%x]"
-			"token[0x%x]buf_id[0x%x]buf_size[0x%x]mmaphdl[0x%x]"
+		pr_debug("%s:ab->phys[0x%x]bufadd[0x%x] token[0x%x]buf_id[0x%x]buf_size[0x%x]mmaphdl[0x%x]\n"
 						, __func__,
 						ab->phys,
 						write.buf_addr_lsw,
@@ -3081,8 +3171,7 @@
 			write.flags = (0x80000000 | flags);
 		port->dsp_buf = (port->dsp_buf + 1) & (port->max_buf_cnt - 1);
 
-		pr_err("%s:ab->phys[0x%x]bufadd[0x%x]token[0x%x]"
-			"buf_id[0x%x]buf_size[0x%x]mmaphdl[0x%x]"
+		pr_debug("%s:ab->phys[0x%x]bufadd[0x%x]token[0x%x] buf_id[0x%x]buf_size[0x%x]mmaphdl[0x%x]\n"
 							, __func__,
 							ab->phys,
 							write.buf_addr_lsw,